Dataset columns (name, type, and value-length range as reported by the dataset viewer):

    repository_name              string       length 7-55
    func_path_in_repository      string       length 4-223
    func_name                    string       length 1-134
    whole_func_string            string       length 75-104k
    language                     class label  1 distinct value
    func_code_string             string       length 75-104k
    func_code_tokens             list         length 19-28.4k
    func_documentation_string    string       length 1-46.9k
    func_documentation_tokens    list         length 1-1.97k
    split_name                   class label  1 distinct value
    func_code_url                string       length 87-315

repository_name: edelooff/sqlalchemy-json
func_path_in_repository: sqlalchemy_json/track.py
func_name: TrackedObject.convert_items
language: python
func_code_string:

    def convert_items(self, items):
        """Generator like `convert_iterable`, but for 2-tuple iterators."""
        return ((key, self.convert(value, self)) for key, value in items)

func_documentation_string: Generator like `convert_iterable`, but for 2-tuple iterators.
split_name: train
func_code_url: https://github.com/edelooff/sqlalchemy-json/blob/4e5df0d61dc09ed9a52e24ab291a1f1e14aa95cc/sqlalchemy_json/track.py#L77-L79

repository_name: edelooff/sqlalchemy-json
func_path_in_repository: sqlalchemy_json/track.py
func_name: TrackedObject.convert_mapping
language: python
func_code_string:

    def convert_mapping(self, mapping):
        """Convenience method to track either a dict or a 2-tuple iterator."""
        if isinstance(mapping, dict):
            return self.convert_items(iteritems(mapping))
        return self.convert_items(mapping)

func_documentation_string: Convenience method to track either a dict or a 2-tuple iterator.
split_name: train
func_code_url: https://github.com/edelooff/sqlalchemy-json/blob/4e5df0d61dc09ed9a52e24ab291a1f1e14aa95cc/sqlalchemy_json/track.py#L81-L85

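Taken together, `convert_items` and `convert_mapping` turn any mapping-like input into a stream of (key, tracked value) pairs. A minimal sketch of a consumer, assuming a dict-based `TrackedObject` subclass (the `TrackedDict` shape below is an illustration, not code quoted from the repository):

    class TrackedDict(TrackedObject, dict):
        """Dict that wraps incoming values via convert_mapping()."""

        def __init__(self, source=(), **kwds):
            # convert_mapping accepts a plain dict or a 2-tuple iterator alike
            super(TrackedDict, self).__init__(self.convert_mapping(source), **kwds)

        def update(self, source=(), **kwds):
            super(TrackedDict, self).update(self.convert_mapping(source))
            self.changed()  # propagate the mutation to parent trackers
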
repository_name: praekelt/django-preferences
func_path_in_repository: preferences/admin.py
func_name: PreferencesAdmin.changelist_view
language: python
func_code_string:

    def changelist_view(self, request, extra_context=None):
        """
        If we only have a single preference object redirect to it,
        otherwise display listing.
        """
        model = self.model
        if model.objects.all().count() > 1:
            return super(PreferencesAdmin, self).changelist_view(request)
        else:
            obj = model.singleton.get()
            return redirect(
                reverse(
                    'admin:%s_%s_change' % (
                        model._meta.app_label,
                        model._meta.model_name
                    ),
                    args=(obj.id,)
                )
            )

func_documentation_string: If we only have a single preference object redirect to it, otherwise display listing.
split_name: train
func_code_url: https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/admin.py#L13-L30

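The `reverse()` call assembles the standard `admin:<app_label>_<model_name>_change` URL name from the model metadata. For a hypothetical `preferences.SitePreference` model with primary key 1 (names chosen for illustration):

    from django.urls import reverse  # django.core.urlresolvers on older Django

    url = reverse('admin:preferences_sitepreference_change', args=(1,))
    # -> '/admin/preferences/sitepreference/1/change/' with the default admin site
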
repository_name: voyages-sncf-technologies/nexus_uploader
func_path_in_repository: setup.py
func_name: md2rst
language: python
func_code_string:

    def md2rst(md_lines):
        'Only converts headers'
        lvl2header_char = {1: '=', 2: '-', 3: '~'}
        for md_line in md_lines:
            if md_line.startswith('#'):
                header_indent, header_text = md_line.split(' ', 1)
                yield header_text
                header_char = lvl2header_char[len(header_indent)]
                yield header_char * len(header_text)
            else:
                yield md_line

func_documentation_string: Only converts headers
split_name: train
func_code_url: https://github.com/voyages-sncf-technologies/nexus_uploader/blob/dca654f9080264b1dcaabfc2fd19f26b1c4f59fe/setup.py#L24-L34

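Since `md2rst` is a generator over lines, it converts `#`/`##`/`###` Markdown headers into reST underlined titles and passes everything else through unchanged:

    lines = ['# Title', 'Some text', '## Section']
    print('\n'.join(md2rst(lines)))
    # Title
    # =====
    # Some text
    # Section
    # -------
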
repository_name: voyages-sncf-technologies/nexus_uploader
func_path_in_repository: nexus_uploader/utils.py
func_name: aslist
language: python
func_code_string:

    def aslist(generator):
        'Function decorator to transform a generator into a list'
        def wrapper(*args, **kwargs):
            return list(generator(*args, **kwargs))
        return wrapper

func_documentation_string: Function decorator to transform a generator into a list
split_name: train
func_code_url: https://github.com/voyages-sncf-technologies/nexus_uploader/blob/dca654f9080264b1dcaabfc2fd19f26b1c4f59fe/nexus_uploader/utils.py#L17-L21

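Applied as a decorator, `aslist` makes call sites receive a materialized list instead of a generator (note it does not use `functools.wraps`, so the wrapped function's name and docstring are not preserved):

    @aslist
    def squares(n):
        for i in range(n):
            yield i * i

    assert squares(4) == [0, 1, 4, 9]  # a list, not a generator
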
repository_name: voyages-sncf-technologies/nexus_uploader
func_path_in_repository: nexus_uploader/pypi.py
func_name: get_package_release_from_pypi
language: python
func_code_string:

    def get_package_release_from_pypi(pkg_name, version, pypi_json_api_url, allowed_classifiers):
        """ No classifier-based selection of Python packages is currently implemented:
        for now we don't fetch any .whl or .egg
        Eventually, we should select the best release available, based on the classifier & PEP 425:
        https://www.python.org/dev/peps/pep-0425/
        E.g. a wheel when available but NOT for tornado 4.3 for example,
        where available wheels are only for Windows.
        Note also that some packages don't have .whl distributed,
        e.g. https://bugs.launchpad.net/lxml/+bug/1176147
        """
        matching_releases = get_package_releases_matching_version(pkg_name, version, pypi_json_api_url)
        src_releases = [release for release in matching_releases if release['python_version'] == 'source']
        if src_releases:
            return select_src_release(src_releases, pkg_name,
                                      target_classifiers=('py2.py3-none-any',),
                                      select_arbitrary_version_if_none_match=True)
        if allowed_classifiers:
            return select_src_release(matching_releases, pkg_name, target_classifiers=allowed_classifiers)
        raise PypiQueryError('No supported source found for package {} version {}'.format(pkg_name, version))

func_documentation_string: No classifier-based selection of Python packages is currently implemented: for now we don't fetch any .whl or .egg Eventually, we should select the best release available, based on the classifier & PEP 425: https://www.python.org/dev/peps/pep-0425/ E.g. a wheel when available but NOT for tornado 4.3 for example, where available wheels are only for Windows. Note also that some packages don't have .whl distributed, e.g. https://bugs.launchpad.net/lxml/+bug/1176147
split_name: train
func_code_url: https://github.com/voyages-sncf-technologies/nexus_uploader/blob/dca654f9080264b1dcaabfc2fd19f26b1c4f59fe/nexus_uploader/pypi.py#L27-L40

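`get_package_releases_matching_version` is not included in this record. A plausible sketch, assuming the public PyPI JSON API layout (`GET <api_url>/<pkg>/<version>/json` returning a `urls` list whose entries carry `python_version` and `filename`); the endpoint shape matches documented pypi.org behaviour, but this body is an assumption, not the repository's code:

    import requests

    def get_package_releases_matching_version(pkg_name, version, pypi_json_api_url):
        # e.g. pypi_json_api_url = 'https://pypi.org/pypi'
        resp = requests.get('{}/{}/{}/json'.format(pypi_json_api_url, pkg_name, version))
        resp.raise_for_status()
        # each entry describes one downloadable file of that release
        return resp.json()['urls']
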
repository_name: voyages-sncf-technologies/nexus_uploader
func_path_in_repository: nexus_uploader/pypi.py
func_name: extract_classifier_and_extension
language: python
func_code_string:

    def extract_classifier_and_extension(pkg_name, filename):
        """ Returns a PEP425-compliant classifier (or 'py2.py3-none-any' if it
        cannot be extracted), and the file extension
        TODO: return a classifier 3-members namedtuple instead of a single string
        """
        basename, _, extension = filename.rpartition('.')
        if extension == 'gz' and filename.endswith('.tar.gz'):
            extension = 'tar.gz'
            basename = filename[:-7]
        if basename == pkg_name or basename[len(pkg_name)] != '-':
            return 'py2.py3-none-any', extension
        basename = basename[len(pkg_name)+1:]
        classifier_parts = basename.split('-')
        if len(classifier_parts) < 3:
            return 'py2.py3-none-any', extension
        if len(classifier_parts) == 3:
            _, _, classifier_parts[0] = classifier_parts[0].rpartition('.')
        return '-'.join(classifier_parts[-3:]), extension

func_documentation_string: Returns a PEP425-compliant classifier (or 'py2.py3-none-any' if it cannot be extracted), and the file extension. TODO: return a classifier 3-members namedtuple instead of a single string
split_name: train
func_code_url: https://github.com/voyages-sncf-technologies/nexus_uploader/blob/dca654f9080264b1dcaabfc2fd19f26b1c4f59fe/nexus_uploader/pypi.py#L62-L80

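Tracing the function on two common filename shapes shows both main paths (results checked by hand against the code above):

    # Source tarball: no classifier in the name, so the default is returned.
    extract_classifier_and_extension('requests', 'requests-2.0.0.tar.gz')
    # -> ('py2.py3-none-any', 'tar.gz')

    # Universal wheel: the last three dash-separated parts form the classifier.
    extract_classifier_and_extension('six', 'six-1.10.0-py2.py3-none-any.whl')
    # -> ('py2.py3-none-any', 'whl')
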
repository_name: edelooff/sqlalchemy-json
func_path_in_repository: sqlalchemy_json/__init__.py
func_name: NestedMutable.coerce
language: python
func_code_string:

    def coerce(cls, key, value):
        """Convert plain dictionary to NestedMutable."""
        if value is None:
            return value
        if isinstance(value, cls):
            return value
        if isinstance(value, dict):
            return NestedMutableDict.coerce(key, value)
        if isinstance(value, list):
            return NestedMutableList.coerce(key, value)
        return super(NestedMutable, cls).coerce(key, value)

func_documentation_string: Convert plain dictionary to NestedMutable.
split_name: train
func_code_url: https://github.com/edelooff/sqlalchemy-json/blob/4e5df0d61dc09ed9a52e24ab291a1f1e14aa95cc/sqlalchemy_json/__init__.py#L36-L46

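In sqlalchemy-json this coercion is what keeps nested structures mutation-tracked. A minimal column sketch, assuming the `NestedMutableJson` type the package builds on top of this class (table and column names below are made up):

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy_json import NestedMutableJson

    Base = declarative_base()

    class Article(Base):
        __tablename__ = 'article'
        id = Column(Integer, primary_key=True)
        meta = Column(NestedMutableJson)

    # A nested change such as article.meta['counts']['views'] += 1 still
    # flags the attribute as dirty, because coerce() recursively wraps
    # plain dicts and lists in their NestedMutable counterparts.
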
repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: is_mod_function
language: python
func_code_string:

    def is_mod_function(mod, fun):
        """Checks if a function in a module was declared in that module.

        http://stackoverflow.com/a/1107150/3004221

        Args:
            mod: the module
            fun: the function
        """
        return inspect.isfunction(fun) and inspect.getmodule(fun) == mod

func_documentation_string: Checks if a function in a module was declared in that module. http://stackoverflow.com/a/1107150/3004221 Args: mod: the module fun: the function
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L15-L24

repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: is_mod_class
language: python
func_code_string:

    def is_mod_class(mod, cls):
        """Checks if a class in a module was declared in that module.

        Args:
            mod: the module
            cls: the class
        """
        return inspect.isclass(cls) and inspect.getmodule(cls) == mod

func_documentation_string: Checks if a class in a module was declared in that module. Args: mod: the module cls: the class
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L27-L34

repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: list_functions
language: python
func_code_string:

    def list_functions(mod_name):
        """Lists all functions declared in a module.

        http://stackoverflow.com/a/1107150/3004221

        Args:
            mod_name: the module name

        Returns:
            A list of functions declared in that module.
        """
        mod = sys.modules[mod_name]
        return [func.__name__ for func in mod.__dict__.values()
                if is_mod_function(mod, func)]

func_documentation_string: Lists all functions declared in a module. http://stackoverflow.com/a/1107150/3004221 Args: mod_name: the module name Returns: A list of functions declared in that module.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L37-L49

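Both helpers rely on `inspect.getmodule` returning the defining module, which filters out names a module merely imported. A quick stdlib demonstration (exact output varies across Python versions):

    import json  # the functions below are defined in json/__init__.py itself

    print(list_functions('json'))
    # e.g. ['detect_encoding', 'dump', 'dumps', 'load', 'loads'];
    # helpers imported from json.decoder/json.encoder are filtered out
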
repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: list_classes
language: python
func_code_string:

    def list_classes(mod_name):
        """Lists all classes declared in a module.

        Args:
            mod_name: the module name

        Returns:
            A list of classes declared in that module.
        """
        mod = sys.modules[mod_name]
        return [cls.__name__ for cls in mod.__dict__.values()
                if is_mod_class(mod, cls)]

func_documentation_string: Lists all classes declared in a module. Args: mod_name: the module name Returns: A list of classes declared in that module.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L52-L62

repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: get_linenumbers
language: python
func_code_string:

    def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
        """Returns a dictionary which maps function names to line numbers.

        Args:
            functions: a list of function names
            module: the module to look the functions up
            searchstr: the string to search for

        Returns:
            A dictionary with functions as keys and their line numbers as values.
        """
        lines = inspect.getsourcelines(module)[0]
        line_numbers = {}
        for function in functions:
            try:
                line_numbers[function] = lines.index(
                    searchstr.format(function)) + 1
            except ValueError:
                print(r'Can not find `{}`'.format(searchstr.format(function)))
                line_numbers[function] = 0
        return line_numbers

func_documentation_string: Returns a dictionary which maps function names to line numbers. Args: functions: a list of function names module: the module to look the functions up searchstr: the string to search for Returns: A dictionary with functions as keys and their line numbers as values.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L65-L84

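The lookup is an exact line match, so `searchstr` must reproduce the source line byte for byte, including the trailing newline. A sketch of both outcomes (the module name is hypothetical):

    # assuming mymodule.py contains a line exactly equal to 'def blur(image):\n'
    import mymodule

    get_linenumbers(['blur'], mymodule)     # -> {'blur': <1-based line number>}
    get_linenumbers(['missing'], mymodule)  # prints a warning and maps it to 0
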
repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: format_doc
language: python
func_code_string:

    def format_doc(fun):
        """Formats the documentation in a nicer way and for notebook cells."""
        SEPARATOR = '============================='
        func = cvloop.functions.__dict__[fun]
        doc_lines = ['{}'.format(l).strip() for l in func.__doc__.split('\n')]
        if hasattr(func, '__init__'):
            doc_lines.append(SEPARATOR)
            doc_lines += ['{}'.format(l).strip()
                          for l in func.__init__.__doc__.split('\n')]
        mod_lines = []
        argblock = False
        returnblock = False
        for line in doc_lines:
            if line == SEPARATOR:
                mod_lines.append('\n#### `{}.__init__(...)`:\n\n'.format(fun))
            elif 'Args:' in line:
                argblock = True
                if GENERATE_ARGS:
                    mod_lines.append('**{}**\n'.format(line))
            elif 'Returns:' in line:
                returnblock = True
                mod_lines.append('\n**{}**'.format(line))
            elif not argblock and not returnblock:
                mod_lines.append('{}\n'.format(line))
            elif argblock and not returnblock and ':' in line:
                if GENERATE_ARGS:
                    mod_lines.append('- *{}:* {}\n'.format(*line.split(':')))
            elif returnblock:
                mod_lines.append(line)
            else:
                mod_lines.append('{}\n'.format(line))
        return mod_lines

func_documentation_string: Formats the documentation in a nicer way and for notebook cells.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L87-L121

repository_name: shoeffner/cvloop
func_path_in_repository: tools/create_functions_ipynb.py
func_name: main
language: python
func_code_string:

    def main():
        """Main function creates the cvloop.functions example notebook."""
        notebook = {
            'cells': [
                {
                    'cell_type': 'markdown',
                    'metadata': {},
                    'source': [
                        '# cvloop functions\n\n',
                        'This notebook shows an overview over all cvloop ',
                        'functions provided in the [`cvloop.functions` module](',
                        'https://github.com/shoeffner/cvloop/blob/',
                        'develop/cvloop/functions.py).'
                    ]
                },
            ],
            'nbformat': 4,
            'nbformat_minor': 1,
            'metadata': {
                'language_info': {
                    'codemirror_mode': {
                        'name': 'ipython',
                        'version': 3
                    },
                    'file_extension': '.py',
                    'mimetype': 'text/x-python',
                    'name': 'python',
                    'nbconvert_exporter': 'python',
                    'pygments_lexer': 'ipython3',
                    'version': '3.5.1+'
                }
            }
        }

        classes = list_classes('cvloop.functions')
        functions = list_functions('cvloop.functions')
        line_numbers_cls = get_linenumbers(classes, cvloop.functions,
                                           'class {}:\n')
        line_numbers = get_linenumbers(functions, cvloop.functions)

        for cls in classes:
            line_number = line_numbers_cls[cls]
            notebook['cells'].append(create_description_cell(cls, line_number))
            notebook['cells'].append(create_code_cell(cls, isclass=True))

        for func in functions:
            line_number = line_numbers[func]
            notebook['cells'].append(create_description_cell(func, line_number))
            notebook['cells'].append(create_code_cell(func))

        with open(sys.argv[1], 'w') as nfile:
            json.dump(notebook, nfile, indent=4)

func_documentation_string: Main function creates the cvloop.functions example notebook.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/create_functions_ipynb.py#L161-L212

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: prepare_axes
language: python
func_code_string:

    def prepare_axes(axes, title, size, cmap=None):
        """Prepares an axes object for clean plotting.

        Removes x and y axes labels and ticks, sets the aspect ratio to
        be equal, uses the size to determine the drawing area and fills
        the image with random colors as visual feedback.

        Creates an AxesImage to be shown inside the axes object and
        sets the needed properties.

        Args:
            axes: The axes object to modify.
            title: The title.
            size: The size of the expected image.
            cmap: The colormap if a custom color map is needed.
                (Default: None)

        Returns:
            The AxesImage's handle.
        """
        if axes is None:
            return None

        # prepare axis itself
        axes.set_xlim([0, size[1]])
        axes.set_ylim([size[0], 0])
        axes.set_aspect('equal')
        axes.axis('off')
        if isinstance(cmap, str):
            title = '{} (cmap: {})'.format(title, cmap)
        axes.set_title(title)

        # prepare image data
        axes_image = image.AxesImage(axes, cmap=cmap,
                                     extent=(0, size[1], size[0], 0))
        axes_image.set_data(np.random.random((size[0], size[1], 3)))
        axes.add_image(axes_image)

        return axes_image

func_documentation_string: Prepares an axes object for clean plotting. Removes x and y axes labels and ticks, sets the aspect ratio to be equal, uses the size to determine the drawing area and fills the image with random colors as visual feedback. Creates an AxesImage to be shown inside the axes object and sets the needed properties. Args: axes: The axes object to modify. title: The title. size: The size of the expected image. cmap: The colormap if a custom color map is needed. (Default: None) Returns: The AxesImage's handle.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L29-L67

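A minimal sketch of driving `prepare_axes` standalone (in cvloop itself the figure setup happens elsewhere; the subplot layout below is an assumption for illustration):

    import matplotlib.pyplot as plt

    fig, (ax_left, ax_right) = plt.subplots(1, 2)
    # (480, 640) is the expected (height, width) of incoming frames
    img_handle = prepare_axes(ax_left, 'Original', (480, 640))
    prepare_axes(ax_right, 'Processed', (480, 640), cmap='gray')
    # later: img_handle.set_data(frame) swaps the random noise for a real image
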
repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.connect_event_handlers
language: python
func_code_string:

    def connect_event_handlers(self):
        """Connects event handlers to the figure."""
        self.figure.canvas.mpl_connect('close_event', self.evt_release)
        self.figure.canvas.mpl_connect('pause_event', self.evt_toggle_pause)

func_documentation_string: Connects event handlers to the figure.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L237-L240

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.evt_toggle_pause
language: python
func_code_string:

    def evt_toggle_pause(self, *args):  # pylint: disable=unused-argument
        """Pauses and resumes the video source."""
        if self.event_source._timer is None:  # noqa: e501 pylint: disable=protected-access
            self.event_source.start()
        else:
            self.event_source.stop()

func_documentation_string: Pauses and resumes the video source.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L249-L254

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.print_info
language: python
func_code_string:

    def print_info(self, capture):
        """Prints information about the unprocessed image.

        Reads one frame from the source to determine image colors,
        dimensions and data types.

        Args:
            capture: the source to read from.
        """
        self.frame_offset += 1
        ret, frame = capture.read()
        if ret:
            print('Capture Information')
            print('\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))
            print('\tColor channels: {}'.format(frame.shape[2]
                                                if len(frame.shape) > 2 else 1))
            print('\tColor range: {}-{}'.format(np.min(frame), np.max(frame)))
            print('\tdtype: {}'.format(frame.dtype))
        else:
            print('No source found.')

func_documentation_string: Prints information about the unprocessed image. Reads one frame from the source to determine image colors, dimensions and data types. Args: capture: the source to read from.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L256-L276

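Called on a cvloop instance with a standard OpenCV capture, this consumes one frame (hence the `frame_offset` increment) and reports its geometry; for a 720p BGR webcam the output would resemble the following (values illustrative):

    import cv2

    capture = cv2.VideoCapture(0)  # default webcam
    # loop.print_info(capture) prints something like:
    #   Capture Information
    #       Dimensions (HxW): 720x1280
    #       Color channels: 3
    #       Color range: 0-255
    #       dtype: uint8
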
repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.determine_size
language: python
func_code_string:

    def determine_size(self, capture):
        """Determines the height and width of the image source.

        If no dimensions are available, this method defaults to a
        resolution of 640x480, thus returns (480, 640).

        If capture has a get method it is assumed to understand
        `cv2.CAP_PROP_FRAME_WIDTH` and `cv2.CAP_PROP_FRAME_HEIGHT` to
        get the information. Otherwise it reads one frame from the
        source to determine image dimensions.

        Args:
            capture: the source to read from.

        Returns:
            A tuple containing integers of height and width (simple casts).
        """
        width = 640
        height = 480
        if capture and hasattr(capture, 'get'):
            width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        else:
            self.frame_offset += 1
            ret, frame = capture.read()
            if ret:
                width = frame.shape[1]
                height = frame.shape[0]
        return (int(height), int(width))

func_documentation_string: Determines the height and width of the image source. If no dimensions are available, this method defaults to a resolution of 640x480, thus returns (480, 640). If capture has a get method it is assumed to understand `cv2.CAP_PROP_FRAME_WIDTH` and `cv2.CAP_PROP_FRAME_HEIGHT` to get the information. Otherwise it reads one frame from the source to determine image dimensions. Args: capture: the source to read from. Returns: A tuple containing integers of height and width (simple casts).
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L278-L305

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop._init_draw
language: python
func_code_string:

    def _init_draw(self):
        """Initializes the drawing of the frames by setting the images
        to random colors.

        This function is called by TimedAnimation.
        """
        if self.original is not None:
            self.original.set_data(np.random.random((10, 10, 3)))
        self.processed.set_data(np.random.random((10, 10, 3)))

func_documentation_string: Initializes the drawing of the frames by setting the images to random colors. This function is called by TimedAnimation.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L320-L328

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.read_frame
language: python
func_code_string:

    def read_frame(self):
        """Reads a frame and converts the color if needed.

        In case no frame is available, i.e. self.capture.read() returns
        False as the first return value, the event_source of the
        TimedAnimation is stopped, and if possible the capture source
        released.

        Returns:
            None if stopped, otherwise the color converted source image.
        """
        ret, frame = self.capture.read()
        if not ret:
            self.event_source.stop()
            try:
                self.capture.release()
            except AttributeError:
                # has no release method, thus just pass
                pass
            return None
        if self.convert_color != -1 and is_color_image(frame):
            return cv2.cvtColor(frame, self.convert_color)
        return frame

func_documentation_string: Reads a frame and converts the color if needed. In case no frame is available, i.e. self.capture.read() returns False as the first return value, the event_source of the TimedAnimation is stopped, and if possible the capture source released. Returns: None if stopped, otherwise the color converted source image.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L330-L351

repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop.annotate
language: python
func_code_string:

    def annotate(self, framedata):
        """Annotates the processed axis with given annotations for the
        provided framedata.

        Args:
            framedata: The current frame number.
        """
        for artist in self.annotation_artists:
            artist.remove()
        self.annotation_artists = []
        for annotation in self.annotations:
            if annotation[2] > framedata:
                return
            if annotation[2] == framedata:
                pos = annotation[0:2]
                shape = self.annotations_default['shape']
                color = self.annotations_default['color']
                size = self.annotations_default['size']
                line = self.annotations_default['line']
                if len(annotation) > 3:
                    shape = annotation[3].get('shape', shape)
                    color = annotation[3].get('color', color)
                    size = annotation[3].get('size', size)
                    line = annotation[3].get('line', line)
                if shape == 'CIRC' and hasattr(size, '__len__'):
                    size = 30
                if not hasattr(color, '__len__'):
                    color = (color,) * 3
                if shape == 'RECT':
                    patch = patches.Rectangle((pos[0] - size[0] // 2,
                                               pos[1] - size[1] // 2),
                                              size[0], size[1],
                                              fill=False, lw=line,
                                              fc='none', ec=color)
                elif shape == 'CIRC':
                    patch = patches.CirclePolygon(pos, radius=size,
                                                  fc='none', ec=color,
                                                  lw=line)
                self.annotation_artists.append(patch)
                self.axes_processed.add_artist(self.annotation_artists[-1])

func_documentation_string: Annotates the processed axis with given annotations for the provided framedata. Args: framedata: The current frame number.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L364-L403

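From the tuple indexing above, each annotation is `(x, y, frame)` plus an optional options dict, and the list must be sorted by frame number because iteration returns early at the first annotation scheduled for a later frame. For illustration:

    annotations = [
        (320, 240, 0),                                  # defaults, at frame 0
        (100, 50, 12, {'shape': 'RECT',
                       'size': (40, 20),                # width, height in pixels
                       'color': (1.0, 0.0, 0.0),        # matplotlib RGB tuple
                       'line': 2}),
        (200, 200, 30, {'shape': 'CIRC', 'size': 15}),  # scalar size = radius
    ]
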
repository_name: shoeffner/cvloop
func_path_in_repository: cvloop/cvloop.py
func_name: cvloop._draw_frame
language: python
func_code_string:

    def _draw_frame(self, framedata):
        """Reads, processes and draws the frames.

        If needed for color maps, conversions to gray scale are
        performed. In case the images are no color images and no custom
        color maps are defined, the colormap `gray` is applied.

        This function is called by TimedAnimation.

        Args:
            framedata: The frame data.
        """
        original = self.read_frame()
        if original is None:
            self.update_info(self.info_string(message='Finished.',
                                              frame=framedata))
            return

        if self.original is not None:
            processed = self.process_frame(original.copy())
            if self.cmap_original is not None:
                original = to_gray(original)
            elif not is_color_image(original):
                self.original.set_cmap('gray')
            self.original.set_data(original)
        else:
            processed = self.process_frame(original)

        if self.cmap_processed is not None:
            processed = to_gray(processed)
        elif not is_color_image(processed):
            self.processed.set_cmap('gray')

        if self.annotations:
            self.annotate(framedata)

        self.processed.set_data(processed)
        self.update_info(self.info_string(frame=framedata))

func_documentation_string: Reads, processes and draws the frames. If needed for color maps, conversions to gray scale are performed. In case the images are no color images and no custom color maps are defined, the colormap `gray` is applied. This function is called by TimedAnimation. Args: framedata: The frame data.
split_name: train
func_code_url: https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L405-L444

shoeffner/cvloop
cvloop/cvloop.py
cvloop.update_info
def update_info(self, custom=None): """Updates the figure's suptitle. Calls self.info_string() unless custom is provided. Args: custom: Overwrite it with this string, unless None. """ self.figure.suptitle(self.info_string() if custom is None else custom)
python
def update_info(self, custom=None): """Updates the figure's suptitle. Calls self.info_string() unless custom is provided. Args: custom: Overwrite it with this string, unless None. """ self.figure.suptitle(self.info_string() if custom is None else custom)
[ "def", "update_info", "(", "self", ",", "custom", "=", "None", ")", ":", "self", ".", "figure", ".", "suptitle", "(", "self", ".", "info_string", "(", ")", "if", "custom", "is", "None", "else", "custom", ")" ]
Updates the figure's suptitle. Calls self.info_string() unless custom is provided. Args: custom: Overwrite it with this string, unless None.
[ "Updates", "the", "figure", "s", "suptitle", "." ]
train
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L446-L454
shoeffner/cvloop
cvloop/cvloop.py
cvloop.info_string
def info_string(self, size=None, message='', frame=-1): """Returns information about the stream. Generates a string containing size, frame number, and info messages. Omits unnecessary information (e.g. empty messages and frame -1). This method is primarily used to update the suptitle of the plot figure. Returns: An info string. """ info = [] if size is not None: info.append('Size: {1}x{0}'.format(*size)) elif self.size is not None: info.append('Size: {1}x{0}'.format(*self.size)) if frame >= 0: info.append('Frame: {}'.format(frame)) if message != '': info.append('{}'.format(message)) return ' '.join(info)
python
def info_string(self, size=None, message='', frame=-1): """Returns information about the stream. Generates a string containing size, frame number, and info messages. Omits unnecessary information (e.g. empty messages and frame -1). This method is primarily used to update the suptitle of the plot figure. Returns: An info string. """ info = [] if size is not None: info.append('Size: {1}x{0}'.format(*size)) elif self.size is not None: info.append('Size: {1}x{0}'.format(*self.size)) if frame >= 0: info.append('Frame: {}'.format(frame)) if message != '': info.append('{}'.format(message)) return ' '.join(info)
[ "def", "info_string", "(", "self", ",", "size", "=", "None", ",", "message", "=", "''", ",", "frame", "=", "-", "1", ")", ":", "info", "=", "[", "]", "if", "size", "is", "not", "None", ":", "info", ".", "append", "(", "'Size: {1}x{0}'", ".", "format", "(", "*", "size", ")", ")", "elif", "self", ".", "size", "is", "not", "None", ":", "info", ".", "append", "(", "'Size: {1}x{0}'", ".", "format", "(", "*", "self", ".", "size", ")", ")", "if", "frame", ">=", "0", ":", "info", ".", "append", "(", "'Frame: {}'", ".", "format", "(", "frame", ")", ")", "if", "message", "!=", "''", ":", "info", ".", "append", "(", "'{}'", ".", "format", "(", "message", ")", ")", "return", "' '", ".", "join", "(", "info", ")" ]
Returns information about the stream. Generates a string containing size, frame number, and info messages. Omits unnecessary information (e.g. empty messages and frame -1). This method is primarily used to update the suptitle of the plot figure. Returns: An info string.
[ "Returns", "information", "about", "the", "stream", "." ]
train
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/cvloop/cvloop.py#L456-L477
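A worked example of the string info_string() assembles, assuming `loop` is an instance of the surrounding cvloop class (the values are made up; note that `size` arrives as (height, width), so the '{1}x{0}' format swaps the pair):

    loop.info_string(size=(480, 640), message='Paused', frame=42)
    # -> 'Size: 640x480 Frame: 42 Paused'
    loop.info_string(message='Finished.', frame=7)  # with self.size unset
    # -> 'Frame: 7 Finished.'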
shoeffner/cvloop
tools/sanitize_ipynb.py
main
def main(): """Sanitizes the loaded *.ipynb.""" with open(sys.argv[1], 'r') as nbfile: notebook = json.load(nbfile) # remove kernelspec (venvs) try: del notebook['metadata']['kernelspec'] except KeyError: pass # remove outputs and metadata, set execution counts to None for cell in notebook['cells']: try: if cell['cell_type'] == 'code': cell['outputs'] = [] cell['execution_count'] = None cell['metadata'] = {} except KeyError: pass with open(sys.argv[1], 'w') as nbfile: json.dump(notebook, nbfile, indent=1)
python
def main(): """Sanitizes the loaded *.ipynb.""" with open(sys.argv[1], 'r') as nbfile: notebook = json.load(nbfile) # remove kernelspec (venvs) try: del notebook['metadata']['kernelspec'] except KeyError: pass # remove outputs and metadata, set execution counts to None for cell in notebook['cells']: try: if cell['cell_type'] == 'code': cell['outputs'] = [] cell['execution_count'] = None cell['metadata'] = {} except KeyError: pass with open(sys.argv[1], 'w') as nbfile: json.dump(notebook, nbfile, indent=1)
[ "def", "main", "(", ")", ":", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ",", "'r'", ")", "as", "nbfile", ":", "notebook", "=", "json", ".", "load", "(", "nbfile", ")", "# remove kernelspec (venvs)", "try", ":", "del", "notebook", "[", "'metadata'", "]", "[", "'kernelspec'", "]", "except", "KeyError", ":", "pass", "# remove outputs and metadata, set execution counts to None", "for", "cell", "in", "notebook", "[", "'cells'", "]", ":", "try", ":", "if", "cell", "[", "'cell_type'", "]", "==", "'code'", ":", "cell", "[", "'outputs'", "]", "=", "[", "]", "cell", "[", "'execution_count'", "]", "=", "None", "cell", "[", "'metadata'", "]", "=", "{", "}", "except", "KeyError", ":", "pass", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ",", "'w'", ")", "as", "nbfile", ":", "json", ".", "dump", "(", "notebook", ",", "nbfile", ",", "indent", "=", "1", ")" ]
Sanitizes the loaded *.ipynb.
[ "Sanitizes", "the", "loaded", "*", ".", "ipynb", "." ]
train
https://github.com/shoeffner/cvloop/blob/3ddd311e9b679d16c8fd36779931380374de343c/tools/sanitize_ipynb.py#L8-L30
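Since main() takes the notebook path from sys.argv[1] and rewrites the file in place, a typical invocation looks like this (the path is illustrative):

    python tools/sanitize_ipynb.py notebooks/example.ipynb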
icoxfog417/pykintone
pykintone/comment_api.py
CommentAPI.create
def create(self, comment, mentions=()): """ create comment :param comment: :param mentions: list of pairs of code and type("USER", "GROUP", and so on) :return: """ data = { "app": self.app_id, "record": self.record_id, "comment": { "text": comment, } } if len(mentions) > 0: _mentions = [] for m in mentions: if isinstance(m, (list, tuple)): if len(m) == 2: _mentions.append({ "code": m[0], "type": m[1] }) else: raise Exception("mention have to have code and target type. ex.[('user_1', 'USER')]") elif isinstance(m, Mention): _mentions.append(m.serialize()) data["comment"]["mentions"] = _mentions resp = self._request("POST", self._url, data) r = cr.CreateCommentResult(resp) return r
python
def create(self, comment, mentions=()): """ create comment :param comment: :param mentions: list of pairs of code and type("USER", "GROUP", and so on) :return: """ data = { "app": self.app_id, "record": self.record_id, "comment": { "text": comment, } } if len(mentions) > 0: _mentions = [] for m in mentions: if isinstance(m, (list, tuple)): if len(m) == 2: _mentions.append({ "code": m[0], "type": m[1] }) else: raise Exception("mention have to have code and target type. ex.[('user_1', 'USER')]") elif isinstance(m, Mention): _mentions.append(m.serialize()) data["comment"]["mentions"] = _mentions resp = self._request("POST", self._url, data) r = cr.CreateCommentResult(resp) return r
[ "def", "create", "(", "self", ",", "comment", ",", "mentions", "=", "(", ")", ")", ":", "data", "=", "{", "\"app\"", ":", "self", ".", "app_id", ",", "\"record\"", ":", "self", ".", "record_id", ",", "\"comment\"", ":", "{", "\"text\"", ":", "comment", ",", "}", "}", "if", "len", "(", "mentions", ")", ">", "0", ":", "_mentions", "=", "[", "]", "for", "m", "in", "mentions", ":", "if", "isinstance", "(", "m", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "m", ")", "==", "2", ":", "_mentions", ".", "append", "(", "{", "\"code\"", ":", "m", "[", "0", "]", ",", "\"type\"", ":", "m", "[", "1", "]", "}", ")", "else", ":", "raise", "Exception", "(", "\"mention have to have code and target type. ex.[('user_1', 'USER')]\"", ")", "elif", "isinstance", "(", "m", ",", "Mention", ")", ":", "_mentions", ".", "append", "(", "m", ".", "serialize", "(", ")", ")", "data", "[", "\"comment\"", "]", "[", "\"mentions\"", "]", "=", "_mentions", "resp", "=", "self", ".", "_request", "(", "\"POST\"", ",", "self", ".", "_url", ",", "data", ")", "r", "=", "cr", ".", "CreateCommentResult", "(", "resp", ")", "return", "r" ]
create comment :param comment: :param mentions: list of pairs of code and type("USER", "GROUP", and so on) :return:
[ "create", "comment", ":", "param", "comment", ":", ":", "param", "mentions", ":", "list", "of", "pair", "of", "code", "and", "type", "(", "USER", "GROUP", "and", "so", "on", ")", ":", "return", ":" ]
train
https://github.com/icoxfog417/pykintone/blob/756609fc956fc784325d58cc01473a67a640654c/pykintone/comment_api.py#L42-L76
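A minimal usage sketch, assuming a CommentAPI instance `api` has already been obtained for the target app and record; the user and group codes are illustrative:

    api.create("Looks good to me.")
    # mentions as (code, type) pairs, matching the tuple form handled above
    api.create("Please review.", mentions=[("user_1", "USER"), ("dev_team", "GROUP")])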
uber-archive/h1-python
h1/lazy_listing.py
_consume
def _consume(iterator, n=None): """Advance the iterator n-steps ahead. If n is None, consume entirely.""" # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque collections.deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(itertools.islice(iterator, n, n), None)
python
def _consume(iterator, n=None): """Advance the iterator n-steps ahead. If n is None, consume entirely.""" # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque collections.deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(itertools.islice(iterator, n, n), None)
[ "def", "_consume", "(", "iterator", ",", "n", "=", "None", ")", ":", "# Use functions that consume iterators at C speed.", "if", "n", "is", "None", ":", "# feed the entire iterator into a zero-length deque", "collections", ".", "deque", "(", "iterator", ",", "maxlen", "=", "0", ")", "else", ":", "# advance to the empty slice starting at position n", "next", "(", "itertools", ".", "islice", "(", "iterator", ",", "n", ",", "n", ")", ",", "None", ")" ]
Advance the iterator n-steps ahead. If n is None, consume entirely.
[ "Advance", "the", "iterator", "n", "-", "steps", "ahead", ".", "If", "n", "is", "none", "consume", "entirely", "." ]
train
https://github.com/uber-archive/h1-python/blob/c91aec6a26887e453106af39e96ec6d5c7b00c9d/h1/lazy_listing.py#L31-L39
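A short illustration of both modes of _consume (the iterator contents are arbitrary):

    it = iter(range(10))
    _consume(it, 3)       # skip the first three items
    next(it)              # -> 3
    _consume(it)          # exhaust the remainder
    next(it, 'done')      # -> 'done'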
uber-archive/h1-python
h1/lazy_listing.py
_slice_required_len
def _slice_required_len(slice_obj): """ Calculate how many items must be in the collection to satisfy this slice returns `None` for slices that may vary based on the length of the underlying collection such as `lst[-1]` or `lst[::]` """ if slice_obj.step and slice_obj.step != 1: return None # (None, None, *) requires the entire list if slice_obj.start is None and slice_obj.stop is None: return None # Negative indexes are hard without knowing the collection length if slice_obj.start and slice_obj.start < 0: return None if slice_obj.stop and slice_obj.stop < 0: return None if slice_obj.stop: if slice_obj.start and slice_obj.start > slice_obj.stop: return 0 return slice_obj.stop return slice_obj.start + 1
python
def _slice_required_len(slice_obj): """ Calculate how many items must be in the collection to satisfy this slice returns `None` for slices that may vary based on the length of the underlying collection such as `lst[-1]` or `lst[::]` """ if slice_obj.step and slice_obj.step != 1: return None # (None, None, *) requires the entire list if slice_obj.start is None and slice_obj.stop is None: return None # Negative indexes are hard without knowing the collection length if slice_obj.start and slice_obj.start < 0: return None if slice_obj.stop and slice_obj.stop < 0: return None if slice_obj.stop: if slice_obj.start and slice_obj.start > slice_obj.stop: return 0 return slice_obj.stop return slice_obj.start + 1
[ "def", "_slice_required_len", "(", "slice_obj", ")", ":", "if", "slice_obj", ".", "step", "and", "slice_obj", ".", "step", "!=", "1", ":", "return", "None", "# (None, None, *) requires the entire list", "if", "slice_obj", ".", "start", "is", "None", "and", "slice_obj", ".", "stop", "is", "None", ":", "return", "None", "# Negative indexes are hard without knowing the collection length", "if", "slice_obj", ".", "start", "and", "slice_obj", ".", "start", "<", "0", ":", "return", "None", "if", "slice_obj", ".", "stop", "and", "slice_obj", ".", "stop", "<", "0", ":", "return", "None", "if", "slice_obj", ".", "stop", ":", "if", "slice_obj", ".", "start", "and", "slice_obj", ".", "start", ">", "slice_obj", ".", "stop", ":", "return", "0", "return", "slice_obj", ".", "stop", "return", "slice_obj", ".", "start", "+", "1" ]
Calculate how many items must be in the collection to satisfy this slice returns `None` for slices that may vary based on the length of the underlying collection such as `lst[-1]` or `lst[::]`
[ "Calculate", "how", "many", "items", "must", "be", "in", "the", "collection", "to", "satisfy", "this", "slice" ]
train
https://github.com/uber-archive/h1-python/blob/c91aec6a26887e453106af39e96ec6d5c7b00c9d/h1/lazy_listing.py#L42-L65
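Some worked cases, one per branch of _slice_required_len:

    _slice_required_len(slice(None, 5))        # -> 5     (lst[:5] needs five items)
    _slice_required_len(slice(2, None))        # -> 3     (stop is None, so start + 1)
    _slice_required_len(slice(3, 1))           # -> 0     (always the empty slice)
    _slice_required_len(slice(-1, None))       # -> None  (negative index needs the length)
    _slice_required_len(slice(None, None, 2))  # -> None  (non-unit step)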
dslackw/colored
colored/colored.py
stylize
def stylize(text, styles, reset=True): """conveniently styles your text and resets ANSI codes at its end.""" terminator = attr("reset") if reset else "" return "{}{}{}".format("".join(styles), text, terminator)
python
def stylize(text, styles, reset=True): """conveniently styles your text and resets ANSI codes at its end.""" terminator = attr("reset") if reset else "" return "{}{}{}".format("".join(styles), text, terminator)
[ "def", "stylize", "(", "text", ",", "styles", ",", "reset", "=", "True", ")", ":", "terminator", "=", "attr", "(", "\"reset\"", ")", "if", "reset", "else", "\"\"", "return", "\"{}{}{}\"", ".", "format", "(", "\"\"", ".", "join", "(", "styles", ")", ",", "text", ",", "terminator", ")" ]
conveniently styles your text and resets ANSI codes at its end.
[ "conveniently", "styles", "your", "text", "as", "and", "resets", "ANSI", "codes", "at", "its", "end", "." ]
train
https://github.com/dslackw/colored/blob/064172b36bd5e456c60581c7fbf77060ca7829ba/colored/colored.py#L389-L392
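Typical use of stylize() together with the package's fg() and attr() helpers (the color and attribute choices are illustrative):

    from colored import attr, fg, stylize

    print(stylize("error:", [fg("red"), attr("bold")]) + " something went wrong")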
dslackw/colored
colored/colored.py
stylize_interactive
def stylize_interactive(text, styles, reset=True): """stylize() variant that adds C0 control codes (SOH/STX) for readline safety.""" # problem: readline includes bare ANSI codes in width calculations. # solution: wrap nonprinting codes in SOH/STX when necessary. # see: https://github.com/dslackw/colored/issues/5 terminator = _c0wrap(attr("reset")) if reset else "" return "{}{}{}".format(_c0wrap(styles), text, terminator)
python
def stylize_interactive(text, styles, reset=True): """stylize() variant that adds C0 control codes (SOH/STX) for readline safety.""" # problem: readline includes bare ANSI codes in width calculations. # solution: wrap nonprinting codes in SOH/STX when necessary. # see: https://github.com/dslackw/colored/issues/5 terminator = _c0wrap(attr("reset")) if reset else "" return "{}{}{}".format(_c0wrap(styles), text, terminator)
[ "def", "stylize_interactive", "(", "text", ",", "styles", ",", "reset", "=", "True", ")", ":", "# problem: readline includes bare ANSI codes in width calculations.", "# solution: wrap nonprinting codes in SOH/STX when necessary.", "# see: https://github.com/dslackw/colored/issues/5", "terminator", "=", "_c0wrap", "(", "attr", "(", "\"reset\"", ")", ")", "if", "reset", "else", "\"\"", "return", "\"{}{}{}\"", ".", "format", "(", "_c0wrap", "(", "styles", ")", ",", "text", ",", "terminator", ")" ]
stylize() variant that adds C0 control codes (SOH/STX) for readline safety.
[ "stylize", "()", "variant", "that", "adds", "C0", "control", "codes", "(", "SOH", "/", "STX", ")", "for", "readline", "safety", "." ]
train
https://github.com/dslackw/colored/blob/064172b36bd5e456c60581c7fbf77060ca7829ba/colored/colored.py#L402-L409
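The interactive variant exists for readline-backed prompts, where bare escape codes would throw off the cursor math; a sketch assuming the same styles argument shape as stylize():

    from colored import fg, stylize_interactive

    name = input(stylize_interactive("name> ", [fg("green")]))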
dslackw/colored
colored/colored.py
colored.attribute
def attribute(self): """Set or reset attributes""" paint = { "bold": self.ESC + "1" + self.END, 1: self.ESC + "1" + self.END, "dim": self.ESC + "2" + self.END, 2: self.ESC + "2" + self.END, "underlined": self.ESC + "4" + self.END, 4: self.ESC + "4" + self.END, "blink": self.ESC + "5" + self.END, 5: self.ESC + "5" + self.END, "reverse": self.ESC + "7" + self.END, 7: self.ESC + "7" + self.END, "hidden": self.ESC + "8" + self.END, 8: self.ESC + "8" + self.END, "reset": self.ESC + "0" + self.END, 0: self.ESC + "0" + self.END, "res_bold": self.ESC + "21" + self.END, 21: self.ESC + "21" + self.END, "res_dim": self.ESC + "22" + self.END, 22: self.ESC + "22" + self.END, "res_underlined": self.ESC + "24" + self.END, 24: self.ESC + "24" + self.END, "res_blink": self.ESC + "25" + self.END, 25: self.ESC + "25" + self.END, "res_reverse": self.ESC + "27" + self.END, 27: self.ESC + "27" + self.END, "res_hidden": self.ESC + "28" + self.END, 28: self.ESC + "28" + self.END, } return paint[self.color]
python
def attribute(self): """Set or reset attributes""" paint = { "bold": self.ESC + "1" + self.END, 1: self.ESC + "1" + self.END, "dim": self.ESC + "2" + self.END, 2: self.ESC + "2" + self.END, "underlined": self.ESC + "4" + self.END, 4: self.ESC + "4" + self.END, "blink": self.ESC + "5" + self.END, 5: self.ESC + "5" + self.END, "reverse": self.ESC + "7" + self.END, 7: self.ESC + "7" + self.END, "hidden": self.ESC + "8" + self.END, 8: self.ESC + "8" + self.END, "reset": self.ESC + "0" + self.END, 0: self.ESC + "0" + self.END, "res_bold": self.ESC + "21" + self.END, 21: self.ESC + "21" + self.END, "res_dim": self.ESC + "22" + self.END, 22: self.ESC + "22" + self.END, "res_underlined": self.ESC + "24" + self.END, 24: self.ESC + "24" + self.END, "res_blink": self.ESC + "25" + self.END, 25: self.ESC + "25" + self.END, "res_reverse": self.ESC + "27" + self.END, 27: self.ESC + "27" + self.END, "res_hidden": self.ESC + "28" + self.END, 28: self.ESC + "28" + self.END, } return paint[self.color]
[ "def", "attribute", "(", "self", ")", ":", "paint", "=", "{", "\"bold\"", ":", "self", ".", "ESC", "+", "\"1\"", "+", "self", ".", "END", ",", "1", ":", "self", ".", "ESC", "+", "\"1\"", "+", "self", ".", "END", ",", "\"dim\"", ":", "self", ".", "ESC", "+", "\"2\"", "+", "self", ".", "END", ",", "2", ":", "self", ".", "ESC", "+", "\"2\"", "+", "self", ".", "END", ",", "\"underlined\"", ":", "self", ".", "ESC", "+", "\"4\"", "+", "self", ".", "END", ",", "4", ":", "self", ".", "ESC", "+", "\"4\"", "+", "self", ".", "END", ",", "\"blink\"", ":", "self", ".", "ESC", "+", "\"5\"", "+", "self", ".", "END", ",", "5", ":", "self", ".", "ESC", "+", "\"5\"", "+", "self", ".", "END", ",", "\"reverse\"", ":", "self", ".", "ESC", "+", "\"7\"", "+", "self", ".", "END", ",", "7", ":", "self", ".", "ESC", "+", "\"7\"", "+", "self", ".", "END", ",", "\"hidden\"", ":", "self", ".", "ESC", "+", "\"8\"", "+", "self", ".", "END", ",", "8", ":", "self", ".", "ESC", "+", "\"8\"", "+", "self", ".", "END", ",", "\"reset\"", ":", "self", ".", "ESC", "+", "\"0\"", "+", "self", ".", "END", ",", "0", ":", "self", ".", "ESC", "+", "\"0\"", "+", "self", ".", "END", ",", "\"res_bold\"", ":", "self", ".", "ESC", "+", "\"21\"", "+", "self", ".", "END", ",", "21", ":", "self", ".", "ESC", "+", "\"21\"", "+", "self", ".", "END", ",", "\"res_dim\"", ":", "self", ".", "ESC", "+", "\"22\"", "+", "self", ".", "END", ",", "22", ":", "self", ".", "ESC", "+", "\"22\"", "+", "self", ".", "END", ",", "\"res_underlined\"", ":", "self", ".", "ESC", "+", "\"24\"", "+", "self", ".", "END", ",", "24", ":", "self", ".", "ESC", "+", "\"24\"", "+", "self", ".", "END", ",", "\"res_blink\"", ":", "self", ".", "ESC", "+", "\"25\"", "+", "self", ".", "END", ",", "25", ":", "self", ".", "ESC", "+", "\"25\"", "+", "self", ".", "END", ",", "\"res_reverse\"", ":", "self", ".", "ESC", "+", "\"27\"", "+", "self", ".", "END", ",", "27", ":", "self", ".", "ESC", "+", "\"27\"", "+", "self", ".", "END", ",", "\"res_hidden\"", ":", "self", ".", "ESC", "+", "\"28\"", "+", "self", ".", "END", ",", "28", ":", "self", ".", "ESC", "+", "\"28\"", "+", "self", ".", "END", ",", "}", "return", "paint", "[", "self", ".", "color", "]" ]
Set or reset attributes
[ "Set", "or", "reset", "attributes" ]
train
https://github.com/dslackw/colored/blob/064172b36bd5e456c60581c7fbf77060ca7829ba/colored/colored.py#L312-L343
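Assuming ESC and END are the usual CSI prefix '\x1b[' and terminator 'm' (consistent with the "38;5;" code in the next record), the lookups resolve to standard SGR sequences, reachable via the module-level attr() wrapper:

    attr("bold")   # -> '\x1b[1m'
    attr(0)        # -> '\x1b[0m' (same as attr("reset"))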
dslackw/colored
colored/colored.py
colored.foreground
def foreground(self): """Print 256 foreground colors""" code = self.ESC + "38;5;" if str(self.color).isdigit(): self.reverse_dict() color = self.reserve_paint[str(self.color)] return code + self.paint[color] + self.END elif self.color.startswith("#"): return code + str(self.HEX) + self.END else: return code + self.paint[self.color] + self.END
python
def foreground(self): """Print 256 foreground colors""" code = self.ESC + "38;5;" if str(self.color).isdigit(): self.reverse_dict() color = self.reserve_paint[str(self.color)] return code + self.paint[color] + self.END elif self.color.startswith("#"): return code + str(self.HEX) + self.END else: return code + self.paint[self.color] + self.END
[ "def", "foreground", "(", "self", ")", ":", "code", "=", "self", ".", "ESC", "+", "\"38;5;\"", "if", "str", "(", "self", ".", "color", ")", ".", "isdigit", "(", ")", ":", "self", ".", "reverse_dict", "(", ")", "color", "=", "self", ".", "reserve_paint", "[", "str", "(", "self", ".", "color", ")", "]", "return", "code", "+", "self", ".", "paint", "[", "color", "]", "+", "self", ".", "END", "elif", "self", ".", "color", ".", "startswith", "(", "\"#\"", ")", ":", "return", "code", "+", "str", "(", "self", ".", "HEX", ")", "+", "self", ".", "END", "else", ":", "return", "code", "+", "self", ".", "paint", "[", "self", ".", "color", "]", "+", "self", ".", "END" ]
Print 256 foreground colors
[ "Print", "256", "foreground", "colors" ]
train
https://github.com/dslackw/colored/blob/064172b36bd5e456c60581c7fbf77060ca7829ba/colored/colored.py#L345-L355
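Under the same ESC/END assumption, this method yields 256-color SGR codes via the package's fg() shortcut:

    fg("red")   # -> '\x1b[38;5;1m'   (name looked up in the paint table)
    fg(196)     # -> '\x1b[38;5;196m' (numeric input goes through reverse_dict first)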
dslackw/colored
colored/colored.py
colored.reverse_dict
def reverse_dict(self): """reverse dictionary""" self.reserve_paint = dict(zip(self.paint.values(), self.paint.keys()))
python
def reverse_dict(self): """reverse dictionary""" self.reserve_paint = dict(zip(self.paint.values(), self.paint.keys()))
[ "def", "reverse_dict", "(", "self", ")", ":", "self", ".", "reserve_paint", "=", "dict", "(", "zip", "(", "self", ".", "paint", ".", "values", "(", ")", ",", "self", ".", "paint", ".", "keys", "(", ")", ")", ")" ]
reverse dictionary
[ "reverse", "dictionary" ]
train
https://github.com/dslackw/colored/blob/064172b36bd5e456c60581c7fbf77060ca7829ba/colored/colored.py#L369-L371
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
OneWireBus.reset
def reset(self, required=False): """ Perform a reset and check for presence pulse. :param bool required: require presence pulse """ reset = self._ow.reset() if required and reset: raise OneWireError("No presence pulse found. Check devices and wiring.") return not reset
python
def reset(self, required=False): """ Perform a reset and check for presence pulse. :param bool required: require presence pulse """ reset = self._ow.reset() if required and reset: raise OneWireError("No presence pulse found. Check devices and wiring.") return not reset
[ "def", "reset", "(", "self", ",", "required", "=", "False", ")", ":", "reset", "=", "self", ".", "_ow", ".", "reset", "(", ")", "if", "required", "and", "reset", ":", "raise", "OneWireError", "(", "\"No presence pulse found. Check devices and wiring.\"", ")", "return", "not", "reset" ]
Perform a reset and check for presence pulse. :param bool required: require presence pulse
[ "Perform", "a", "reset", "and", "check", "for", "presence", "pulse", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L97-L106
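A minimal presence check on a CircuitPython board (the pin choice is illustrative):

    import board
    from adafruit_onewire.bus import OneWireBus

    ow_bus = OneWireBus(board.D5)
    if ow_bus.reset():
        print("At least one device answered the presence pulse.")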
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
OneWireBus.readinto
def readinto(self, buf, *, start=0, end=None): """ Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include """ if end is None: end = len(buf) for i in range(start, end): buf[i] = self._readbyte()
python
def readinto(self, buf, *, start=0, end=None): """ Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include """ if end is None: end = len(buf) for i in range(start, end): buf[i] = self._readbyte()
[ "def", "readinto", "(", "self", ",", "buf", ",", "*", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "len", "(", "buf", ")", "for", "i", "in", "range", "(", "start", ",", "end", ")", ":", "buf", "[", "i", "]", "=", "self", ".", "_readbyte", "(", ")" ]
Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include
[ "Read", "into", "buf", "from", "the", "device", ".", "The", "number", "of", "bytes", "read", "will", "be", "the", "length", "of", "buf", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L108-L124
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
OneWireBus.write
def write(self, buf, *, start=0, end=None): """ Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include """ if end is None: end = len(buf) for i in range(start, end): self._writebyte(buf[i])
python
def write(self, buf, *, start=0, end=None): """ Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include """ if end is None: end = len(buf) for i in range(start, end): self._writebyte(buf[i])
[ "def", "write", "(", "self", ",", "buf", ",", "*", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "len", "(", "buf", ")", "for", "i", "in", "range", "(", "start", ",", "end", ")", ":", "self", ".", "_writebyte", "(", "buf", "[", "i", "]", ")" ]
Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include
[ "Write", "the", "bytes", "from", "buf", "to", "the", "device", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L126-L141
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
OneWireBus.scan
def scan(self): """Scan for devices on the bus and return a list of addresses.""" devices = [] diff = 65 rom = False count = 0 for _ in range(0xff): rom, diff = self._search_rom(rom, diff) if rom: count += 1 if count > self.maximum_devices: raise RuntimeError( "Maximum device count of {} exceeded."\ .format(self.maximum_devices)) devices.append(OneWireAddress(rom)) if diff == 0: break return devices
python
def scan(self): """Scan for devices on the bus and return a list of addresses.""" devices = [] diff = 65 rom = False count = 0 for _ in range(0xff): rom, diff = self._search_rom(rom, diff) if rom: count += 1 if count > self.maximum_devices: raise RuntimeError( "Maximum device count of {} exceeded."\ .format(self.maximum_devices)) devices.append(OneWireAddress(rom)) if diff == 0: break return devices
[ "def", "scan", "(", "self", ")", ":", "devices", "=", "[", "]", "diff", "=", "65", "rom", "=", "False", "count", "=", "0", "for", "_", "in", "range", "(", "0xff", ")", ":", "rom", ",", "diff", "=", "self", ".", "_search_rom", "(", "rom", ",", "diff", ")", "if", "rom", ":", "count", "+=", "1", "if", "count", ">", "self", ".", "maximum_devices", ":", "raise", "RuntimeError", "(", "\"Maximum device count of {} exceeded.\"", ".", "format", "(", "self", ".", "maximum_devices", ")", ")", "devices", ".", "append", "(", "OneWireAddress", "(", "rom", ")", ")", "if", "diff", "==", "0", ":", "break", "return", "devices" ]
Scan for devices on the bus and return a list of addresses.
[ "Scan", "for", "devices", "on", "the", "bus", "and", "return", "a", "list", "of", "addresses", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L143-L160
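Enumerating the bus, continuing the sketch above and assuming the returned OneWireAddress objects expose rom and family_code:

    for device in ow_bus.scan():
        print("ROM:", [hex(b) for b in device.rom],
              "family: 0x{:02x}".format(device.family_code))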
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
OneWireBus.crc8
def crc8(data): """ Perform the 1-Wire CRC check on the provided data. :param bytearray data: 8 byte array representing 64 bit ROM code """ crc = 0 for byte in data: crc ^= byte for _ in range(8): if crc & 0x01: crc = (crc >> 1) ^ 0x8C else: crc >>= 1 crc &= 0xFF return crc
python
def crc8(data): """ Perform the 1-Wire CRC check on the provided data. :param bytearray data: 8 byte array representing 64 bit ROM code """ crc = 0 for byte in data: crc ^= byte for _ in range(8): if crc & 0x01: crc = (crc >> 1) ^ 0x8C else: crc >>= 1 crc &= 0xFF return crc
[ "def", "crc8", "(", "data", ")", ":", "crc", "=", "0", "for", "byte", "in", "data", ":", "crc", "^=", "byte", "for", "_", "in", "range", "(", "8", ")", ":", "if", "crc", "&", "0x01", ":", "crc", "=", "(", "crc", ">>", "1", ")", "^", "0x8C", "else", ":", "crc", ">>=", "1", "crc", "&=", "0xFF", "return", "crc" ]
Perform the 1-Wire CRC check on the provided data. :param bytearray data: 8 byte array representing 64 bit ROM code
[ "Perform", "the", "1", "-", "Wire", "CRC", "check", "on", "the", "provided", "data", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L202-L218
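For a valid 64-bit ROM the eighth byte is the CRC of the first seven, so running crc8 over all eight bytes yields zero. A tiny worked case (calling it as the static method the call sites suggest):

    OneWireBus.crc8(b'\x01')      # -> 0x5e
    OneWireBus.crc8(b'\x01\x5e')  # -> 0 (data plus its own CRC checks out)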
icoxfog417/pykintone
pykintone/structure.py
kintoneStructure._deserialize
def _deserialize(cls, json_body, get_value_and_type): """ deserialize json to model :param json_body: json data :param get_value_and_type: function(f: json_field) -> value, field_type_string(see FieldType) :return: """ instance = cls() is_set = False properties = cls._get_property_names(instance) def get_property_detail(name): p = [p for p in instance._property_details if p.name == name or p.field_name == name] return None if len(p) == 0 else p[0] for k in json_body: field = json_body[k] pd = get_property_detail(k) pn = k if not pd else pd.to_property_name(k) if pn in properties: v, t = get_value_and_type(field) initial_value = getattr(instance, pn) value = instance._field_to_property(v, t, pd, initial_value) setattr(instance, pn, value) is_set = True return instance if is_set else None
python
def _deserialize(cls, json_body, get_value_and_type): """ deserialize json to model :param json_body: json data :param get_value_and_type: function(f: json_field) -> value, field_type_string(see FieldType) :return: """ instance = cls() is_set = False properties = cls._get_property_names(instance) def get_property_detail(name): p = [p for p in instance._property_details if p.name == name or p.field_name == name] return None if len(p) == 0 else p[0] for k in json_body: field = json_body[k] pd = get_property_detail(k) pn = k if not pd else pd.to_property_name(k) if pn in properties: v, t = get_value_and_type(field) initial_value = getattr(instance, pn) value = instance._field_to_property(v, t, pd, initial_value) setattr(instance, pn, value) is_set = True return instance if is_set else None
[ "def", "_deserialize", "(", "cls", ",", "json_body", ",", "get_value_and_type", ")", ":", "instance", "=", "cls", "(", ")", "is_set", "=", "False", "properties", "=", "cls", ".", "_get_property_names", "(", "instance", ")", "def", "get_property_detail", "(", "name", ")", ":", "p", "=", "[", "p", "for", "p", "in", "instance", ".", "_property_details", "if", "p", ".", "name", "==", "name", "or", "p", ".", "field_name", "==", "name", "]", "return", "None", "if", "len", "(", "p", ")", "==", "0", "else", "p", "[", "0", "]", "for", "k", "in", "json_body", ":", "field", "=", "json_body", "[", "k", "]", "pd", "=", "get_property_detail", "(", "k", ")", "pn", "=", "k", "if", "not", "pd", "else", "pd", ".", "to_property_name", "(", "k", ")", "if", "pn", "in", "properties", ":", "v", ",", "t", "=", "get_value_and_type", "(", "field", ")", "initial_value", "=", "getattr", "(", "instance", ",", "pn", ")", "value", "=", "instance", ".", "_field_to_property", "(", "v", ",", "t", ",", "pd", ",", "initial_value", ")", "setattr", "(", "instance", ",", "pn", ",", "value", ")", "is_set", "=", "True", "return", "instance", "if", "is_set", "else", "None" ]
deserialize json to model :param json_body: json data :param get_value_and_type: function(f: json_field) -> value, field_type_string(see FieldType) :return:
[ "deserialize", "json", "to", "model", ":", "param", "json_body", ":", "json", "data", ":", "param", "get_value_and_type", ":", "function", "(", "f", ":", "json_field", ")", "-", ">", "value", "field_type_string", "(", "see", "FieldType", ")", ":", "return", ":" ]
train
https://github.com/icoxfog417/pykintone/blob/756609fc956fc784325d58cc01473a67a640654c/pykintone/structure.py#L35-L62
icoxfog417/pykintone
pykintone/structure.py
kintoneStructure._serialize
def _serialize(self, convert_to_key_and_value, ignore_missing=False): """ serialize model object to dictionary :param convert_to_key_and_value: function(field_name, value, property_detail) -> key, value :return: """ serialized = {} properties = self._get_property_names(self) def get_property_detail(name): p = [p for p in self._property_details if p.name == name] return None if len(p) == 0 else p[0] for p in properties: pd = get_property_detail(p) value = self._property_to_field(p, pd) field_name = p if not pd else pd.to_field_name() if value is None or (ignore_missing and not value) or (pd and pd.unsent): continue else: key, value = convert_to_key_and_value(field_name, value, pd) if key: serialized[key] = value return serialized
python
def _serialize(self, convert_to_key_and_value, ignore_missing=False): """ serialize model object to dictionary :param convert_to_key_and_value: function(field_name, value, property_detail) -> key, value :return: """ serialized = {} properties = self._get_property_names(self) def get_property_detail(name): p = [p for p in self._property_details if p.name == name] return None if len(p) == 0 else p[0] for p in properties: pd = get_property_detail(p) value = self._property_to_field(p, pd) field_name = p if not pd else pd.to_field_name() if value is None or (ignore_missing and not value) or (pd and pd.unsent): continue else: key, value = convert_to_key_and_value(field_name, value, pd) if key: serialized[key] = value return serialized
[ "def", "_serialize", "(", "self", ",", "convert_to_key_and_value", ",", "ignore_missing", "=", "False", ")", ":", "serialized", "=", "{", "}", "properties", "=", "self", ".", "_get_property_names", "(", "self", ")", "def", "get_property_detail", "(", "name", ")", ":", "p", "=", "[", "p", "for", "p", "in", "self", ".", "_property_details", "if", "p", ".", "name", "==", "name", "]", "return", "None", "if", "len", "(", "p", ")", "==", "0", "else", "p", "[", "0", "]", "for", "p", "in", "properties", ":", "pd", "=", "get_property_detail", "(", "p", ")", "value", "=", "self", ".", "_property_to_field", "(", "p", ",", "pd", ")", "field_name", "=", "p", "if", "not", "pd", "else", "pd", ".", "to_field_name", "(", ")", "if", "value", "is", "None", "or", "(", "ignore_missing", "and", "not", "value", ")", "or", "(", "pd", "and", "pd", ".", "unsent", ")", ":", "continue", "else", ":", "key", ",", "value", "=", "convert_to_key_and_value", "(", "field_name", ",", "value", ",", "pd", ")", "if", "key", ":", "serialized", "[", "key", "]", "=", "value", "return", "serialized" ]
serialize model object to dictionary :param convert_to_key_and_value: function(field_name, value, property_detail) -> key, value :return:
[ "serialize", "model", "object", "to", "dictionary", ":", "param", "convert_to_key_and_value", ":", "function", "(", "field_name", "value", "property_detail", ")", "-", ">", "key", "value", ":", "return", ":" ]
train
https://github.com/icoxfog417/pykintone/blob/756609fc956fc784325d58cc01473a67a640654c/pykintone/structure.py#L126-L152
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/device.py
OneWireDevice.readinto
def readinto(self, buf, *, start=0, end=None): """ Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include """ self._bus.readinto(buf, start=start, end=end) if start == 0 and end is None and len(buf) >= 8: if self._bus.crc8(buf): raise RuntimeError('CRC error.')
python
def readinto(self, buf, *, start=0, end=None): """ Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include """ self._bus.readinto(buf, start=start, end=end) if start == 0 and end is None and len(buf) >= 8: if self._bus.crc8(buf): raise RuntimeError('CRC error.')
[ "def", "readinto", "(", "self", ",", "buf", ",", "*", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "self", ".", "_bus", ".", "readinto", "(", "buf", ",", "start", "=", "start", ",", "end", "=", "end", ")", "if", "start", "==", "0", "and", "end", "is", "None", "and", "len", "(", "buf", ")", ">=", "8", ":", "if", "self", ".", "_bus", ".", "crc8", "(", "buf", ")", ":", "raise", "RuntimeError", "(", "'CRC error.'", ")" ]
Read into ``buf`` from the device. The number of bytes read will be the length of ``buf``. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buf[start:end]``. This will not cause an allocation like ``buf[start:end]`` will so it saves memory. :param bytearray buf: buffer to write into :param int start: Index to start writing at :param int end: Index to write up to but not include
[ "Read", "into", "buf", "from", "the", "device", ".", "The", "number", "of", "bytes", "read", "will", "be", "the", "length", "of", "buf", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/device.py#L50-L66
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/device.py
OneWireDevice.write
def write(self, buf, *, start=0, end=None): """ Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include """ return self._bus.write(buf, start=start, end=end)
python
def write(self, buf, *, start=0, end=None): """ Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include """ return self._bus.write(buf, start=start, end=end)
[ "def", "write", "(", "self", ",", "buf", ",", "*", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "self", ".", "_bus", ".", "write", "(", "buf", ",", "start", "=", "start", ",", "end", "=", "end", ")" ]
Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include
[ "Write", "the", "bytes", "from", "buf", "to", "the", "device", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/device.py#L68-L80
praekelt/django-preferences
preferences/models.py
preferences_class_prepared
def preferences_class_prepared(sender, *args, **kwargs): """ Adds various preferences members to preferences.preferences, thus enabling easy access from code. """ cls = sender if issubclass(cls, Preferences): # Add singleton manager to subclasses. cls.add_to_class('singleton', SingletonManager()) # Add property for preferences object to preferences.preferences. setattr(preferences.Preferences, cls._meta.object_name, property(lambda x: cls.singleton.get()))
python
def preferences_class_prepared(sender, *args, **kwargs): """ Adds various preferences members to preferences.preferences, thus enabling easy access from code. """ cls = sender if issubclass(cls, Preferences): # Add singleton manager to subclasses. cls.add_to_class('singleton', SingletonManager()) # Add property for preferences object to preferences.preferences. setattr(preferences.Preferences, cls._meta.object_name, property(lambda x: cls.singleton.get()))
[ "def", "preferences_class_prepared", "(", "sender", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "sender", "if", "issubclass", "(", "cls", ",", "Preferences", ")", ":", "# Add singleton manager to subclasses.", "cls", ".", "add_to_class", "(", "'singleton'", ",", "SingletonManager", "(", ")", ")", "# Add property for preferences object to preferences.preferences.", "setattr", "(", "preferences", ".", "Preferences", ",", "cls", ".", "_meta", ".", "object_name", ",", "property", "(", "lambda", "x", ":", "cls", ".", "singleton", ".", "get", "(", ")", ")", ")" ]
Adds various preferences members to preferences.preferences, thus enabling easy access from code.
[ "Adds", "various", "preferences", "members", "to", "preferences", ".", "preferences", "thus", "enabling", "easy", "access", "from", "code", "." ]
train
https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/models.py#L30-L40
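In practice this hook is what enables the package's singleton access pattern; a sketch of a typical subclass (model and field names are illustrative):

    from django.db import models
    from preferences.models import Preferences

    class MyPreferences(Preferences):
        portal_contact_email = models.EmailField()

    # later, once the class is prepared:
    from preferences import preferences
    preferences.MyPreferences.portal_contact_email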
praekelt/django-preferences
preferences/models.py
site_cleanup
def site_cleanup(sender, action, instance, **kwargs): """ Make sure there is only a single preferences object per site. So remove sites from pre-existing preferences objects. """ if action == 'post_add': if isinstance(instance, Preferences) \ and hasattr(instance.__class__, 'objects'): site_conflicts = instance.__class__.objects.filter( sites__in=instance.sites.all() ).only('id').distinct() for conflict in site_conflicts: if conflict.id != instance.id: for site in instance.sites.all(): conflict.sites.remove(site)
python
def site_cleanup(sender, action, instance, **kwargs): """ Make sure there is only a single preferences object per site. So remove sites from pre-existing preferences objects. """ if action == 'post_add': if isinstance(instance, Preferences) \ and hasattr(instance.__class__, 'objects'): site_conflicts = instance.__class__.objects.filter( sites__in=instance.sites.all() ).only('id').distinct() for conflict in site_conflicts: if conflict.id != instance.id: for site in instance.sites.all(): conflict.sites.remove(site)
[ "def", "site_cleanup", "(", "sender", ",", "action", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "action", "==", "'post_add'", ":", "if", "isinstance", "(", "instance", ",", "Preferences", ")", "and", "hasattr", "(", "instance", ".", "__class__", ",", "'objects'", ")", ":", "site_conflicts", "=", "instance", ".", "__class__", ".", "objects", ".", "filter", "(", "sites__in", "=", "instance", ".", "sites", ".", "all", "(", ")", ")", ".", "only", "(", "'id'", ")", ".", "distinct", "(", ")", "for", "conflict", "in", "site_conflicts", ":", "if", "conflict", ".", "id", "!=", "instance", ".", "id", ":", "for", "site", "in", "instance", ".", "sites", ".", "all", "(", ")", ":", "conflict", ".", "sites", ".", "remove", "(", "site", ")" ]
Make sure there is only a single preferences object per site. So remove sites from pre-existing preferences objects.
[ "Make", "sure", "there", "is", "only", "a", "single", "preferences", "object", "per", "site", ".", "So", "remove", "sites", "from", "pre", "-", "existing", "preferences", "objects", "." ]
train
https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/models.py#L44-L59
praekelt/django-preferences
preferences/managers.py
SingletonManager.get_queryset
def get_queryset(self): """ Return the first preferences object for the current site. If the preferences object does not exist, create it. """ queryset = super(SingletonManager, self).get_queryset() # Get current site current_site = None if getattr(settings, 'SITE_ID', None) is not None: current_site = Site.objects.get_current() # If site found limit queryset to site. if current_site is not None: queryset = queryset.filter(sites=settings.SITE_ID) if not queryset.exists(): # Create object (for current site) if it doesn't exist. obj = self.model.objects.create() if current_site is not None: obj.sites.add(current_site) return queryset
python
def get_queryset(self): """ Return the first preferences object for the current site. If the preferences object does not exist, create it. """ queryset = super(SingletonManager, self).get_queryset() # Get current site current_site = None if getattr(settings, 'SITE_ID', None) is not None: current_site = Site.objects.get_current() # If site found limit queryset to site. if current_site is not None: queryset = queryset.filter(sites=settings.SITE_ID) if not queryset.exists(): # Create object (for current site) if it doesn't exist. obj = self.model.objects.create() if current_site is not None: obj.sites.add(current_site) return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "SingletonManager", ",", "self", ")", ".", "get_queryset", "(", ")", "# Get current site", "current_site", "=", "None", "if", "getattr", "(", "settings", ",", "'SITE_ID'", ",", "None", ")", "is", "not", "None", ":", "current_site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "# If site found limit queryset to site.", "if", "current_site", "is", "not", "None", ":", "queryset", "=", "queryset", ".", "filter", "(", "sites", "=", "settings", ".", "SITE_ID", ")", "if", "not", "queryset", ".", "exists", "(", ")", ":", "# Create object (for current site) if it doesn't exist.", "obj", "=", "self", ".", "model", ".", "objects", ".", "create", "(", ")", "if", "current_site", "is", "not", "None", ":", "obj", ".", "sites", ".", "add", "(", "current_site", ")", "return", "queryset" ]
Return the first preferences object for the current site. If the preferences object does not exist, create it.
[ "Return", "the", "first", "preferences", "object", "for", "the", "current", "site", ".", "If", "preferences", "do", "not", "exist", "create", "it", "." ]
train
https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/managers.py#L10-L33
lsbardel/python-stdnet
stdnet/utils/encoders.py
Encoder.load_iterable
def load_iterable(self, iterable, session=None): '''Load an ``iterable``. By default it returns a list of data loaded via the :meth:`loads` method. :param iterable: an iterable over data to load. :param session: Optional :class:`stdnet.odm.Session`. :return: an iterable over decoded data. ''' data = [] load = self.loads for v in iterable: data.append(load(v)) return data
python
def load_iterable(self, iterable, session=None): '''Load an ``iterable``. By default it returns a list of data loaded via the :meth:`loads` method. :param iterable: an iterable over data to load. :param session: Optional :class:`stdnet.odm.Session`. :return: an iterable over decoded data. ''' data = [] load = self.loads for v in iterable: data.append(load(v)) return data
[ "def", "load_iterable", "(", "self", ",", "iterable", ",", "session", "=", "None", ")", ":", "data", "=", "[", "]", "load", "=", "self", ".", "loads", "for", "v", "in", "iterable", ":", "data", ".", "append", "(", "load", "(", "v", ")", ")", "return", "data" ]
Load an ``iterable``. By default it returns a list of data loaded via the :meth:`loads` method. :param iterable: an iterable over data to load. :param session: Optional :class:`stdnet.odm.Session`. :return: an iterable over decoded data.
[ "Load", "an", "iterable", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/encoders.py#L67-L81
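For instance, with a concrete subclass such as a JSON encoder (assuming one named Json lives in stdnet.utils.encoders):

    from stdnet.utils.encoders import Json

    enc = Json()
    enc.load_iterable([b'{"a": 1}', b'[1, 2]'])  # -> [{'a': 1}, [1, 2]]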
lsbardel/python-stdnet
stdnet/apps/searchengine/__init__.py
SearchEngine.search_model
def search_model(self, q, text, lookup=None): '''Implements :meth:`stdnet.odm.SearchEngine.search_model`. It returns a new :class:`stdnet.odm.QueryElem` instance from the input :class:`Query` and the *text* to search.''' words = self.words_from_text(text, for_search=True) if not words: return q qs = self._search(words, include=(q.model,), lookup=lookup) qs = tuple((q.get_field('object_id') for q in qs)) return odm.intersect((q,)+qs)
python
def search_model(self, q, text, lookup=None): '''Implements :meth:`stdnet.odm.SearchEngine.search_model`. It returns a new :class:`stdnet.odm.QueryElem` instance from the input :class:`Query` and the *text* to search.''' words = self.words_from_text(text, for_search=True) if not words: return q qs = self._search(words, include=(q.model,), lookup=lookup) qs = tuple((q.get_field('object_id') for q in qs)) return odm.intersect((q,)+qs)
[ "def", "search_model", "(", "self", ",", "q", ",", "text", ",", "lookup", "=", "None", ")", ":", "words", "=", "self", ".", "words_from_text", "(", "text", ",", "for_search", "=", "True", ")", "if", "not", "words", ":", "return", "q", "qs", "=", "self", ".", "_search", "(", "words", ",", "include", "=", "(", "q", ".", "model", ",", ")", ",", "lookup", "=", "lookup", ")", "qs", "=", "tuple", "(", "(", "q", ".", "get_field", "(", "'object_id'", ")", "for", "q", "in", "qs", ")", ")", "return", "odm", ".", "intersect", "(", "(", "q", ",", ")", "+", "qs", ")" ]
Implements :meth:`stdnet.odm.SearchEngine.search_model`. It returns a new :class:`stdnet.odm.QueryElem` instance from the input :class:`Query` and the *text* to search.
[ "Implements", ":", "meth", ":", "stdnet", ".", "odm", ".", "SearchEngine", ".", "search_model", ".", "It", "return", "a", "new", ":", "class", ":", "stdnet", ".", "odm", ".", "QueryElem", "instance", "from", "the", "input", ":", "class", ":", "Query", "and", "the", "*", "text", "*", "to", "search", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/__init__.py#L164-L173
lsbardel/python-stdnet
stdnet/apps/searchengine/__init__.py
SearchEngine._search
def _search(self, words, include=None, exclude=None, lookup=None): '''Full text search. Return a list of queries to intersect.''' lookup = lookup or 'contains' query = self.router.worditem.query() if include: query = query.filter(model_type__in=include) if exclude: query = query.exclude(model_type__in=exclude) if not words: return [query] qs = [] if lookup == 'in': # we are looking for items with at least one word in it qs.append(query.filter(word__in=words)) elif lookup == 'contains': #we want to match every single word for word in words: qs.append(query.filter(word=word)) else: raise ValueError('Unknown lookup "{0}"'.format(lookup)) return qs
python
def _search(self, words, include=None, exclude=None, lookup=None): '''Full text search. Return a list of queries to intersect.''' lookup = lookup or 'contains' query = self.router.worditem.query() if include: query = query.filter(model_type__in=include) if exclude: query = query.exclude(model_type__in=exclude) if not words: return [query] qs = [] if lookup == 'in': # we are looking for items with at least one word in it qs.append(query.filter(word__in=words)) elif lookup == 'contains': #we want to match every single word for word in words: qs.append(query.filter(word=word)) else: raise ValueError('Unknown lookup "{0}"'.format(lookup)) return qs
[ "def", "_search", "(", "self", ",", "words", ",", "include", "=", "None", ",", "exclude", "=", "None", ",", "lookup", "=", "None", ")", ":", "lookup", "=", "lookup", "or", "'contains'", "query", "=", "self", ".", "router", ".", "worditem", ".", "query", "(", ")", "if", "include", ":", "query", "=", "query", ".", "filter", "(", "model_type__in", "=", "include", ")", "if", "exclude", ":", "query", "=", "query", ".", "exclude", "(", "model_type__in", "=", "include", ")", "if", "not", "words", ":", "return", "[", "query", "]", "qs", "=", "[", "]", "if", "lookup", "==", "'in'", ":", "# we are looking for items with at least one word in it\r", "qs", ".", "append", "(", "query", ".", "filter", "(", "word__in", "=", "words", ")", ")", "elif", "lookup", "==", "'contains'", ":", "#we want to match every single words\r", "for", "word", "in", "words", ":", "qs", ".", "append", "(", "query", ".", "filter", "(", "word", "=", "word", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unknown lookup \"{0}\"'", ".", "format", "(", "lookup", ")", ")", "return", "qs" ]
Full text search. Return a list of queries to intersect.
[ "Full", "text", "search", ".", "Return", "a", "list", "of", "queries", "to", "intersect", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/__init__.py#L207-L227
lsbardel/python-stdnet
stdnet/backends/redisb/client/__init__.py
redis_client
def redis_client(address=None, connection_pool=None, timeout=None, parser=None, **kwargs): '''Get a new redis client. :param address: a ``host``, ``port`` tuple. :param connection_pool: optional connection pool. :param timeout: socket timeout. ''' if not connection_pool: if timeout == 0: if not async: raise ImportError('Asynchronous connection requires async ' 'bindings installed.') return async.pool.redis(address, **kwargs) else: kwargs['socket_timeout'] = timeout return Redis(address[0], address[1], **kwargs) else: return Redis(connection_pool=connection_pool)
python
def redis_client(address=None, connection_pool=None, timeout=None, parser=None, **kwargs): '''Get a new redis client. :param address: a ``host``, ``port`` tuple. :param connection_pool: optional connection pool. :param timeout: socket timeout. ''' if not connection_pool: if timeout == 0: if not async: raise ImportError('Asynchronous connection requires async ' 'bindings installed.') return async.pool.redis(address, **kwargs) else: kwargs['socket_timeout'] = timeout return Redis(address[0], address[1], **kwargs) else: return Redis(connection_pool=connection_pool)
[ "def", "redis_client", "(", "address", "=", "None", ",", "connection_pool", "=", "None", ",", "timeout", "=", "None", ",", "parser", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "connection_pool", ":", "if", "timeout", "==", "0", ":", "if", "not", "async", ":", "raise", "ImportError", "(", "'Asynchronous connection requires async '", "'bindings installed.'", ")", "return", "async", ".", "pool", ".", "redis", "(", "address", ",", "*", "*", "kwargs", ")", "else", ":", "kwargs", "[", "'socket_timeout'", "]", "=", "timeout", "return", "Redis", "(", "address", "[", "0", "]", ",", "address", "[", "1", "]", ",", "*", "*", "kwargs", ")", "else", ":", "return", "Redis", "(", "connection_pool", "=", "connection_pool", ")" ]
Get a new redis client. :param address: a ``host``, ``port`` tuple. :param connection_pool: optional connection pool. :param timeout: socket timeout.
[ "Get", "a", "new", "redis", "client", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/__init__.py#L16-L35
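A hedged usage sketch for the function above, assuming it is importable from stdnet.backends.redisb.client and that a Redis server is listening on localhost:6379 (both assumptions); the key name is illustrative only.

from stdnet.backends.redisb.client import redis_client

# Synchronous client with a 5 second socket timeout (hypothetical server).
client = redis_client(address=('localhost', 6379), timeout=5)
client.set('greeting', 'hello')
print(client.get('greeting'))  # b'hello' (exact return type depends on the client version)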
lsbardel/python-stdnet
stdnet/utils/py2py3.py
to_bytes
def to_bytes(s, encoding=None, errors='strict'): """Returns a bytestring version of 's', encoded as specified in 'encoding'.""" encoding = encoding or 'utf-8' if isinstance(s, bytes): if encoding != 'utf-8': return s.decode('utf-8', errors).encode(encoding, errors) else: return s if not is_string(s): s = string_type(s) return s.encode(encoding, errors)
python
def to_bytes(s, encoding=None, errors='strict'): """Returns a bytestring version of 's', encoded as specified in 'encoding'.""" encoding = encoding or 'utf-8' if isinstance(s, bytes): if encoding != 'utf-8': return s.decode('utf-8', errors).encode(encoding, errors) else: return s if not is_string(s): s = string_type(s) return s.encode(encoding, errors)
[ "def", "to_bytes", "(", "s", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "encoding", "=", "encoding", "or", "'utf-8'", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "if", "encoding", "!=", "'utf-8'", ":", "return", "s", ".", "decode", "(", "'utf-8'", ",", "errors", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "else", ":", "return", "s", "if", "not", "is_string", "(", "s", ")", ":", "s", "=", "string_type", "(", "s", ")", "return", "s", ".", "encode", "(", "encoding", ",", "errors", ")" ]
Returns a bytestring version of 's', encoded as specified in 'encoding'.
[ "Returns", "a", "bytestring", "version", "of", "s", "encoded", "as", "specified", "in", "encoding", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/py2py3.py#L80-L91
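A self-contained sketch of the same behaviour under Python 3, where the module's is_string/string_type helpers reduce to plain str checks (an assumption about py2py3):

def to_bytes_sketch(s, encoding=None, errors='strict'):
    encoding = encoding or 'utf-8'
    if isinstance(s, bytes):
        # re-encode only when a non-utf-8 target encoding is requested
        if encoding != 'utf-8':
            return s.decode('utf-8', errors).encode(encoding, errors)
        return s
    return str(s).encode(encoding, errors)

assert to_bytes_sketch('héllo') == b'h\xc3\xa9llo'
assert to_bytes_sketch(b'h\xc3\xa9llo', 'latin-1') == b'h\xe9llo'
assert to_bytes_sketch(42) == b'42'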
lsbardel/python-stdnet
stdnet/utils/py2py3.py
to_string
def to_string(s, encoding=None, errors='strict'): """Inverse of to_bytes""" encoding = encoding or 'utf-8' if isinstance(s, bytes): return s.decode(encoding, errors) if not is_string(s): s = string_type(s) return s
python
def to_string(s, encoding=None, errors='strict'): """Inverse of to_bytes""" encoding = encoding or 'utf-8' if isinstance(s, bytes): return s.decode(encoding, errors) if not is_string(s): s = string_type(s) return s
[ "def", "to_string", "(", "s", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "encoding", "=", "encoding", "or", "'utf-8'", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", ".", "decode", "(", "encoding", ",", "errors", ")", "if", "not", "is_string", "(", "s", ")", ":", "s", "=", "string_type", "(", "s", ")", "return", "s" ]
Inverse of to_bytes
[ "Inverse", "of", "to_bytes" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/py2py3.py#L94-L101
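The matching Python 3 sketch for the inverse direction, under the same assumption about is_string/string_type:

def to_string_sketch(s, encoding=None, errors='strict'):
    encoding = encoding or 'utf-8'
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    return str(s)

assert to_string_sketch(b'h\xc3\xa9llo') == 'héllo'
assert to_string_sketch(3.14) == '3.14'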
lsbardel/python-stdnet
stdnet/utils/jsontools.py
date_decimal_hook
def date_decimal_hook(dct): '''The default JSON decoder hook. It is the inverse of :class:`stdnet.utils.jsontools.JSONDateDecimalEncoder`.''' if '__datetime__' in dct: return todatetime(dct['__datetime__']) elif '__date__' in dct: return todatetime(dct['__date__']).date() elif '__decimal__' in dct: return Decimal(dct['__decimal__']) else: return dct
python
def date_decimal_hook(dct): '''The default JSON decoder hook. It is the inverse of :class:`stdnet.utils.jsontools.JSONDateDecimalEncoder`.''' if '__datetime__' in dct: return todatetime(dct['__datetime__']) elif '__date__' in dct: return todatetime(dct['__date__']).date() elif '__decimal__' in dct: return Decimal(dct['__decimal__']) else: return dct
[ "def", "date_decimal_hook", "(", "dct", ")", ":", "if", "'__datetime__'", "in", "dct", ":", "return", "todatetime", "(", "dct", "[", "'__datetime__'", "]", ")", "elif", "'__date__'", "in", "dct", ":", "return", "todatetime", "(", "dct", "[", "'__date__'", "]", ")", ".", "date", "(", ")", "elif", "'__decimal__'", "in", "dct", ":", "return", "Decimal", "(", "dct", "[", "'__decimal__'", "]", ")", "else", ":", "return", "dct" ]
The default JSON decoder hook. It is the inverse of :class:`stdnet.utils.jsontools.JSONDateDecimalEncoder`.
[ "The", "default", "JSON", "decoder", "hook", ".", "It", "is", "the", "inverse", "of", ":", "class", ":", "stdnet", ".", "utils", ".", "jsontools", ".", "JSONDateDecimalEncoder", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L81-L91
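To show how such a hook plugs into the standard json machinery, here is a hedged, self-contained sketch: the '__datetime__'/'__decimal__' payload keys follow the hook above, but date parsing is simplified to ISO strings instead of stdnet's todatetime.

import json
from datetime import datetime
from decimal import Decimal

def hook(dct):
    # mirror the key checks of date_decimal_hook, with ISO date parsing
    if '__datetime__' in dct:
        return datetime.fromisoformat(dct['__datetime__'])
    elif '__decimal__' in dct:
        return Decimal(dct['__decimal__'])
    return dct

payload = '{"price": {"__decimal__": "9.99"}, "at": {"__datetime__": "2013-05-01T12:00:00"}}'
print(json.loads(payload, object_hook=hook))
# {'price': Decimal('9.99'), 'at': datetime.datetime(2013, 5, 1, 12, 0)}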
lsbardel/python-stdnet
stdnet/utils/jsontools.py
flat_to_nested
def flat_to_nested(data, instance=None, attname=None, separator=None, loads=None): '''Convert a flat representation of a dictionary to a nested representation. Fields in the flat representation are separated by the *separator* parameter. :parameter data: a flat dictionary of key value pairs. :parameter instance: optional instance of a model. :parameter attname: optional attribute name of a model. :parameter separator: optional separator. Default ``"__"``. :parameter loads: optional data unserializer. :rtype: a nested dictionary''' separator = separator or JSPLITTER val = {} flat_vals = {} for key, value in iteritems(data): if value is None: continue keys = key.split(separator) # first key equal to the attribute name if attname: if keys.pop(0) != attname: continue if loads: value = loads(value) # if an instance is available, inject the flat attribute if not keys: if value is None: val = flat_vals = {} break else: continue else: flat_vals[key] = value d = val lk = keys[-1] for k in keys[:-1]: if k not in d: nd = {} d[k] = nd else: nd = d[k] if not isinstance(nd, dict): nd = {'': nd} d[k] = nd d = nd if lk not in d: d[lk] = value else: d[lk][''] = value if instance and flat_vals: for attr, value in iteritems(flat_vals): setattr(instance, attr, value) return val
python
def flat_to_nested(data, instance=None, attname=None, separator=None, loads=None): '''Convert a flat representation of a dictionary to a nested representation. Fields in the flat representation are separated by the *separator* parameter. :parameter data: a flat dictionary of key value pairs. :parameter instance: optional instance of a model. :parameter attname: optional attribute name of a model. :parameter separator: optional separator. Default ``"__"``. :parameter loads: optional data unserializer. :rtype: a nested dictionary''' separator = separator or JSPLITTER val = {} flat_vals = {} for key, value in iteritems(data): if value is None: continue keys = key.split(separator) # first key equal to the attribute name if attname: if keys.pop(0) != attname: continue if loads: value = loads(value) # if an instance is available, inject the flat attribute if not keys: if value is None: val = flat_vals = {} break else: continue else: flat_vals[key] = value d = val lk = keys[-1] for k in keys[:-1]: if k not in d: nd = {} d[k] = nd else: nd = d[k] if not isinstance(nd, dict): nd = {'': nd} d[k] = nd d = nd if lk not in d: d[lk] = value else: d[lk][''] = value if instance and flat_vals: for attr, value in iteritems(flat_vals): setattr(instance, attr, value) return val
[ "def", "flat_to_nested", "(", "data", ",", "instance", "=", "None", ",", "attname", "=", "None", ",", "separator", "=", "None", ",", "loads", "=", "None", ")", ":", "separator", "=", "separator", "or", "JSPLITTER", "val", "=", "{", "}", "flat_vals", "=", "{", "}", "for", "key", ",", "value", "in", "iteritems", "(", "data", ")", ":", "if", "value", "is", "None", ":", "continue", "keys", "=", "key", ".", "split", "(", "separator", ")", "# first key equal to the attribute name", "if", "attname", ":", "if", "keys", ".", "pop", "(", "0", ")", "!=", "attname", ":", "continue", "if", "loads", ":", "value", "=", "loads", "(", "value", ")", "# if an instance is available, inject the flat attribute", "if", "not", "keys", ":", "if", "value", "is", "None", ":", "val", "=", "flat_vals", "=", "{", "}", "break", "else", ":", "continue", "else", ":", "flat_vals", "[", "key", "]", "=", "value", "d", "=", "val", "lk", "=", "keys", "[", "-", "1", "]", "for", "k", "in", "keys", "[", ":", "-", "1", "]", ":", "if", "k", "not", "in", "d", ":", "nd", "=", "{", "}", "d", "[", "k", "]", "=", "nd", "else", ":", "nd", "=", "d", "[", "k", "]", "if", "not", "isinstance", "(", "nd", ",", "dict", ")", ":", "nd", "=", "{", "''", ":", "nd", "}", "d", "[", "k", "]", "=", "nd", "d", "=", "nd", "if", "lk", "not", "in", "d", ":", "d", "[", "lk", "]", "=", "value", "else", ":", "d", "[", "lk", "]", "[", "''", "]", "=", "value", "if", "instance", "and", "flat_vals", ":", "for", "attr", ",", "value", "in", "iteritems", "(", "flat_vals", ")", ":", "setattr", "(", "instance", ",", "attr", ",", "value", ")", "return", "val" ]
Convert a flat representation of a dictionary to a nested representation. Fields in the flat representation are separated by the *separator* parameter. :parameter data: a flat dictionary of key value pairs. :parameter instance: optional instance of a model. :parameter attname: optional attribute name of a model. :parameter separator: optional separator. Default ``"__"``. :parameter loads: optional data unserializer. :rtype: a nested dictionary
[ "Convert", "a", "flat", "representation", "of", "a", "dictionary", "to", "a", "nested", "representation", ".", "Fields", "in", "the", "flat", "representation", "are", "separated", "by", "the", "*", "splitter", "*", "parameters", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L98-L154
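A hedged usage sketch for the function above, assuming the default ``__`` splitter (JSPLITTER) and that the helpers it relies on (iteritems) are importable from stdnet.utils; the key names and ``float`` unserializer are illustrative.

flat = {'data__pv__paris': '56.0', 'data__pv__london': '80.5'}
nested = flat_to_nested(flat, attname='data', loads=float)
print(nested)  # {'pv': {'paris': 56.0, 'london': 80.5}}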
lsbardel/python-stdnet
stdnet/utils/jsontools.py
dict_flat_generator
def dict_flat_generator(value, attname=None, splitter=JSPLITTER, dumps=None, prefix=None, error=ValueError, recursive=True): '''Convert a nested dictionary into a flat dictionary representation''' if not isinstance(value, dict) or not recursive: if not prefix: raise error('Cannot assign a non dictionary to a JSON field') else: name = '%s%s%s' % (attname, splitter, prefix) if attname else prefix yield name, dumps(value) if dumps else value else: # loop over dictionary for field in value: val = value[field] key = prefix if field: key = '%s%s%s' % (prefix, splitter, field) if prefix else field for k, v2 in dict_flat_generator(val, attname, splitter, dumps, key, error, field): yield k, v2
python
def dict_flat_generator(value, attname=None, splitter=JSPLITTER, dumps=None, prefix=None, error=ValueError, recursive=True): '''Convert a nested dictionary into a flat dictionary representation''' if not isinstance(value, dict) or not recursive: if not prefix: raise error('Cannot assign a non dictionary to a JSON field') else: name = '%s%s%s' % (attname, splitter, prefix) if attname else prefix yield name, dumps(value) if dumps else value else: # loop over dictionary for field in value: val = value[field] key = prefix if field: key = '%s%s%s' % (prefix, splitter, field) if prefix else field for k, v2 in dict_flat_generator(val, attname, splitter, dumps, key, error, field): yield k, v2
[ "def", "dict_flat_generator", "(", "value", ",", "attname", "=", "None", ",", "splitter", "=", "JSPLITTER", ",", "dumps", "=", "None", ",", "prefix", "=", "None", ",", "error", "=", "ValueError", ",", "recursive", "=", "True", ")", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", "or", "not", "recursive", ":", "if", "not", "prefix", ":", "raise", "error", "(", "'Cannot assign a non dictionary to a JSON field'", ")", "else", ":", "name", "=", "'%s%s%s'", "%", "(", "attname", ",", "splitter", ",", "prefix", ")", "if", "attname", "else", "prefix", "yield", "name", ",", "dumps", "(", "value", ")", "if", "dumps", "else", "value", "else", ":", "# loop over dictionary", "for", "field", "in", "value", ":", "val", "=", "value", "[", "field", "]", "key", "=", "prefix", "if", "field", ":", "key", "=", "'%s%s%s'", "%", "(", "prefix", ",", "splitter", ",", "field", ")", "if", "prefix", "else", "field", "for", "k", ",", "v2", "in", "dict_flat_generator", "(", "val", ",", "attname", ",", "splitter", ",", "dumps", ",", "key", ",", "error", ",", "field", ")", ":", "yield", "k", ",", "v2" ]
Convert a nested dictionary into a flat dictionary representation
[ "Convert", "a", "nested", "dictionary", "into", "a", "flat", "dictionary", "representation" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L157-L178
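And the inverse direction, flattening a nested value back out with the generator above (again assuming the default ``__`` splitter):

nested = {'pv': {'paris': 56.0, 'london': 80.5}}
flat = dict(dict_flat_generator(nested, attname='data'))
print(flat)  # {'data__pv__paris': 56.0, 'data__pv__london': 80.5}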
lsbardel/python-stdnet
stdnet/utils/jsontools.py
addmul_number_dicts
def addmul_number_dicts(series): '''Multiply dictionaries by numeric values and add them together. :parameter series: a tuple of two elements tuples. Each serie is of the form:: (weight,dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. Only common fields are aggregated. If a field has a non-numeric value it is not included either.''' if not series: return vtype = value_type((s[1] for s in series)) if vtype == 1: return sum((weight*float(d) for weight, d in series)) elif vtype == 3: keys = set(series[0][1]) for serie in series[1:]: keys.intersection_update(serie[1]) results = {} for key in keys: key_series = tuple((weight, d[key]) for weight, d in series) result = addmul_number_dicts(key_series) if result is not None: results[key] = result return results
python
def addmul_number_dicts(series): '''Multiply dictionaries by numeric values and add them together. :parameter series: a tuple of two elements tuples. Each serie is of the form:: (weight,dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. Only common fields are aggregated. If a field has a non-numeric value it is not included either.''' if not series: return vtype = value_type((s[1] for s in series)) if vtype == 1: return sum((weight*float(d) for weight, d in series)) elif vtype == 3: keys = set(series[0][1]) for serie in series[1:]: keys.intersection_update(serie[1]) results = {} for key in keys: key_series = tuple((weight, d[key]) for weight, d in series) result = addmul_number_dicts(key_series) if result is not None: results[key] = result return results
[ "def", "addmul_number_dicts", "(", "series", ")", ":", "if", "not", "series", ":", "return", "vtype", "=", "value_type", "(", "(", "s", "[", "1", "]", "for", "s", "in", "series", ")", ")", "if", "vtype", "==", "1", ":", "return", "sum", "(", "(", "weight", "*", "float", "(", "d", ")", "for", "weight", ",", "d", "in", "series", ")", ")", "elif", "vtype", "==", "3", ":", "keys", "=", "set", "(", "series", "[", "0", "]", "[", "1", "]", ")", "for", "serie", "in", "series", "[", "1", ":", "]", ":", "keys", ".", "intersection_update", "(", "serie", "[", "1", "]", ")", "results", "=", "{", "}", "for", "key", "in", "keys", ":", "key_series", "=", "tuple", "(", "(", "weight", ",", "d", "[", "key", "]", ")", "for", "weight", ",", "d", "in", "series", ")", "result", "=", "addmul_number_dicts", "(", "key_series", ")", "if", "result", "is", "not", "None", ":", "results", "[", "key", "]", "=", "result", "return", "results" ]
Multiply dictionaries by numeric values and add them together. :parameter series: a tuple of two elements tuples. Each serie is of the form:: (weight,dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. Only common fields are aggregated. If a field has a non-numeric value it is not included either.
[ "Multiply", "dictionaries", "by", "a", "numeric", "values", "and", "add", "them", "together", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L201-L229
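A small worked example of the weighted aggregation, assuming value_type classifies an all-numeric series as 1 and an all-dict series as 3 (as the branches above imply); only the common key survives.

series = ((0.5, {'a': 10, 'b': 4}), (2.0, {'a': 1, 'c': 7}))
print(addmul_number_dicts(series))  # {'a': 7.0}, i.e. 0.5*10 + 2.0*1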
rodluger/everest
everest/missions/k2/pbs.py
Download
def Download(campaign=0, queue='build', email=None, walltime=8, **kwargs): ''' Submits a cluster job to the build queue to download all TPFs for a given campaign. :param int campaign: The `K2` campaign to run :param str queue: The name of the queue to submit to. Default `build` :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `8` ''' # Figure out the subcampaign if type(campaign) is int: subcampaign = -1 elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) # Submit the cluster job pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'download.pbs') str_w = 'walltime=%d:00:00' % walltime str_v = 'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d' % ( EVEREST_DAT, campaign, subcampaign) if subcampaign == -1: str_name = 'download_c%02d' % campaign else: str_name = 'download_c%02d.%d' % (campaign, subcampaign) str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log') qsub_args = ['qsub', pbsfile, '-q', queue, '-v', str_v, '-o', str_out, '-j', 'oe', '-N', str_name, '-l', str_w] if email is not None: qsub_args.extend(['-M', email, '-m', 'ae']) # Now we submit the job print("Submitting the job...") subprocess.call(qsub_args)
python
def Download(campaign=0, queue='build', email=None, walltime=8, **kwargs): ''' Submits a cluster job to the build queue to download all TPFs for a given campaign. :param int campaign: The `K2` campaign to run :param str queue: The name of the queue to submit to. Default `build` :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `8` ''' # Figure out the subcampaign if type(campaign) is int: subcampaign = -1 elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) # Submit the cluster job pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'download.pbs') str_w = 'walltime=%d:00:00' % walltime str_v = 'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d' % ( EVEREST_DAT, campaign, subcampaign) if subcampaign == -1: str_name = 'download_c%02d' % campaign else: str_name = 'download_c%02d.%d' % (campaign, subcampaign) str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log') qsub_args = ['qsub', pbsfile, '-q', queue, '-v', str_v, '-o', str_out, '-j', 'oe', '-N', str_name, '-l', str_w] if email is not None: qsub_args.extend(['-M', email, '-m', 'ae']) # Now we submit the job print("Submitting the job...") subprocess.call(qsub_args)
[ "def", "Download", "(", "campaign", "=", "0", ",", "queue", "=", "'build'", ",", "email", "=", "None", ",", "walltime", "=", "8", ",", "*", "*", "kwargs", ")", ":", "# Figure out the subcampaign", "if", "type", "(", "campaign", ")", "is", "int", ":", "subcampaign", "=", "-", "1", "elif", "type", "(", "campaign", ")", "is", "float", ":", "x", ",", "y", "=", "divmod", "(", "campaign", ",", "1", ")", "campaign", "=", "int", "(", "x", ")", "subcampaign", "=", "round", "(", "y", "*", "10", ")", "# Submit the cluster job", "pbsfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'download.pbs'", ")", "str_w", "=", "'walltime=%d:00:00'", "%", "walltime", "str_v", "=", "'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d'", "%", "(", "EVEREST_DAT", ",", "campaign", ",", "subcampaign", ")", "if", "subcampaign", "==", "-", "1", ":", "str_name", "=", "'download_c%02d'", "%", "campaign", "else", ":", "str_name", "=", "'download_c%02d.%d'", "%", "(", "campaign", ",", "subcampaign", ")", "str_out", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "str_name", "+", "'.log'", ")", "qsub_args", "=", "[", "'qsub'", ",", "pbsfile", ",", "'-q'", ",", "queue", ",", "'-v'", ",", "str_v", ",", "'-o'", ",", "str_out", ",", "'-j'", ",", "'oe'", ",", "'-N'", ",", "str_name", ",", "'-l'", ",", "str_w", "]", "if", "email", "is", "not", "None", ":", "qsub_args", ".", "append", "(", "[", "'-M'", ",", "email", ",", "'-m'", ",", "'ae'", "]", ")", "# Now we submit the job", "print", "(", "\"Submitting the job...\"", ")", "subprocess", ".", "call", "(", "qsub_args", ")" ]
Submits a cluster job to the build queue to download all TPFs for a given campaign. :param int campaign: The `K2` campaign to run :param str queue: The name of the queue to submit to. Default `build` :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `8`
[ "Submits", "a", "cluster", "job", "to", "the", "build", "queue", "to", "download", "all", "TPFs", "for", "a", "given", "campaign", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L34-L75
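A hedged invocation sketch for the submitter above, on a system where qsub is available: the float campaign 6.2 is split by the divmod logic into campaign 6, subcampaign 2; the queue name and email are illustrative placeholders.

Download(campaign=6.2, queue='build', email='user@example.edu', walltime=4)
# submits 'download_c06.2' with walltime=4:00:00 and mails job status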
rodluger/everest
everest/missions/k2/pbs.py
_Download
def _Download(campaign, subcampaign): ''' Download all stars from a given campaign. This is called from ``missions/k2/download.pbs`` ''' # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all star IDs for this campaign stars = [s[0] for s in GetK2Campaign(campaign)] nstars = len(stars) # Download the TPF data for each one for i, EPIC in enumerate(stars): print("Downloading data for EPIC %d (%d/%d)..." % (EPIC, i + 1, nstars)) if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:], 'data.npz')): try: GetData(EPIC, season=campaign, download_only=True) except KeyboardInterrupt: sys.exit() except Exception: # Some targets could be corrupted... print("ERROR downloading EPIC %d." % EPIC) exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') print(ln) continue
python
def _Download(campaign, subcampaign): ''' Download all stars from a given campaign. This is called from ``missions/k2/download.pbs`` ''' # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all star IDs for this campaign stars = [s[0] for s in GetK2Campaign(campaign)] nstars = len(stars) # Download the TPF data for each one for i, EPIC in enumerate(stars): print("Downloading data for EPIC %d (%d/%d)..." % (EPIC, i + 1, nstars)) if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:], 'data.npz')): try: GetData(EPIC, season=campaign, download_only=True) except KeyboardInterrupt: sys.exit() except Exception: # Some targets could be corrupted... print("ERROR downloading EPIC %d." % EPIC) exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') print(ln) continue
[ "def", "_Download", "(", "campaign", ",", "subcampaign", ")", ":", "# Are we doing a subcampaign?", "if", "subcampaign", "!=", "-", "1", ":", "campaign", "=", "campaign", "+", "0.1", "*", "subcampaign", "# Get all star IDs for this campaign", "stars", "=", "[", "s", "[", "0", "]", "for", "s", "in", "GetK2Campaign", "(", "campaign", ")", "]", "nstars", "=", "len", "(", "stars", ")", "# Download the TPF data for each one", "for", "i", ",", "EPIC", "in", "enumerate", "(", "stars", ")", ":", "print", "(", "\"Downloading data for EPIC %d (%d/%d)...\"", "%", "(", "EPIC", ",", "i", "+", "1", ",", "nstars", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "int", "(", "campaign", ")", ",", "(", "'%09d'", "%", "EPIC", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "EPIC", ")", "[", "4", ":", "]", ",", "'data.npz'", ")", ")", ":", "try", ":", "GetData", "(", "EPIC", ",", "season", "=", "campaign", ",", "download_only", "=", "True", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "exit", "(", ")", "except", ":", "# Some targets could be corrupted...", "print", "(", "\"ERROR downloading EPIC %d.\"", "%", "EPIC", ")", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "ln", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "print", "(", "ln", ")", "continue" ]
Download all stars from a given campaign. This is called from ``missions/k2/download.pbs``
[ "Download", "all", "stars", "from", "a", "given", "campaign", ".", "This", "is", "called", "from", "missions", "/", "k2", "/", "download", ".", "pbs" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L78-L111
rodluger/everest
everest/missions/k2/pbs.py
Run
def Run(campaign=0, EPIC=None, nodes=5, ppn=12, walltime=100, mpn=None, email=None, queue=None, **kwargs): ''' Submits a cluster job to compute and plot data for all targets in a given campaign. :param campaign: The K2 campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` \ in the form `X.Y`, runs the `Y^th` decile of campaign `X`. :param str queue: The queue to submit to. Default `None` (default queue) :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `100` :param int nodes: The number of nodes to request. Default `5` :param int ppn: The number of processors per node to request. Default `12` :param int mpn: Memory per node in gb to request. Default no setting. ''' # Figure out the subcampaign if type(campaign) is int: subcampaign = -1 elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) # DEV hack: limit backfill jobs to 10 hours if EVEREST_DEV and (queue == 'bf'): walltime = min(10, walltime) # Convert kwargs to string. This is really hacky. Pickle creates an array # of bytes, which we must convert into a regular string to pass to the pbs # script and then back into python. Decoding the bytes isn't enough, since # we have pesky escaped characters such as newlines that don't behave well # when passing this string around. My braindead hack is to replace newlines # with '%%%', then undo the replacement when reading the kwargs. This works # for most cases, but sometimes pickle creates a byte array that can't be # decoded into utf-8; this happens when trying to pass numpy arrays around, # for instance. This needs to be fixed in the future, but for now we'll # restrict the kwargs to be ints, floats, lists, and strings. try: strkwargs = pickle.dumps(kwargs, 0).decode( 'utf-8').replace('\n', '%%%') except UnicodeDecodeError: raise ValueError('Unable to pickle `kwargs`. Currently the `kwargs` ' + 'values may only be `int`s, `float`s, `string`s, ' + '`bool`s, or lists of these.') # Submit the cluster job pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'run.pbs') if mpn is not None: str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % ( nodes, ppn, ppn, mpn * nodes) else: str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn) str_w = 'walltime=%d:00:00' % walltime str_v = "EVEREST_DAT=%s,NODES=%d," % (EVEREST_DAT, nodes) + \ "EPIC=%d," % (0 if EPIC is None else EPIC) + \ "CAMPAIGN=%d,SUBCAMPAIGN=%d,STRKWARGS='%s'" % \ (campaign, subcampaign, strkwargs) if EPIC is None: if subcampaign == -1: str_name = 'c%02d' % campaign else: str_name = 'c%02d.%d' % (campaign, subcampaign) else: str_name = 'EPIC%d' % EPIC str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log') qsub_args = ['qsub', pbsfile, '-v', str_v, '-o', str_out, '-j', 'oe', '-N', str_name, '-l', str_n, '-l', str_w] if email is not None: qsub_args.extend(['-M', email, '-m', 'ae']) if queue is not None: qsub_args += ['-q', queue] # Now we submit the job print("Submitting the job...") subprocess.call(qsub_args)
python
def Run(campaign=0, EPIC=None, nodes=5, ppn=12, walltime=100, mpn=None, email=None, queue=None, **kwargs): ''' Submits a cluster job to compute and plot data for all targets in a given campaign. :param campaign: The K2 campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` \ in the form `X.Y`, runs the `Y^th` decile of campaign `X`. :param str queue: The queue to submit to. Default `None` (default queue) :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `100` :param int nodes: The number of nodes to request. Default `5` :param int ppn: The number of processors per node to request. Default `12` :param int mpn: Memory per node in gb to request. Default no setting. ''' # Figure out the subcampaign if type(campaign) is int: subcampaign = -1 elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) # DEV hack: limit backfill jobs to 10 hours if EVEREST_DEV and (queue == 'bf'): walltime = min(10, walltime) # Convert kwargs to string. This is really hacky. Pickle creates an array # of bytes, which we must convert into a regular string to pass to the pbs # script and then back into python. Decoding the bytes isn't enough, since # we have pesky escaped characters such as newlines that don't behave well # when passing this string around. My braindead hack is to replace newlines # with '%%%', then undo the replacement when reading the kwargs. This works # for most cases, but sometimes pickle creates a byte array that can't be # decoded into utf-8; this happens when trying to pass numpy arrays around, # for instance. This needs to be fixed in the future, but for now we'll # restrict the kwargs to be ints, floats, lists, and strings. try: strkwargs = pickle.dumps(kwargs, 0).decode( 'utf-8').replace('\n', '%%%') except UnicodeDecodeError: raise ValueError('Unable to pickle `kwargs`. Currently the `kwargs` ' + 'values may only be `int`s, `float`s, `string`s, ' + '`bool`s, or lists of these.') # Submit the cluster job pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'run.pbs') if mpn is not None: str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % ( nodes, ppn, ppn, mpn * nodes) else: str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn) str_w = 'walltime=%d:00:00' % walltime str_v = "EVEREST_DAT=%s,NODES=%d," % (EVEREST_DAT, nodes) + \ "EPIC=%d," % (0 if EPIC is None else EPIC) + \ "CAMPAIGN=%d,SUBCAMPAIGN=%d,STRKWARGS='%s'" % \ (campaign, subcampaign, strkwargs) if EPIC is None: if subcampaign == -1: str_name = 'c%02d' % campaign else: str_name = 'c%02d.%d' % (campaign, subcampaign) else: str_name = 'EPIC%d' % EPIC str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log') qsub_args = ['qsub', pbsfile, '-v', str_v, '-o', str_out, '-j', 'oe', '-N', str_name, '-l', str_n, '-l', str_w] if email is not None: qsub_args.extend(['-M', email, '-m', 'ae']) if queue is not None: qsub_args += ['-q', queue] # Now we submit the job print("Submitting the job...") subprocess.call(qsub_args)
[ "def", "Run", "(", "campaign", "=", "0", ",", "EPIC", "=", "None", ",", "nodes", "=", "5", ",", "ppn", "=", "12", ",", "walltime", "=", "100", ",", "mpn", "=", "None", ",", "email", "=", "None", ",", "queue", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Figure out the subcampaign", "if", "type", "(", "campaign", ")", "is", "int", ":", "subcampaign", "=", "-", "1", "elif", "type", "(", "campaign", ")", "is", "float", ":", "x", ",", "y", "=", "divmod", "(", "campaign", ",", "1", ")", "campaign", "=", "int", "(", "x", ")", "subcampaign", "=", "round", "(", "y", "*", "10", ")", "# DEV hack: limit backfill jobs to 10 hours", "if", "EVEREST_DEV", "and", "(", "queue", "==", "'bf'", ")", ":", "walltime", "=", "min", "(", "10", ",", "walltime", ")", "# Convert kwargs to string. This is really hacky. Pickle creates an array", "# of bytes, which we must convert into a regular string to pass to the pbs", "# script and then back into python. Decoding the bytes isn't enough, since", "# we have pesky escaped characters such as newlines that don't behave well", "# when passing this string around. My braindead hack is to replace newlines", "# with '%%%', then undo the replacement when reading the kwargs. This works", "# for most cases, but sometimes pickle creates a byte array that can't be", "# decoded into utf-8; this happens when trying to pass numpy arrays around,", "# for instance. This needs to be fixed in the future, but for now we'll", "# restrict the kwargs to be ints, floats, lists, and strings.", "try", ":", "strkwargs", "=", "pickle", ".", "dumps", "(", "kwargs", ",", "0", ")", ".", "decode", "(", "'utf-8'", ")", ".", "replace", "(", "'\\n'", ",", "'%%%'", ")", "except", "UnicodeDecodeError", ":", "raise", "ValueError", "(", "'Unable to pickle `kwargs`. 
Currently the `kwargs` '", "+", "'values may only be `int`s, `float`s, `string`s, '", "+", "'`bool`s, or lists of these.'", ")", "# Submit the cluster job", "pbsfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'run.pbs'", ")", "if", "mpn", "is", "not", "None", ":", "str_n", "=", "'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb'", "%", "(", "nodes", ",", "ppn", ",", "ppn", ",", "mpn", "*", "nodes", ")", "else", ":", "str_n", "=", "'nodes=%d:ppn=%d,feature=%dcore'", "%", "(", "nodes", ",", "ppn", ",", "ppn", ")", "str_w", "=", "'walltime=%d:00:00'", "%", "walltime", "str_v", "=", "\"EVEREST_DAT=%s,NODES=%d,\"", "%", "(", "EVEREST_DAT", ",", "nodes", ")", "+", "\"EPIC=%d,\"", "%", "(", "0", "if", "EPIC", "is", "None", "else", "EPIC", ")", "+", "\"CAMPAIGN=%d,SUBCAMPAIGN=%d,STRKWARGS='%s'\"", "%", "(", "campaign", ",", "subcampaign", ",", "strkwargs", ")", "if", "EPIC", "is", "None", ":", "if", "subcampaign", "==", "-", "1", ":", "str_name", "=", "'c%02d'", "%", "campaign", "else", ":", "str_name", "=", "'c%02d.%d'", "%", "(", "campaign", ",", "subcampaign", ")", "else", ":", "str_name", "=", "'EPIC%d'", "%", "EPIC", "str_out", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "str_name", "+", "'.log'", ")", "qsub_args", "=", "[", "'qsub'", ",", "pbsfile", ",", "'-v'", ",", "str_v", ",", "'-o'", ",", "str_out", ",", "'-j'", ",", "'oe'", ",", "'-N'", ",", "str_name", ",", "'-l'", ",", "str_n", ",", "'-l'", ",", "str_w", "]", "if", "email", "is", "not", "None", ":", "qsub_args", ".", "append", "(", "[", "'-M'", ",", "email", ",", "'-m'", ",", "'ae'", "]", ")", "if", "queue", "is", "not", "None", ":", "qsub_args", "+=", "[", "'-q'", ",", "queue", "]", "# Now we submit the job", "print", "(", "\"Submitting the job...\"", ")", "subprocess", ".", "call", "(", "qsub_args", ")" ]
Submits a cluster job to compute and plot data for all targets in a given campaign. :param campaign: The K2 campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` \ in the form `X.Y`, runs the `Y^th` decile of campaign `X`. :param str queue: The queue to submit to. Default `None` (default queue) :param str email: The email to send job status notifications to. \ Default `None` :param int walltime: The number of hours to request. Default `100` :param int nodes: The number of nodes to request. Default `5` :param int ppn: The number of processors per node to request. Default `12` :param int mpn: Memory per node in gb to request. Default no setting.
[ "Submits", "a", "cluster", "job", "to", "compute", "and", "plot", "data", "for", "all", "targets", "in", "a", "given", "campaign", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L114-L198
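A hedged invocation sketch for the batch runner above; per the pickling caveat in its comments, any extra kwargs should be ints, floats, strings, bools, or lists of these so they survive the round-trip into the PBS environment. The queue name and node counts are illustrative.

Run(campaign=8, nodes=4, ppn=16, walltime=48, queue='astro',
    cadence='lc', debug=False)
# requests nodes=4:ppn=16,feature=16core and logs to .../k2/c08.log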
rodluger/everest
everest/missions/k2/pbs.py
_Publish
def _Publish(campaign, subcampaign, strkwargs): ''' The actual function that publishes a given campaign; this must be called from ``missions/k2/publish.pbs``. ''' # Get kwargs from string kwargs = pickle.loads(strkwargs.replace('%%%', '\n').encode('utf-8')) # Check the cadence cadence = kwargs.get('cadence', 'lc') # Model wrapper m = FunctionWrapper(EverestModel, season=campaign, publish=True, **kwargs) # Set up our custom exception handler sys.excepthook = ExceptionHook # Initialize our multiprocessing pool with Pool() as pool: # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all the stars stars = GetK2Campaign(campaign, epics_only=True, cadence=cadence) # Run pool.map(m, stars)
python
def _Publish(campaign, subcampaign, strkwargs): ''' The actual function that publishes a given campaign; this must be called from ``missions/k2/publish.pbs``. ''' # Get kwargs from string kwargs = pickle.loads(strkwargs.replace('%%%', '\n').encode('utf-8')) # Check the cadence cadence = kwargs.get('cadence', 'lc') # Model wrapper m = FunctionWrapper(EverestModel, season=campaign, publish=True, **kwargs) # Set up our custom exception handler sys.excepthook = ExceptionHook # Initialize our multiprocessing pool with Pool() as pool: # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all the stars stars = GetK2Campaign(campaign, epics_only=True, cadence=cadence) # Run pool.map(m, stars)
[ "def", "_Publish", "(", "campaign", ",", "subcampaign", ",", "strkwargs", ")", ":", "# Get kwargs from string", "kwargs", "=", "pickle", ".", "loads", "(", "strkwargs", ".", "replace", "(", "'%%%'", ",", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "# Check the cadence", "cadence", "=", "kwargs", ".", "get", "(", "'cadence'", ",", "'lc'", ")", "# Model wrapper", "m", "=", "FunctionWrapper", "(", "EverestModel", ",", "season", "=", "campaign", ",", "publish", "=", "True", ",", "*", "*", "kwargs", ")", "# Set up our custom exception handler", "sys", ".", "excepthook", "=", "ExceptionHook", "# Initialize our multiprocessing pool", "with", "Pool", "(", ")", "as", "pool", ":", "# Are we doing a subcampaign?", "if", "subcampaign", "!=", "-", "1", ":", "campaign", "=", "campaign", "+", "0.1", "*", "subcampaign", "# Get all the stars", "stars", "=", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ",", "cadence", "=", "cadence", ")", "# Run", "pool", ".", "map", "(", "m", ",", "stars", ")" ]
The actual function that publishes a given campaign; this must be called from ``missions/k2/publish.pbs``.
[ "The", "actual", "function", "that", "publishes", "a", "given", "campaign", ";", "this", "must", "be", "called", "from", "missions", "/", "k2", "/", "publish", ".", "pbs", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L322-L350
rodluger/everest
everest/missions/k2/pbs.py
Status
def Status(season=range(18), model='nPLD', purge=False, injection=False, cadence='lc', **kwargs): ''' Shows the progress of the de-trending runs for the specified campaign(s). ''' # Mission compatibility campaign = season # Injection? if injection: return InjectionStatus(campaign=campaign, model=model, purge=purge, **kwargs) # Cadence if cadence == 'sc': model = '%s.sc' % model if not hasattr(campaign, '__len__'): if type(campaign) is int: # Return the subcampaigns all_stars = [s for s in GetK2Campaign( campaign, split=True, epics_only=True, cadence=cadence)] campaign = [campaign + 0.1 * n for n in range(10)] else: all_stars = [[s for s in GetK2Campaign( campaign, epics_only=True, cadence=cadence)]] campaign = [campaign] else: all_stars = [[s for s in GetK2Campaign( c, epics_only=True, cadence=cadence)] for c in campaign] print("CAMP TOTAL DOWNLOADED PROCESSED FITS ERRORS") print("---- ----- ---------- --------- ---- ------") for c, stars in zip(campaign, all_stars): if len(stars) == 0: continue down = 0 proc = 0 err = 0 fits = 0 bad = [] remain = [] total = len(stars) if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)): path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c) for folder in [f for f in os.listdir(path) if f.endswith('00000')]: for subfolder in os.listdir(os.path.join(path, folder)): ID = int(folder[:4] + subfolder) if ID in stars: if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, 'data.npz')): down += 1 if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, FITSFile( ID, c, cadence=cadence))): fits += 1 if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.npz')): proc += 1 elif os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.err')): err += 1 bad.append(folder[:4] + subfolder) if purge: os.remove(os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.err')) else: remain.append(folder[:4] + subfolder) if proc == total: cc = ct = cp = ce = GREEN cd = BLACK if down < total else GREEN else: cc = BLACK ct = BLACK cd = BLACK if down < total else BLUE cp = BLACK if proc < down or proc == 0 else BLUE ce = RED if err > 0 else BLACK cf = BLACK if fits < total else GREEN if type(c) is int: print("%s{:>4d} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m".format(c, total, down, proc, fits, err) % (cc, ct, cd, cp, cf, ce)) else: print("%s{:>4.1f} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m".format(c, total, down, proc, fits, err) % (cc, ct, cd, cp, cf, ce)) if len(remain) <= 25 and len(remain) > 0 and len(campaign) == 1: remain.extend([" "] * (4 - (len(remain) % 4))) print() for A, B, C, D in zip(remain[::4], remain[1::4], remain[2::4], remain[3::4]): if A == remain[0]: print("REMAIN: %s %s %s %s" % (A, B, C, D)) print() else: print(" %s %s %s %s" % (A, B, C, D)) print() if len(bad) and len(campaign) == 1: bad.extend([" "] * (4 - (len(bad) % 4))) print() for A, B, C, D in zip(bad[::4], bad[1::4], bad[2::4], bad[3::4]): if A == bad[0]: print("ERRORS: %s %s %s %s" % (A, B, C, D)) print() else: print(" %s %s %s %s" % (A, B, C, D)) print()
python
def Status(season=range(18), model='nPLD', purge=False, injection=False, cadence='lc', **kwargs): ''' Shows the progress of the de-trending runs for the specified campaign(s). ''' # Mission compatibility campaign = season # Injection? if injection: return InjectionStatus(campaign=campaign, model=model, purge=purge, **kwargs) # Cadence if cadence == 'sc': model = '%s.sc' % model if not hasattr(campaign, '__len__'): if type(campaign) is int: # Return the subcampaigns all_stars = [s for s in GetK2Campaign( campaign, split=True, epics_only=True, cadence=cadence)] campaign = [campaign + 0.1 * n for n in range(10)] else: all_stars = [[s for s in GetK2Campaign( campaign, epics_only=True, cadence=cadence)]] campaign = [campaign] else: all_stars = [[s for s in GetK2Campaign( c, epics_only=True, cadence=cadence)] for c in campaign] print("CAMP TOTAL DOWNLOADED PROCESSED FITS ERRORS") print("---- ----- ---------- --------- ---- ------") for c, stars in zip(campaign, all_stars): if len(stars) == 0: continue down = 0 proc = 0 err = 0 fits = 0 bad = [] remain = [] total = len(stars) if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)): path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c) for folder in [f for f in os.listdir(path) if f.endswith('00000')]: for subfolder in os.listdir(os.path.join(path, folder)): ID = int(folder[:4] + subfolder) if ID in stars: if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, 'data.npz')): down += 1 if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, FITSFile( ID, c, cadence=cadence))): fits += 1 if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.npz')): proc += 1 elif os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.err')): err += 1 bad.append(folder[:4] + subfolder) if purge: os.remove(os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, model + '.err')) else: remain.append(folder[:4] + subfolder) if proc == total: cc = ct = cp = ce = GREEN cd = BLACK if down < total else GREEN else: cc = BLACK ct = BLACK cd = BLACK if down < total else BLUE cp = BLACK if proc < down or proc == 0 else BLUE ce = RED if err > 0 else BLACK cf = BLACK if fits < total else GREEN if type(c) is int: print("%s{:>4d} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m".format(c, total, down, proc, fits, err) % (cc, ct, cd, cp, cf, ce)) else: print("%s{:>4.1f} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m".format(c, total, down, proc, fits, err) % (cc, ct, cd, cp, cf, ce)) if len(remain) <= 25 and len(remain) > 0 and len(campaign) == 1: remain.extend([" "] * (4 - (len(remain) % 4))) print() for A, B, C, D in zip(remain[::4], remain[1::4], remain[2::4], remain[3::4]): if A == remain[0]: print("REMAIN: %s %s %s %s" % (A, B, C, D)) print() else: print(" %s %s %s %s" % (A, B, C, D)) print() if len(bad) and len(campaign) == 1: bad.extend([" "] * (4 - (len(bad) % 4))) print() for A, B, C, D in zip(bad[::4], bad[1::4], bad[2::4], bad[3::4]): if A == bad[0]: print("ERRORS: %s %s %s %s" % (A, B, C, D)) print() else: print(" %s %s %s %s" % (A, B, C, D)) print()
[ "def", "Status", "(", "season", "=", "range", "(", "18", ")", ",", "model", "=", "'nPLD'", ",", "purge", "=", "False", ",", "injection", "=", "False", ",", "cadence", "=", "'lc'", ",", "*", "*", "kwargs", ")", ":", "# Mission compatibility", "campaign", "=", "season", "# Injection?", "if", "injection", ":", "return", "InjectionStatus", "(", "campaign", "=", "campaign", ",", "model", "=", "model", ",", "purge", "=", "purge", ",", "*", "*", "kwargs", ")", "# Cadence", "if", "cadence", "==", "'sc'", ":", "model", "=", "'%s.sc'", "%", "model", "if", "not", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "if", "type", "(", "campaign", ")", "is", "int", ":", "# Return the subcampaigns", "all_stars", "=", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "campaign", ",", "split", "=", "True", ",", "epics_only", "=", "True", ",", "cadence", "=", "cadence", ")", "]", "campaign", "=", "[", "campaign", "+", "0.1", "*", "n", "for", "n", "in", "range", "(", "10", ")", "]", "else", ":", "all_stars", "=", "[", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ",", "cadence", "=", "cadence", ")", "]", "]", "campaign", "=", "[", "campaign", "]", "else", ":", "all_stars", "=", "[", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "c", ",", "epics_only", "=", "True", ",", "cadence", "=", "cadence", ")", "]", "for", "c", "in", "campaign", "]", "print", "(", "\"CAMP TOTAL DOWNLOADED PROCESSED FITS ERRORS\"", ")", "print", "(", "\"---- ----- ---------- --------- ---- ------\"", ")", "for", "c", ",", "stars", "in", "zip", "(", "campaign", ",", "all_stars", ")", ":", "if", "len", "(", "stars", ")", "==", "0", ":", "continue", "down", "=", "0", "proc", "=", "0", "err", "=", "0", "fits", "=", "0", "bad", "=", "[", "]", "remain", "=", "[", "]", "total", "=", "len", "(", "stars", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ")", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ")", "for", "folder", "in", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", "if", "f", ".", "endswith", "(", "'00000'", ")", "]", ":", "for", "subfolder", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ")", ")", ":", "ID", "=", "int", "(", "folder", "[", ":", "4", "]", "+", "subfolder", ")", "if", "ID", "in", "stars", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "'data.npz'", ")", ")", ":", "down", "+=", "1", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "FITSFile", "(", "ID", ",", "c", ",", "cadence", "=", "cadence", ")", ")", ")", ":", "fits", "+=", "1", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "model", "+", "'.npz'", ")", ")", ":", "proc", "+=", "1", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "model", "+", "'.err'", ")", ")", ":", "err", "+=", "1", "bad", ".", "append", "(", "folder", "[", ":", "4", "]", "+", "subfolder", ")", "if", "purge", ":", 
"os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "model", "+", "'.err'", ")", ")", "else", ":", "remain", ".", "append", "(", "folder", "[", ":", "4", "]", "+", "subfolder", ")", "if", "proc", "==", "total", ":", "cc", "=", "ct", "=", "cp", "=", "ce", "=", "GREEN", "cd", "=", "BLACK", "if", "down", "<", "total", "else", "GREEN", "else", ":", "cc", "=", "BLACK", "ct", "=", "BLACK", "cd", "=", "BLACK", "if", "down", "<", "total", "else", "BLUE", "cp", "=", "BLACK", "if", "proc", "<", "down", "or", "proc", "==", "0", "else", "BLUE", "ce", "=", "RED", "if", "err", ">", "0", "else", "BLACK", "cf", "=", "BLACK", "if", "fits", "<", "total", "else", "GREEN", "if", "type", "(", "c", ")", "is", "int", ":", "print", "(", "\"%s{:>4d} \\033[0m%s{:>8d}\\033[0m%s{:>16d}\\033[0m%s{:>13d}\\033[0m%s{:>10d}\\033[0m%s{:>10d}\\033[0m\"", ".", "format", "(", "c", ",", "total", ",", "down", ",", "proc", ",", "fits", ",", "err", ")", "%", "(", "cc", ",", "ct", ",", "cd", ",", "cp", ",", "cf", ",", "ce", ")", ")", "else", ":", "print", "(", "\"%s{:>4.1f} \\033[0m%s{:>8d}\\033[0m%s{:>16d}\\033[0m%s{:>13d}\\033[0m%s{:>10d}\\033[0m%s{:>10d}\\033[0m\"", ".", "format", "(", "c", ",", "total", ",", "down", ",", "proc", ",", "fits", ",", "err", ")", "%", "(", "cc", ",", "ct", ",", "cd", ",", "cp", ",", "cf", ",", "ce", ")", ")", "if", "len", "(", "remain", ")", "<=", "25", "and", "len", "(", "remain", ")", ">", "0", "and", "len", "(", "campaign", ")", "==", "1", ":", "remain", ".", "extend", "(", "[", "\" \"", "]", "*", "(", "4", "-", "(", "len", "(", "remain", ")", "%", "4", ")", ")", ")", "print", "(", ")", "for", "A", ",", "B", ",", "C", ",", "D", "in", "zip", "(", "remain", "[", ":", ":", "4", "]", ",", "remain", "[", "1", ":", ":", "4", "]", ",", "remain", "[", "2", ":", ":", "4", "]", ",", "remain", "[", "3", ":", ":", "4", "]", ")", ":", "if", "A", "==", "remain", "[", "0", "]", ":", "print", "(", "\"REMAIN: %s %s %s %s\"", "%", "(", "A", ",", "B", ",", "C", ",", "D", ")", ")", "print", "(", ")", "else", ":", "print", "(", "\" %s %s %s %s\"", "%", "(", "A", ",", "B", ",", "C", ",", "D", ")", ")", "print", "(", ")", "if", "len", "(", "bad", ")", "and", "len", "(", "campaign", ")", "==", "1", ":", "bad", ".", "extend", "(", "[", "\" \"", "]", "*", "(", "4", "-", "(", "len", "(", "bad", ")", "%", "4", ")", ")", ")", "print", "(", ")", "for", "A", ",", "B", ",", "C", ",", "D", "in", "zip", "(", "bad", "[", ":", ":", "4", "]", ",", "bad", "[", "1", ":", ":", "4", "]", ",", "bad", "[", "2", ":", ":", "4", "]", ",", "bad", "[", "3", ":", ":", "4", "]", ")", ":", "if", "A", "==", "bad", "[", "0", "]", ":", "print", "(", "\"ERRORS: %s %s %s %s\"", "%", "(", "A", ",", "B", ",", "C", ",", "D", ")", ")", "print", "(", ")", "else", ":", "print", "(", "\" %s %s %s %s\"", "%", "(", "A", ",", "B", ",", "C", ",", "D", ")", ")", "print", "(", ")" ]
Shows the progress of the de-trending runs for the specified campaign(s).
[ "Shows", "the", "progress", "of", "the", "de", "-", "trending", "runs", "for", "the", "specified", "campaign", "(", "s", ")", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L353-L468
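A hedged usage sketch for the progress report above: show long-cadence progress for a few campaigns and clear any stale .err files so those targets show up as remaining again.

Status(season=[5, 6, 7], model='nPLD', cadence='lc', purge=True)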
rodluger/everest
everest/missions/k2/pbs.py
InjectionStatus
def InjectionStatus(campaign=range(18), model='nPLD', purge=False, depths=[0.01, 0.001, 0.0001], **kwargs): ''' Shows the progress of the injection de-trending runs for the specified campaign(s). ''' if not hasattr(campaign, '__len__'): if type(campaign) is int: # Return the subcampaigns all_stars = [s for s in GetK2Campaign( campaign, split=True, epics_only=True)] campaign = [campaign + 0.1 * n for n in range(10)] else: all_stars = [[s for s in GetK2Campaign(campaign, epics_only=True)]] campaign = [campaign] else: all_stars = [[s for s in GetK2Campaign( c, epics_only=True)] for c in campaign] print("CAMP MASK DEPTH TOTAL DONE ERRORS") print("---- ---- ----- ----- ---- ------") for c, stars in zip(campaign, all_stars): if len(stars) == 0: continue done = [[0 for d in depths], [0 for d in depths]] err = [[0 for d in depths], [0 for d in depths]] total = len(stars) if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)): path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c) for folder in os.listdir(path): for subfolder in os.listdir(os.path.join(path, folder)): ID = int(folder[:4] + subfolder) for m, mask in enumerate(['U', 'M']): for d, depth in enumerate(depths): if os.path.exists( os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, '%s_Inject_%s%g.npz' % (model, mask, depth))): done[m][d] += 1 elif os.path.exists( os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, '%s_Inject_%s%g.err' % (model, mask, depth))): err[m][d] += 1 for d, depth in enumerate(depths): for m, mask in enumerate(['F', 'T']): if done[m][d] == total: color = GREEN else: color = BLACK if err[m][d] > 0: errcolor = RED else: errcolor = '' if type(c) is int: print("%s{:>4d}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\033[0m".format( c, mask, depth, total, done[m][d], err[m][d]) % (color, errcolor)) else: print("%s{:>4.1f}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\033[0m".format( c, mask, depth, total, done[m][d], err[m][d]) % (color, errcolor))
python
def InjectionStatus(campaign=range(18), model='nPLD', purge=False, depths=[0.01, 0.001, 0.0001], **kwargs): ''' Shows the progress of the injection de-trending runs for the specified campaign(s). ''' if not hasattr(campaign, '__len__'): if type(campaign) is int: # Return the subcampaigns all_stars = [s for s in GetK2Campaign( campaign, split=True, epics_only=True)] campaign = [campaign + 0.1 * n for n in range(10)] else: all_stars = [[s for s in GetK2Campaign(campaign, epics_only=True)]] campaign = [campaign] else: all_stars = [[s for s in GetK2Campaign( c, epics_only=True)] for c in campaign] print("CAMP MASK DEPTH TOTAL DONE ERRORS") print("---- ---- ----- ----- ---- ------") for c, stars in zip(campaign, all_stars): if len(stars) == 0: continue done = [[0 for d in depths], [0 for d in depths]] err = [[0 for d in depths], [0 for d in depths]] total = len(stars) if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)): path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c) for folder in os.listdir(path): for subfolder in os.listdir(os.path.join(path, folder)): ID = int(folder[:4] + subfolder) for m, mask in enumerate(['U', 'M']): for d, depth in enumerate(depths): if os.path.exists( os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, '%s_Inject_%s%g.npz' % (model, mask, depth))): done[m][d] += 1 elif os.path.exists( os.path.join( EVEREST_DAT, 'k2', 'c%02d' % c, folder, subfolder, '%s_Inject_%s%g.err' % (model, mask, depth))): err[m][d] += 1 for d, depth in enumerate(depths): for m, mask in enumerate(['F', 'T']): if done[m][d] == total: color = GREEN else: color = BLACK if err[m][d] > 0: errcolor = RED else: errcolor = '' if type(c) is int: print("%s{:>4d}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\033[0m".format( c, mask, depth, total, done[m][d], err[m][d]) % (color, errcolor)) else: print("%s{:>4.1f}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\033[0m".format( c, mask, depth, total, done[m][d], err[m][d]) % (color, errcolor))
[ "def", "InjectionStatus", "(", "campaign", "=", "range", "(", "18", ")", ",", "model", "=", "'nPLD'", ",", "purge", "=", "False", ",", "depths", "=", "[", "0.01", ",", "0.001", ",", "0.0001", "]", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "if", "type", "(", "campaign", ")", "is", "int", ":", "# Return the subcampaigns", "all_stars", "=", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "campaign", ",", "split", "=", "True", ",", "epics_only", "=", "True", ")", "]", "campaign", "=", "[", "campaign", "+", "0.1", "*", "n", "for", "n", "in", "range", "(", "10", ")", "]", "else", ":", "all_stars", "=", "[", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ")", "]", "]", "campaign", "=", "[", "campaign", "]", "else", ":", "all_stars", "=", "[", "[", "s", "for", "s", "in", "GetK2Campaign", "(", "c", ",", "epics_only", "=", "True", ")", "]", "for", "c", "in", "campaign", "]", "print", "(", "\"CAMP MASK DEPTH TOTAL DONE ERRORS\"", ")", "print", "(", "\"---- ---- ----- ----- ---- ------\"", ")", "for", "c", ",", "stars", "in", "zip", "(", "campaign", ",", "all_stars", ")", ":", "if", "len", "(", "stars", ")", "==", "0", ":", "continue", "done", "=", "[", "[", "0", "for", "d", "in", "depths", "]", ",", "[", "0", "for", "d", "in", "depths", "]", "]", "err", "=", "[", "[", "0", "for", "d", "in", "depths", "]", ",", "[", "0", "for", "d", "in", "depths", "]", "]", "total", "=", "len", "(", "stars", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ")", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ")", "for", "folder", "in", "os", ".", "listdir", "(", "path", ")", ":", "for", "subfolder", "in", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ")", ")", ":", "ID", "=", "int", "(", "folder", "[", ":", "4", "]", "+", "subfolder", ")", "for", "m", ",", "mask", "in", "enumerate", "(", "[", "'U'", ",", "'M'", "]", ")", ":", "for", "d", ",", "depth", "in", "enumerate", "(", "depths", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "'%s_Inject_%s%g.npz'", "%", "(", "model", ",", "mask", ",", "depth", ")", ")", ")", ":", "done", "[", "m", "]", "[", "d", "]", "+=", "1", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "c", ",", "folder", ",", "subfolder", ",", "'%s_Inject_%s%g.err'", "%", "(", "model", ",", "mask", ",", "depth", ")", ")", ")", ":", "err", "[", "m", "]", "[", "d", "]", "+=", "1", "for", "d", ",", "depth", "in", "enumerate", "(", "depths", ")", ":", "for", "m", ",", "mask", "in", "enumerate", "(", "[", "'F'", ",", "'T'", "]", ")", ":", "if", "done", "[", "m", "]", "[", "d", "]", "==", "total", ":", "color", "=", "GREEN", "else", ":", "color", "=", "BLACK", "if", "err", "[", "m", "]", "[", "d", "]", ">", "0", ":", "errcolor", "=", "RED", "else", ":", "errcolor", "=", "''", "if", "type", "(", "c", ")", "is", "int", ":", "print", "(", "\"%s{:>4d}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\\033[0m\"", ".", "format", "(", "c", ",", "mask", ",", "depth", ",", "total", ",", "done", "[", "m", "]", "[", "d", "]", ",", "err", "[", "m", "]", "[", "d", "]", ")", "%", "(", "color", ",", "errcolor", ")", ")", "else", ":", "print", "(", "\"%s{:>4.1f}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\\033[0m\"", ".", "format", "(", "c", ",", "mask", ",", "depth", ",", "total", ",", "done", "[", "m", "]", "[", "d", "]", ",", "err", "[", "m", "]", "[", "d", "]", ")", "%", "(", "color", ",", "errcolor", ")", ")" ]
Shows the progress of the injection de-trending runs for the specified campaign(s).
[ "Shows", "the", "progress", "of", "the", "injection", "de", "-", "trending", "runs", "for", "the", "specified", "campaign", "(", "s", ")", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L471-L533
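A hedged usage sketch for the InjectionStatus record above; it assumes the everest package is importable and that EVEREST_DAT points at an existing set of k2 injection runs, and the campaign numbers are illustrative:
# Minimal sketch, assuming everest is installed and its k2 output exists.
from everest.missions.k2.pbs import InjectionStatus
# Progress table for a single campaign at the default injected depths.
InjectionStatus(campaign=6, model='nPLD')
# Progress for several campaigns, restricted to one depth.
InjectionStatus(campaign=[1, 2, 3], model='nPLD', depths=[0.01])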
rodluger/everest
everest/missions/k2/pbs.py
EverestModel
def EverestModel(ID, model='nPLD', publish=False, csv=False, **kwargs): ''' A wrapper around an :py:obj:`everest` model for PBS runs. ''' if model != 'Inject': from ... import detrender # HACK: We need to explicitly mask short cadence planets if kwargs.get('cadence', 'lc') == 'sc': EPIC, t0, period, duration = \ np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'scmasks.tsv'), unpack=True) if ID in EPIC and kwargs.get('planets', None) is None: ii = np.where(EPIC == ID)[0] planets = [] for i in ii: planets.append([t0[i], period[i], 1.25 * duration[i]]) kwargs.update({'planets': planets}) # Run the model m = getattr(detrender, model)(ID, **kwargs) # Publish? if publish: if csv: m.publish_csv() else: m.publish() else: from ...inject import Inject Inject(ID, **kwargs) return True
python
def EverestModel(ID, model='nPLD', publish=False, csv=False, **kwargs): ''' A wrapper around an :py:obj:`everest` model for PBS runs. ''' if model != 'Inject': from ... import detrender # HACK: We need to explicitly mask short cadence planets if kwargs.get('cadence', 'lc') == 'sc': EPIC, t0, period, duration = \ np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'scmasks.tsv'), unpack=True) if ID in EPIC and kwargs.get('planets', None) is None: ii = np.where(EPIC == ID)[0] planets = [] for i in ii: planets.append([t0[i], period[i], 1.25 * duration[i]]) kwargs.update({'planets': planets}) # Run the model m = getattr(detrender, model)(ID, **kwargs) # Publish? if publish: if csv: m.publish_csv() else: m.publish() else: from ...inject import Inject Inject(ID, **kwargs) return True
[ "def", "EverestModel", "(", "ID", ",", "model", "=", "'nPLD'", ",", "publish", "=", "False", ",", "csv", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "model", "!=", "'Inject'", ":", "from", ".", ".", ".", "import", "detrender", "# HACK: We need to explicitly mask short cadence planets", "if", "kwargs", ".", "get", "(", "'cadence'", ",", "'lc'", ")", "==", "'sc'", ":", "EPIC", ",", "t0", ",", "period", ",", "duration", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'scmasks.tsv'", ")", ",", "unpack", "=", "True", ")", "if", "ID", "in", "EPIC", "and", "kwargs", ".", "get", "(", "'planets'", ",", "None", ")", "is", "None", ":", "ii", "=", "np", ".", "where", "(", "EPIC", "==", "ID", ")", "[", "0", "]", "planets", "=", "[", "]", "for", "i", "in", "ii", ":", "planets", ".", "append", "(", "[", "t0", "[", "i", "]", ",", "period", "[", "i", "]", ",", "1.25", "*", "duration", "[", "i", "]", "]", ")", "kwargs", ".", "update", "(", "{", "'planets'", ":", "planets", "}", ")", "# Run the model", "m", "=", "getattr", "(", "detrender", ",", "model", ")", "(", "ID", ",", "*", "*", "kwargs", ")", "# Publish?", "if", "publish", ":", "if", "csv", ":", "m", ".", "publish_csv", "(", ")", "else", ":", "m", ".", "publish", "(", ")", "else", ":", "from", ".", ".", ".", "inject", "import", "Inject", "Inject", "(", "ID", ",", "*", "*", "kwargs", ")", "return", "True" ]
A wrapper around an :py:obj:`everest` model for PBS runs.
[ "A", "wrapper", "around", "an", ":", "py", ":", "obj", ":", "everest", "model", "for", "PBS", "runs", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L536-L570
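Since EverestModel only dispatches on the model keyword, a call sketch is short; everything below other than the recorded signature (the EPIC ID and the depth keyword forwarded through **kwargs) is an illustrative assumption:
# Hedged sketch: the target ID and extra keywords are hypothetical.
from everest.missions.k2.pbs import EverestModel
EverestModel(201367065, model='nPLD')                # run a de-trending model
EverestModel(201367065, model='Inject', depth=0.01)  # kwargs forwarded to Inject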
rodluger/everest
everest/fits.py
PrimaryHDU
def PrimaryHDU(model): ''' Construct the primary HDU file containing basic header info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=0) if 'KEPMAG' not in [c[0] for c in cards]: cards.append(('KEPMAG', model.mag, 'Kepler magnitude')) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) hdu = pyfits.PrimaryHDU(header=header) return hdu
python
def PrimaryHDU(model): ''' Construct the primary HDU file containing basic header info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=0) if 'KEPMAG' not in [c[0] for c in cards]: cards.append(('KEPMAG', model.mag, 'Kepler magnitude')) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) hdu = pyfits.PrimaryHDU(header=header) return hdu
[ "def", "PrimaryHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "0", ")", "if", "'KEPMAG'", "not", "in", "[", "c", "[", "0", "]", "for", "c", "in", "cards", "]", ":", "cards", ".", "append", "(", "(", "'KEPMAG'", ",", "model", ".", "mag", ",", "'Kepler magnitude'", ")", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "hdu", "=", "pyfits", ".", "PrimaryHDU", "(", "header", "=", "header", ")", "return", "hdu" ]
Construct the primary HDU file containing basic header info.
[ "Construct", "the", "primary", "HDU", "file", "containing", "basic", "header", "info", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L35-L60
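PrimaryHDU builds its header from a plain list of (keyword, value, comment) cards; a minimal sketch of that pattern with stand-in values, assuming pyfits in the record is astropy.io.fits (the call signatures match):
import astropy.io.fits as pyfits  # assumed binding for the record's pyfits
cards = [('MISSION', 'k2', 'Mission name'),   # illustrative card values
         ('KEPMAG', 11.3, 'Kepler magnitude')]
header = pyfits.Header(cards=cards)
hdu = pyfits.PrimaryHDU(header=header)
print(repr(hdu.header))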
rodluger/everest
everest/fits.py
LightcurveHDU
def LightcurveHDU(model): ''' Construct the data HDU file containing the arrays and the observing info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=1) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) cards.append(('MODEL', model.name, 'Name of EVEREST model used')) cards.append(('APNAME', model.aperture_name, 'Name of aperture used')) cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences')) for c in range(len(model.breakpoints)): cards.append( ('BRKPT%02d' % (c + 1), model.breakpoints[c], 'Light curve breakpoint')) cards.append(('CBVNUM', model.cbv_num, 'Number of CBV signals to recover')) cards.append(('CBVNITER', model.cbv_niter, 'Number of CBV SysRem iterations')) cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs')) cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs')) cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions')) cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP')) cards.append(('CDPPR', model.cdppr, 'Raw CDPP')) cards.append(('CDPPV', model.cdppv, 'Average validation CDPP')) cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP')) for i in range(99): try: cards.append(('CDPP%02d' % (i + 1), model.cdpp_arr[i] if not np.isnan( model.cdpp_arr[i]) else 0, 'Chunk de-trended CDPP')) cards.append(('CDPPR%02d' % ( i + 1), model.cdppr_arr[i] if not np.isnan( model.cdppr_arr[i]) else 0, 'Chunk raw CDPP')) cards.append(('CDPPV%02d' % (i + 1), model.cdppv_arr[i] if not np.isnan( model.cdppv_arr[i]) else 0, 'Chunk validation CDPP')) except: break cards.append( ('CVMIN', model.cv_min, 'Cross-validation objective function')) cards.append( ('GITER', model.giter, 'Number of GP optimiziation iterations')) cards.append( ('GMAXF', model.giter, 'Max number of GP function evaluations')) cards.append(('GPFACTOR', model.gp_factor, 'GP amplitude initialization factor')) cards.append(('KERNEL', model.kernel, 'GP kernel name')) if model.kernel == 'Basic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append( ('GPTAU', model.kernel_params[2], 'GP red noise timescale (days)')) elif model.kernel == 'QuasiPeriodic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor')) cards.append(('GPPER', model.kernel_params[3], 'GP period (days)')) for c in range(len(model.breakpoints)): for o in range(model.pld_order): cards.append(('LAMB%02d%02d' % (c + 1, o + 1), model.lam[c][o], 'Cross-validation parameter')) if model.name == 'iPLD': cards.append(('RECL%02d%02d' % (c + 1, o + 1), model.reclam[c][o], 'Cross-validation parameter')) cards.append(('LEPS', model.leps, 'Cross-validation tolerance')) cards.append(('MAXPIX', model.max_pixels, 'Maximum size of TPF aperture')) for i, source in enumerate(model.nearby[:99]): cards.append(('NRBY%02dID' % (i + 1), source['ID'], 'Nearby source ID')) cards.append( ('NRBY%02dX' % (i + 1), source['x'], 'Nearby source X position')) cards.append( ('NRBY%02dY' % (i + 1), source['y'], 'Nearby source Y position')) cards.append( ('NRBY%02dM' % (i + 1), source['mag'], 'Nearby source magnitude')) cards.append(('NRBY%02dX0' % (i + 1), source['x0'], 'Nearby source reference X')) cards.append(('NRBY%02dY0' % (i + 1), source['y0'], 'Nearby source reference Y')) for i, n in enumerate(model.neighbors): cards.append( ('NEIGH%02d' % i, model.neighbors[i], 'Neighboring star used to de-trend')) cards.append(('OITER', model.oiter, 'Number of outlier search iterations')) cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?')) cards.append( ('OSIGMA', model.osigma, 'Outlier tolerance (standard deviations)')) for i, planet in enumerate(model.planets): cards.append( ('P%02dT0' % (i + 1), planet[0], 'Planet transit time (days)')) cards.append( ('P%02dPER' % (i + 1), planet[1], 'Planet transit period (days)')) cards.append( ('P%02dDUR' % (i + 1), planet[2], 'Planet transit duration (days)')) cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order')) cards.append(('SATUR', model.saturated, 'Is target saturated?')) cards.append(('SATTOL', model.saturation_tolerance, 'Fractional saturation tolerance')) # Add the EVEREST quality flags to the QUALITY array quality = np.array(model.quality) quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1) quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1) quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1) quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1) quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1) # When de-trending, we interpolated to fill in NaN fluxes. Here # we insert the NaNs back in, since there's no actual physical # information at those cadences. flux = np.array(model.flux) flux[model.nanmask] = np.nan # Create the arrays list arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn), pyfits.Column(name='FLUX', format='D', array=flux, unit='e-/s'), pyfits.Column(name='FRAW', format='D', array=model.fraw, unit='e-/s'), pyfits.Column(name='FRAW_ERR', format='D', array=model.fraw_err, unit='e-/s'), pyfits.Column(name='QUALITY', format='J', array=quality), pyfits.Column(name='TIME', format='D', array=model.time, unit='BJD - 2454833')] # Add the CBVs if model.fcor is not None: arrays += [pyfits.Column(name='FCOR', format='D', array=model.fcor, unit='e-/s')] for n in range(model.XCBV.shape[1]): arrays += [pyfits.Column(name='CBV%02d' % (n + 1), format='D', array=model.XCBV[:, n])] # Did we subtract a background term? if hasattr(model.bkg, '__len__'): arrays.append(pyfits.Column(name='BKG', format='D', array=model.bkg, unit='e-/s')) # Create the HDU header = pyfits.Header(cards=cards) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS') return hdu
python
def LightcurveHDU(model): ''' Construct the data HDU file containing the arrays and the observing info. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=1) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) cards.append(('MODEL', model.name, 'Name of EVEREST model used')) cards.append(('APNAME', model.aperture_name, 'Name of aperture used')) cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences')) for c in range(len(model.breakpoints)): cards.append( ('BRKPT%02d' % (c + 1), model.breakpoints[c], 'Light curve breakpoint')) cards.append(('CBVNUM', model.cbv_num, 'Number of CBV signals to recover')) cards.append(('CBVNITER', model.cbv_niter, 'Number of CBV SysRem iterations')) cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs')) cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs')) cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions')) cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP')) cards.append(('CDPPR', model.cdppr, 'Raw CDPP')) cards.append(('CDPPV', model.cdppv, 'Average validation CDPP')) cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP')) for i in range(99): try: cards.append(('CDPP%02d' % (i + 1), model.cdpp_arr[i] if not np.isnan( model.cdpp_arr[i]) else 0, 'Chunk de-trended CDPP')) cards.append(('CDPPR%02d' % ( i + 1), model.cdppr_arr[i] if not np.isnan( model.cdppr_arr[i]) else 0, 'Chunk raw CDPP')) cards.append(('CDPPV%02d' % (i + 1), model.cdppv_arr[i] if not np.isnan( model.cdppv_arr[i]) else 0, 'Chunk validation CDPP')) except: break cards.append( ('CVMIN', model.cv_min, 'Cross-validation objective function')) cards.append( ('GITER', model.giter, 'Number of GP optimiziation iterations')) cards.append( ('GMAXF', model.giter, 'Max number of GP function evaluations')) cards.append(('GPFACTOR', model.gp_factor, 'GP amplitude initialization factor')) cards.append(('KERNEL', model.kernel, 'GP kernel name')) if model.kernel == 'Basic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append( ('GPTAU', model.kernel_params[2], 'GP red noise timescale (days)')) elif model.kernel == 'QuasiPeriodic': cards.append( ('GPWHITE', model.kernel_params[0], 'GP white noise amplitude (e-/s)')) cards.append( ('GPRED', model.kernel_params[1], 'GP red noise amplitude (e-/s)')) cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor')) cards.append(('GPPER', model.kernel_params[3], 'GP period (days)')) for c in range(len(model.breakpoints)): for o in range(model.pld_order): cards.append(('LAMB%02d%02d' % (c + 1, o + 1), model.lam[c][o], 'Cross-validation parameter')) if model.name == 'iPLD': cards.append(('RECL%02d%02d' % (c + 1, o + 1), model.reclam[c][o], 'Cross-validation parameter')) cards.append(('LEPS', model.leps, 'Cross-validation tolerance')) cards.append(('MAXPIX', model.max_pixels, 'Maximum size of TPF aperture')) for i, source in enumerate(model.nearby[:99]): cards.append(('NRBY%02dID' % (i + 1), source['ID'], 'Nearby source ID')) cards.append( ('NRBY%02dX' % (i + 1), source['x'], 'Nearby source X position')) cards.append( ('NRBY%02dY' % (i + 1), source['y'], 'Nearby source Y position')) cards.append( ('NRBY%02dM' % (i + 1), source['mag'], 'Nearby source magnitude')) cards.append(('NRBY%02dX0' % (i + 1), source['x0'], 'Nearby source reference X')) cards.append(('NRBY%02dY0' % (i + 1), source['y0'], 'Nearby source reference Y')) for i, n in enumerate(model.neighbors): cards.append( ('NEIGH%02d' % i, model.neighbors[i], 'Neighboring star used to de-trend')) cards.append(('OITER', model.oiter, 'Number of outlier search iterations')) cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?')) cards.append( ('OSIGMA', model.osigma, 'Outlier tolerance (standard deviations)')) for i, planet in enumerate(model.planets): cards.append( ('P%02dT0' % (i + 1), planet[0], 'Planet transit time (days)')) cards.append( ('P%02dPER' % (i + 1), planet[1], 'Planet transit period (days)')) cards.append( ('P%02dDUR' % (i + 1), planet[2], 'Planet transit duration (days)')) cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order')) cards.append(('SATUR', model.saturated, 'Is target saturated?')) cards.append(('SATTOL', model.saturation_tolerance, 'Fractional saturation tolerance')) # Add the EVEREST quality flags to the QUALITY array quality = np.array(model.quality) quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1) quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1) quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1) quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1) quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1) # When de-trending, we interpolated to fill in NaN fluxes. Here # we insert the NaNs back in, since there's no actual physical # information at those cadences. flux = np.array(model.flux) flux[model.nanmask] = np.nan # Create the arrays list arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn), pyfits.Column(name='FLUX', format='D', array=flux, unit='e-/s'), pyfits.Column(name='FRAW', format='D', array=model.fraw, unit='e-/s'), pyfits.Column(name='FRAW_ERR', format='D', array=model.fraw_err, unit='e-/s'), pyfits.Column(name='QUALITY', format='J', array=quality), pyfits.Column(name='TIME', format='D', array=model.time, unit='BJD - 2454833')] # Add the CBVs if model.fcor is not None: arrays += [pyfits.Column(name='FCOR', format='D', array=model.fcor, unit='e-/s')] for n in range(model.XCBV.shape[1]): arrays += [pyfits.Column(name='CBV%02d' % (n + 1), format='D', array=model.XCBV[:, n])] # Did we subtract a background term? if hasattr(model.bkg, '__len__'): arrays.append(pyfits.Column(name='BKG', format='D', array=model.bkg, unit='e-/s')) # Create the HDU header = pyfits.Header(cards=cards) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS') return hdu
[ "def", "LightcurveHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "1", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "cards", ".", "append", "(", "(", "'MODEL'", ",", "model", ".", "name", ",", "'Name of EVEREST model used'", ")", ")", "cards", ".", "append", "(", "(", "'APNAME'", ",", "model", ".", "aperture_name", ",", "'Name of aperture used'", ")", ")", "cards", ".", "append", "(", "(", "'BPAD'", ",", "model", ".", "bpad", ",", "'Chunk overlap in cadences'", ")", ")", "for", "c", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", ":", "cards", ".", "append", "(", "(", "'BRKPT%02d'", "%", "(", "c", "+", "1", ")", ",", "model", ".", "breakpoints", "[", "c", "]", ",", "'Light curve breakpoint'", ")", ")", "cards", ".", "append", "(", "(", "'CBVNUM'", ",", "model", ".", "cbv_num", ",", "'Number of CBV signals to recover'", ")", ")", "cards", ".", "append", "(", "(", "'CBVNITER'", ",", "model", ".", "cbv_niter", ",", "'Number of CBV SysRem iterations'", ")", ")", "cards", ".", "append", "(", "(", "'CBVWIN'", ",", "model", ".", "cbv_win", ",", "'Window size for smoothing CBVs'", ")", ")", "cards", ".", "append", "(", "(", "'CBVORD'", ",", "model", ".", "cbv_order", ",", "'Order when smoothing CBVs'", ")", ")", "cards", ".", "append", "(", "(", "'CDIVS'", ",", "model", ".", "cdivs", ",", "'Cross-validation subdivisions'", ")", ")", "cards", ".", "append", "(", "(", "'CDPP'", ",", "model", ".", "cdpp", ",", "'Average de-trended CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPR'", ",", "model", ".", "cdppr", ",", "'Raw CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPV'", ",", "model", ".", "cdppv", ",", "'Average validation CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPG'", ",", "model", ".", "cdppg", ",", "'Average GP-de-trended CDPP'", ")", ")", "for", "i", "in", "range", "(", "99", ")", ":", "try", ":", "cards", ".", "append", "(", "(", "'CDPP%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdpp_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdpp_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk de-trended CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPR%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdppr_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdppr_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk raw CDPP'", ")", ")", "cards", ".", "append", "(", "(", "'CDPPV%02d'", "%", "(", "i", "+", "1", ")", ",", "model", ".", "cdppv_arr", "[", "i", "]", "if", "not", "np", ".", "isnan", "(", "model", ".", "cdppv_arr", "[", "i", "]", ")", "else", "0", ",", "'Chunk validation CDPP'", ")", ")", "except", ":", "break", "cards", ".", "append", "(", "(", "'CVMIN'", ",", "model", ".", "cv_min", ",", "'Cross-validation objective function'", ")", ")", "cards", ".", "append", "(", "(", "'GITER'", ",", "model", ".", "giter", ",", "'Number of GP optimiziation iterations'", ")", ")", "cards", ".", "append", "(", "(", "'GMAXF'", ",", "model", ".", "giter", ",", "'Max number of GP function evaluations'", ")", ")", "cards", ".", "append", "(", "(", "'GPFACTOR'", ",", "model", ".", "gp_factor", ",", "'GP amplitude initialization factor'", ")", ")", "cards", ".", "append", "(", "(", "'KERNEL'", ",", "model", ".", "kernel", ",", "'GP kernel name'", ")", ")", "if", "model", ".", "kernel", "==", "'Basic'", ":", "cards", ".", "append", "(", "(", "'GPWHITE'", ",", "model", ".", "kernel_params", "[", "0", "]", ",", "'GP white noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPRED'", ",", "model", ".", "kernel_params", "[", "1", "]", ",", "'GP red noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPTAU'", ",", "model", ".", "kernel_params", "[", "2", "]", ",", "'GP red noise timescale (days)'", ")", ")", "elif", "model", ".", "kernel", "==", "'QuasiPeriodic'", ":", "cards", ".", "append", "(", "(", "'GPWHITE'", ",", "model", ".", "kernel_params", "[", "0", "]", ",", "'GP white noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPRED'", ",", "model", ".", "kernel_params", "[", "1", "]", ",", "'GP red noise amplitude (e-/s)'", ")", ")", "cards", ".", "append", "(", "(", "'GPGAMMA'", ",", "model", ".", "kernel_params", "[", "2", "]", ",", "'GP scale factor'", ")", ")", "cards", ".", "append", "(", "(", "'GPPER'", ",", "model", ".", "kernel_params", "[", "3", "]", ",", "'GP period (days)'", ")", ")", "for", "c", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", ":", "for", "o", "in", "range", "(", "model", ".", "pld_order", ")", ":", "cards", ".", "append", "(", "(", "'LAMB%02d%02d'", "%", "(", "c", "+", "1", ",", "o", "+", "1", ")", ",", "model", ".", "lam", "[", "c", "]", "[", "o", "]", ",", "'Cross-validation parameter'", ")", ")", "if", "model", ".", "name", "==", "'iPLD'", ":", "cards", ".", "append", "(", "(", "'RECL%02d%02d'", "%", "(", "c", "+", "1", ",", "o", "+", "1", ")", ",", "model", ".", "reclam", "[", "c", "]", "[", "o", "]", ",", "'Cross-validation parameter'", ")", ")", "cards", ".", "append", "(", "(", "'LEPS'", ",", "model", ".", "leps", ",", "'Cross-validation tolerance'", ")", ")", "cards", ".", "append", "(", "(", "'MAXPIX'", ",", "model", ".", "max_pixels", ",", "'Maximum size of TPF aperture'", ")", ")", "for", "i", ",", "source", "in", "enumerate", "(", "model", ".", "nearby", "[", ":", "99", "]", ")", ":", "cards", ".", "append", "(", "(", "'NRBY%02dID'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'ID'", "]", ",", "'Nearby source ID'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dX'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'x'", "]", ",", "'Nearby source X position'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dY'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'y'", "]", ",", "'Nearby source Y position'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dM'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'mag'", "]", ",", "'Nearby source magnitude'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dX0'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'x0'", "]", ",", "'Nearby source reference X'", ")", ")", "cards", ".", "append", "(", "(", "'NRBY%02dY0'", "%", "(", "i", "+", "1", ")", ",", "source", "[", "'y0'", "]", ",", "'Nearby source reference Y'", ")", ")", "for", "i", ",", "n", "in", "enumerate", "(", "model", ".", "neighbors", ")", ":", "cards", ".", "append", "(", "(", "'NEIGH%02d'", "%", "i", ",", "model", ".", "neighbors", "[", "i", "]", ",", "'Neighboring star used to de-trend'", ")", ")", "cards", ".", "append", "(", "(", "'OITER'", ",", "model", ".", "oiter", ",", "'Number of outlier search iterations'", ")", ")", "cards", ".", "append", "(", "(", "'OPTGP'", ",", "model", ".", "optimize_gp", ",", "'GP optimization performed?'", ")", ")", "cards", ".", "append", "(", "(", "'OSIGMA'", ",", "model", ".", "osigma", ",", "'Outlier tolerance (standard deviations)'", ")", ")", "for", "i", ",", "planet", "in", "enumerate", "(", "model", ".", "planets", ")", ":", "cards", ".", "append", "(", "(", "'P%02dT0'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "0", "]", ",", "'Planet transit time (days)'", ")", ")", "cards", ".", "append", "(", "(", "'P%02dPER'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "1", "]", ",", "'Planet transit period (days)'", ")", ")", "cards", ".", "append", "(", "(", "'P%02dDUR'", "%", "(", "i", "+", "1", ")", ",", "planet", "[", "2", "]", ",", "'Planet transit duration (days)'", ")", ")", "cards", ".", "append", "(", "(", "'PLDORDER'", ",", "model", ".", "pld_order", ",", "'PLD de-trending order'", ")", ")", "cards", ".", "append", "(", "(", "'SATUR'", ",", "model", ".", "saturated", ",", "'Is target saturated?'", ")", ")", "cards", ".", "append", "(", "(", "'SATTOL'", ",", "model", ".", "saturation_tolerance", ",", "'Fractional saturation tolerance'", ")", ")", "# Add the EVEREST quality flags to the QUALITY array", "quality", "=", "np", ".", "array", "(", "model", ".", "quality", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "badmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_BAD", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "nanmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_NAN", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "outmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_OUT", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "recmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_REC", "-", "1", ")", "quality", "[", "np", ".", "array", "(", "model", ".", "transitmask", ",", "dtype", "=", "int", ")", "]", "+=", "2", "**", "(", "QUALITY_TRN", "-", "1", ")", "# When de-trending, we interpolated to fill in NaN fluxes. Here", "# we insert the NaNs back in, since there's no actual physical", "# information at those cadences.", "flux", "=", "np", ".", "array", "(", "model", ".", "flux", ")", "flux", "[", "model", ".", "nanmask", "]", "=", "np", ".", "nan", "# Create the arrays list", "arrays", "=", "[", "pyfits", ".", "Column", "(", "name", "=", "'CADN'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "cadn", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FLUX'", ",", "format", "=", "'D'", ",", "array", "=", "flux", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FRAW'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fraw", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'FRAW_ERR'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fraw_err", ",", "unit", "=", "'e-/s'", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'QUALITY'", ",", "format", "=", "'J'", ",", "array", "=", "quality", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'TIME'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "time", ",", "unit", "=", "'BJD - 2454833'", ")", "]", "# Add the CBVs", "if", "model", ".", "fcor", "is", "not", "None", ":", "arrays", "+=", "[", "pyfits", ".", "Column", "(", "name", "=", "'FCOR'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "fcor", ",", "unit", "=", "'e-/s'", ")", "]", "for", "n", "in", "range", "(", "model", ".", "XCBV", ".", "shape", "[", "1", "]", ")", ":", "arrays", "+=", "[", "pyfits", ".", "Column", "(", "name", "=", "'CBV%02d'", "%", "(", "n", "+", "1", ")", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "XCBV", "[", ":", ",", "n", "]", ")", "]", "# Did we subtract a background term?", "if", "hasattr", "(", "model", ".", "bkg", ",", "'__len__'", ")", ":", "arrays", ".", "append", "(", "pyfits", ".", "Column", "(", "name", "=", "'BKG'", ",", "format", "=", "'D'", ",", "array", "=", "model", ".", "bkg", ",", "unit", "=", "'e-/s'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "cols", "=", "pyfits", ".", "ColDefs", "(", "arrays", ")", "hdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "cols", ",", "header", "=", "header", ",", "name", "=", "'ARRAYS'", ")", "return", "hdu" ]
Construct the data HDU file containing the arrays and the observing info.
[ "Construct", "the", "data", "HDU", "file", "containing", "the", "arrays", "and", "the", "observing", "info", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L63-L226
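LightcurveHDU encodes each EVEREST mask as one bit of the QUALITY column via quality += 2 ** (FLAG - 1); a sketch of reading those masks back, with assumed bit positions (the real QUALITY_* constants are defined elsewhere in everest):
import numpy as np
QUALITY_BAD, QUALITY_NAN = 23, 24                    # assumed bit numbers
quality = np.array([0, 2 ** (QUALITY_BAD - 1), 2 ** (QUALITY_NAN - 1)])
badmask = np.where(quality & 2 ** (QUALITY_BAD - 1))[0]
nanmask = np.where(quality & 2 ** (QUALITY_NAN - 1))[0]
print(badmask, nanmask)                              # -> [1] [2]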
rodluger/everest
everest/fits.py
PixelsHDU
def PixelsHDU(model): ''' Construct the HDU containing the pixel-level light curve. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=2) # Add EVEREST info cards = [] cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) # The pixel timeseries arrays = [pyfits.Column(name='FPIX', format='%dD' % model.fpix.shape[1], array=model.fpix)] # The first order PLD vectors for all the neighbors (npixels, ncadences) X1N = model.X1N if X1N is not None: arrays.append(pyfits.Column(name='X1N', format='%dD' % X1N.shape[1], array=X1N)) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS') return hdu
python
def PixelsHDU(model): ''' Construct the HDU containing the pixel-level light curve. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=2) # Add EVEREST info cards = [] cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) # The pixel timeseries arrays = [pyfits.Column(name='FPIX', format='%dD' % model.fpix.shape[1], array=model.fpix)] # The first order PLD vectors for all the neighbors (npixels, ncadences) X1N = model.X1N if X1N is not None: arrays.append(pyfits.Column(name='X1N', format='%dD' % X1N.shape[1], array=X1N)) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS') return hdu
[ "def", "PixelsHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "2", ")", "# Add EVEREST info", "cards", "=", "[", "]", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "# The pixel timeseries", "arrays", "=", "[", "pyfits", ".", "Column", "(", "name", "=", "'FPIX'", ",", "format", "=", "'%dD'", "%", "model", ".", "fpix", ".", "shape", "[", "1", "]", ",", "array", "=", "model", ".", "fpix", ")", "]", "# The first order PLD vectors for all the neighbors (npixels, ncadences)", "X1N", "=", "model", ".", "X1N", "if", "X1N", "is", "not", "None", ":", "arrays", ".", "append", "(", "pyfits", ".", "Column", "(", "name", "=", "'X1N'", ",", "format", "=", "'%dD'", "%", "X1N", ".", "shape", "[", "1", "]", ",", "array", "=", "X1N", ")", ")", "cols", "=", "pyfits", ".", "ColDefs", "(", "arrays", ")", "hdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "cols", ",", "header", "=", "header", ",", "name", "=", "'PIXELS'", ")", "return", "hdu" ]
Construct the HDU containing the pixel-level light curve.
[ "Construct", "the", "HDU", "containing", "the", "pixel", "-", "level", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L229-L265
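Note that the recorded PixelsHDU fetches the mission cards and then immediately rebinds cards = [], so only the EVEREST cards reach this HDU's header. The variable-width FPIX column can be sketched with a made-up (ncadences, npixels) array:
import numpy as np
import astropy.io.fits as pyfits  # assumed binding for the record's pyfits
fpix = np.random.rand(100, 25)    # illustrative pixel fluxes
col = pyfits.Column(name='FPIX', format='%dD' % fpix.shape[1], array=fpix)
hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col]), name='PIXELS')
print(hdu.data['FPIX'].shape)     # -> (100, 25)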
rodluger/everest
everest/fits.py
ApertureHDU
def ApertureHDU(model): ''' Construct the HDU containing the aperture used to de-trend. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=3) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) hdu = pyfits.ImageHDU(data=model.aperture, header=header, name='APERTURE MASK') return hdu
python
def ApertureHDU(model): ''' Construct the HDU containing the aperture used to de-trend. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=3) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) hdu = pyfits.ImageHDU(data=model.aperture, header=header, name='APERTURE MASK') return hdu
[ "def", "ApertureHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "3", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "hdu", "=", "pyfits", ".", "ImageHDU", "(", "data", "=", "model", ".", "aperture", ",", "header", "=", "header", ",", "name", "=", "'APERTURE MASK'", ")", "return", "hdu" ]
Construct the HDU containing the aperture used to de-trend.
[ "Construct", "the", "HDU", "containing", "the", "aperture", "used", "to", "de", "-", "trend", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L268-L292
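Because ApertureHDU stores the mask as an image extension named 'APERTURE MASK', reading it back is a one-liner; the file name below is hypothetical:
import astropy.io.fits as pyfits  # assumed binding for the record's pyfits
with pyfits.open('everest_target.fits') as hdul:  # hypothetical output file
    aperture = hdul['APERTURE MASK'].data
print(aperture.shape)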
rodluger/everest
everest/fits.py
ImagesHDU
def ImagesHDU(model): ''' Construct the HDU containing sample postage stamp images of the target. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=4) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # The images format = '%dD' % model.pixel_images[0].shape[1] arrays = [pyfits.Column(name='STAMP1', format=format, array=model.pixel_images[0]), pyfits.Column(name='STAMP2', format=format, array=model.pixel_images[1]), pyfits.Column(name='STAMP3', format=format, array=model.pixel_images[2])] # Create the HDU header = pyfits.Header(cards=cards) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns( cols, header=header, name='POSTAGE STAMPS') return hdu
python
def ImagesHDU(model): ''' Construct the HDU containing sample postage stamp images of the target. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=4) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # The images format = '%dD' % model.pixel_images[0].shape[1] arrays = [pyfits.Column(name='STAMP1', format=format, array=model.pixel_images[0]), pyfits.Column(name='STAMP2', format=format, array=model.pixel_images[1]), pyfits.Column(name='STAMP3', format=format, array=model.pixel_images[2])] # Create the HDU header = pyfits.Header(cards=cards) cols = pyfits.ColDefs(arrays) hdu = pyfits.BinTableHDU.from_columns( cols, header=header, name='POSTAGE STAMPS') return hdu
[ "def", "ImagesHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "4", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "# The images", "format", "=", "'%dD'", "%", "model", ".", "pixel_images", "[", "0", "]", ".", "shape", "[", "1", "]", "arrays", "=", "[", "pyfits", ".", "Column", "(", "name", "=", "'STAMP1'", ",", "format", "=", "format", ",", "array", "=", "model", ".", "pixel_images", "[", "0", "]", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'STAMP2'", ",", "format", "=", "format", ",", "array", "=", "model", ".", "pixel_images", "[", "1", "]", ")", ",", "pyfits", ".", "Column", "(", "name", "=", "'STAMP3'", ",", "format", "=", "format", ",", "array", "=", "model", ".", "pixel_images", "[", "2", "]", ")", "]", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "cols", "=", "pyfits", ".", "ColDefs", "(", "arrays", ")", "hdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "cols", ",", "header", "=", "header", ",", "name", "=", "'POSTAGE STAMPS'", ")", "return", "hdu" ]
Construct the HDU containing sample postage stamp images of the target.
[ "Construct", "the", "HDU", "containing", "sample", "postage", "stamp", "images", "of", "the", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L295-L329
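ImagesHDU assumes model.pixel_images holds three 2D stamps of equal width, since shape[1] of the first stamp sets the repeat count for all three columns; an illustrative check with stand-in arrays:
import numpy as np
pixel_images = [np.random.rand(10, 12) for _ in range(3)]  # stand-in stamps
fmt = '%dD' % pixel_images[0].shape[1]
print(fmt)  # -> '12D', the column format shared by STAMP1..STAMP3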
rodluger/everest
everest/fits.py
HiResHDU
def HiResHDU(model): ''' Construct the HDU containing the hi res image of the target. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=5) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) if model.hires is not None: hdu = pyfits.ImageHDU( data=model.hires, header=header, name='HI RES IMAGE') else: hdu = pyfits.ImageHDU(data=np.empty( (0, 0), dtype=float), header=header, name='HI RES IMAGE') return hdu
python
def HiResHDU(model): ''' Construct the HDU containing the hi res image of the target. ''' # Get mission cards cards = model._mission.HDUCards(model.meta, hdu=5) # Add EVEREST info cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* EVEREST INFO *')) cards.append(('COMMENT', '************************')) cards.append(('MISSION', model.mission, 'Mission name')) cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version')) cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion')) cards.append(('DATE', strftime('%Y-%m-%d'), 'EVEREST file creation date (YYYY-MM-DD)')) # Create the HDU header = pyfits.Header(cards=cards) if model.hires is not None: hdu = pyfits.ImageHDU( data=model.hires, header=header, name='HI RES IMAGE') else: hdu = pyfits.ImageHDU(data=np.empty( (0, 0), dtype=float), header=header, name='HI RES IMAGE') return hdu
[ "def", "HiResHDU", "(", "model", ")", ":", "# Get mission cards", "cards", "=", "model", ".", "_mission", ".", "HDUCards", "(", "model", ".", "meta", ",", "hdu", "=", "5", ")", "# Add EVEREST info", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* EVEREST INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'MISSION'", ",", "model", ".", "mission", ",", "'Mission name'", ")", ")", "cards", ".", "append", "(", "(", "'VERSION'", ",", "EVEREST_MAJOR_MINOR", ",", "'EVEREST pipeline version'", ")", ")", "cards", ".", "append", "(", "(", "'SUBVER'", ",", "EVEREST_VERSION", ",", "'EVEREST pipeline subversion'", ")", ")", "cards", ".", "append", "(", "(", "'DATE'", ",", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'EVEREST file creation date (YYYY-MM-DD)'", ")", ")", "# Create the HDU", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "if", "model", ".", "hires", "is", "not", "None", ":", "hdu", "=", "pyfits", ".", "ImageHDU", "(", "data", "=", "model", ".", "hires", ",", "header", "=", "header", ",", "name", "=", "'HI RES IMAGE'", ")", "else", ":", "hdu", "=", "pyfits", ".", "ImageHDU", "(", "data", "=", "np", ".", "empty", "(", "(", "0", ",", "0", ")", ",", "dtype", "=", "float", ")", ",", "header", "=", "header", ",", "name", "=", "'HI RES IMAGE'", ")", "return", "hdu" ]
Construct the HDU containing the hi res image of the target.
[ "Construct", "the", "HDU", "containing", "the", "hi", "res", "image", "of", "the", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L332-L359
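The recorded HiResHDU falls back to a zero-size image when model.hires is None; that branch in isolation:
import numpy as np
import astropy.io.fits as pyfits  # assumed binding for the record's pyfits
hires = None                      # no hi-res image available
data = hires if hires is not None else np.empty((0, 0), dtype=float)
hdu = pyfits.ImageHDU(data=data, name='HI RES IMAGE')
print(data.shape)                 # -> (0, 0)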
rodluger/everest
everest/fits.py
MakeFITS
def MakeFITS(model, fitsfile=None): ''' Generate a FITS file for a given :py:mod:`everest` run. :param model: An :py:mod:`everest` model instance ''' # Get the fits file name if fitsfile is None: outfile = os.path.join(model.dir, model._mission.FITSFile( model.ID, model.season, model.cadence)) else: outfile = os.path.join(model.dir, fitsfile) if os.path.exists(outfile) and not model.clobber: return elif os.path.exists(outfile): os.remove(outfile) log.info('Generating FITS file...') # Create the HDUs primary = PrimaryHDU(model) lightcurve = LightcurveHDU(model) pixels = PixelsHDU(model) aperture = ApertureHDU(model) images = ImagesHDU(model) hires = HiResHDU(model) # Combine to get the HDUList hdulist = pyfits.HDUList( [primary, lightcurve, pixels, aperture, images, hires]) # Output to the FITS file hdulist.writeto(outfile) return
python
def MakeFITS(model, fitsfile=None): ''' Generate a FITS file for a given :py:mod:`everest` run. :param model: An :py:mod:`everest` model instance ''' # Get the fits file name if fitsfile is None: outfile = os.path.join(model.dir, model._mission.FITSFile( model.ID, model.season, model.cadence)) else: outfile = os.path.join(model.dir, fitsfile) if os.path.exists(outfile) and not model.clobber: return elif os.path.exists(outfile): os.remove(outfile) log.info('Generating FITS file...') # Create the HDUs primary = PrimaryHDU(model) lightcurve = LightcurveHDU(model) pixels = PixelsHDU(model) aperture = ApertureHDU(model) images = ImagesHDU(model) hires = HiResHDU(model) # Combine to get the HDUList hdulist = pyfits.HDUList( [primary, lightcurve, pixels, aperture, images, hires]) # Output to the FITS file hdulist.writeto(outfile) return
[ "def", "MakeFITS", "(", "model", ",", "fitsfile", "=", "None", ")", ":", "# Get the fits file name", "if", "fitsfile", "is", "None", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "model", ".", "dir", ",", "model", ".", "_mission", ".", "FITSFile", "(", "model", ".", "ID", ",", "model", ".", "season", ",", "model", ".", "cadence", ")", ")", "else", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "model", ".", "dir", ",", "fitsfile", ")", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", "and", "not", "model", ".", "clobber", ":", "return", "elif", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "os", ".", "remove", "(", "outfile", ")", "log", ".", "info", "(", "'Generating FITS file...'", ")", "# Create the HDUs", "primary", "=", "PrimaryHDU", "(", "model", ")", "lightcurve", "=", "LightcurveHDU", "(", "model", ")", "pixels", "=", "PixelsHDU", "(", "model", ")", "aperture", "=", "ApertureHDU", "(", "model", ")", "images", "=", "ImagesHDU", "(", "model", ")", "hires", "=", "HiResHDU", "(", "model", ")", "# Combine to get the HDUList", "hdulist", "=", "pyfits", ".", "HDUList", "(", "[", "primary", ",", "lightcurve", ",", "pixels", ",", "aperture", ",", "images", ",", "hires", "]", ")", "# Output to the FITS file", "hdulist", ".", "writeto", "(", "outfile", ")", "return" ]
Generate a FITS file for a given :py:mod:`everest` run. :param model: An :py:mod:`everest` model instance
[ "Generate", "a", "FITS", "file", "for", "a", "given", ":", "py", ":", "mod", ":", "everest", "run", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/fits.py#L362-L398
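A hedged end-to-end sketch for MakeFITS: it expects a de-trended everest model instance, and it returns early when the output file exists unless model.clobber is set. The target ID is purely illustrative:
from everest.detrender import nPLD   # nPLD de-trends the target on construction
from everest.fits import MakeFITS
model = nPLD(201367065)              # hypothetical EPIC target
MakeFITS(model)                      # default mission FITS name in model.dir
MakeFITS(model, fitsfile='custom.fits')  # or an explicit file name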
lsbardel/python-stdnet
stdnet/odm/utils.py
get_serializer
def get_serializer(name, **options): '''Retrieve a serializer register as *name*. If the serializer is not available a ``ValueError`` exception will raise. A common usage pattern:: qs = MyModel.objects.query().sort_by('id') s = odm.get_serializer('json') s.dump(qs) ''' if name in _serializers: serializer = _serializers[name] return serializer(**options) else: raise ValueError('Unknown serializer {0}.'.format(name))
python
def get_serializer(name, **options): '''Retrieve a serializer register as *name*. If the serializer is not available a ``ValueError`` exception will raise. A common usage pattern:: qs = MyModel.objects.query().sort_by('id') s = odm.get_serializer('json') s.dump(qs) ''' if name in _serializers: serializer = _serializers[name] return serializer(**options) else: raise ValueError('Unknown serializer {0}.'.format(name))
[ "def", "get_serializer", "(", "name", ",", "*", "*", "options", ")", ":", "if", "name", "in", "_serializers", ":", "serializer", "=", "_serializers", "[", "name", "]", "return", "serializer", "(", "*", "*", "options", ")", "else", ":", "raise", "ValueError", "(", "'Unknown serializer {0}.'", ".", "format", "(", "name", ")", ")" ]
Retrieve a serializer register as *name*. If the serializer is not available a ``ValueError`` exception will raise. A common usage pattern:: qs = MyModel.objects.query().sort_by('id') s = odm.get_serializer('json') s.dump(qs)
[ "Retrieve", "a", "serializer", "register", "as", "*", "name", "*", ".", "If", "the", "serializer", "is", "not", "available", "a", "ValueError", "exception", "will", "raise", ".", "A", "common", "usage", "pattern", "::", "qs", "=", "MyModel", ".", "objects", ".", "query", "()", ".", "sort_by", "(", "id", ")", "s", "=", "odm", ".", "get_serializer", "(", "json", ")", "s", ".", "dump", "(", "qs", ")" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/utils.py#L34-L47
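Both branches of get_serializer in one short sketch, reusing the names from the docstring's own usage pattern:
from stdnet import odm
s = odm.get_serializer('json')       # a fresh serializer instance
try:
    odm.get_serializer('no-such-format')
except ValueError as e:
    print(e)                         # -> Unknown serializer no-such-format.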
lsbardel/python-stdnet
stdnet/odm/utils.py
register_serializer
def register_serializer(name, serializer): '''\ Register a new serializer to the library. :parameter name: serializer name (it can override existing serializers). :parameter serializer: an instance or a derived class of a :class:`stdnet.odm.Serializer` class or a callable. ''' if not isclass(serializer): serializer = serializer.__class__ _serializers[name] = serializer
python
def register_serializer(name, serializer): '''\ Register a new serializer to the library. :parameter name: serializer name (it can override existing serializers). :parameter serializer: an instance or a derived class of a :class:`stdnet.odm.Serializer` class or a callable. ''' if not isclass(serializer): serializer = serializer.__class__ _serializers[name] = serializer
[ "def", "register_serializer", "(", "name", ",", "serializer", ")", ":", "if", "not", "isclass", "(", "serializer", ")", ":", "serializer", "=", "serializer", ".", "__class__", "_serializers", "[", "name", "]", "=", "serializer" ]
\ Register a new serializer to the library. :parameter name: serializer name (it can override existing serializers). :parameter serializer: an instance or a derived class of a :class:`stdnet.odm.Serializer` class or a callable.
[ "\\", "Register", "a", "new", "serializer", "to", "the", "library", ".", ":", "parameter", "name", ":", "serializer", "name", "(", "it", "can", "override", "existing", "serializers", ")", ".", ":", "parameter", "serializer", ":", "an", "instance", "or", "a", "derived", "class", "of", "a", ":", "class", ":", "stdnet", ".", "odm", ".", "Serializer", "class", "or", "a", "callable", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/utils.py#L50-L60
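A sketch of registering a custom serializer; the subclass body is hypothetical, assuming the stdnet.odm.Serializer base class named in the docstring:
from stdnet import odm
class CsvSerializer(odm.Serializer):  # minimal, hypothetical subclass
    def dump(self, qs):
        pass                          # would write queryset rows as CSV
odm.register_serializer('csv', CsvSerializer)
s = odm.get_serializer('csv')         # now retrievable by name via get_serializer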
rodluger/everest
everest/masksolve.py
MaskSolve
def MaskSolve(A, b, w=5, progress=True, niter=None): ''' Finds the solution `x` to the linear problem A x = b for all contiguous `w`-sized masks applied to the rows and columns of `A` and to the entries of `b`. Returns an array `X` of shape `(N - w + 1, N - w)`, where the `nth` row is the solution to the equation A[![n,n+w)] x = b[![n,n+w)] where ![n,n+w) indicates that indices in the range [n,n+w) have been masked. ''' # Ensure we have choldate installed if cholupdate is None: log.info("Running the slow version of `MaskSolve`.") log.info("Install the `choldate` package for better performance.") log.info("https://github.com/rodluger/choldate") return MaskSolveSlow(A, b, w=w, progress=progress, niter=niter) # Number of data points N = b.shape[0] # How many iterations? Default is to go through # the entire dataset if niter is None: niter = N - w + 1 # Our result matrix X = np.empty((niter, N - w)) # Solve the first two steps explicitly. for n in range(2): mask = np.arange(n, w + n) A_ = np.delete(np.delete(A, mask, axis=0), mask, axis=1) b_ = np.delete(b, mask) U = cholesky(A_) X[n] = cho_solve((U, False), b_) # Iterate! for n in prange(1, niter - 1): # Update the data vector. b_[n] = b[n] # Remove a row. S33 = U[n + 1:, n + 1:] S23 = U[n, n + 1:] cholupdate(S33, S23) # Add a row. A12 = A[:n, n] A22 = A[n, n] A23 = A[n, n + w + 1:] S11 = U[:n, :n] S12 = solve_triangular(S11.T, A12, lower=True, check_finite=False, trans=0, overwrite_b=True) S22 = np.sqrt(A22 - np.dot(S12.T, S12)) S13 = U[:n, n + 1:] S23 = (A23 - np.dot(S12.T, S13)) / S22 choldowndate(S33, np.array(S23)) U[:n, n] = S12 U[n, n] = S22 U[n, n + 1:] = S23 U[n + 1:, n + 1:] = S33 # Now we can solve our linear equation X[n + 1] = cho_solve((U, False), b_) # Return the matrix return X
python
def MaskSolve(A, b, w=5, progress=True, niter=None):
    '''
    Finds the solution `x` to the linear problem

        A x = b

    for all contiguous `w`-sized masks applied to
    the rows and columns of `A` and to the entries of `b`.

    Returns an array `X` of shape `(N - w + 1, N - w)`,
    where the `nth` row is the solution to the equation

        A[![n,n+w)] x = b[![n,n+w)]

    where ![n,n+w) indicates that indices in the range
    [n,n+w) have been masked.

    '''
    # Ensure we have choldate installed
    if cholupdate is None:
        log.info("Running the slow version of `MaskSolve`.")
        log.info("Install the `choldate` package for better performance.")
        log.info("https://github.com/rodluger/choldate")
        return MaskSolveSlow(A, b, w=w, progress=progress, niter=niter)

    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Solve the first two steps explicitly.
    for n in range(2):
        mask = np.arange(n, w + n)
        A_ = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        b_ = np.delete(b, mask)
        U = cholesky(A_)
        X[n] = cho_solve((U, False), b_)

    # Iterate!
    for n in prange(1, niter - 1):

        # Update the data vector.
        b_[n] = b[n]

        # Remove a row.
        S33 = U[n + 1:, n + 1:]
        S23 = U[n, n + 1:]
        cholupdate(S33, S23)

        # Add a row.
        A12 = A[:n, n]
        A22 = A[n, n]
        A23 = A[n, n + w + 1:]
        S11 = U[:n, :n]
        S12 = solve_triangular(S11.T, A12, lower=True,
                               check_finite=False, trans=0,
                               overwrite_b=True)
        S22 = np.sqrt(A22 - np.dot(S12.T, S12))
        S13 = U[:n, n + 1:]
        S23 = (A23 - np.dot(S12.T, S13)) / S22
        choldowndate(S33, np.array(S23))
        U[:n, n] = S12
        U[n, n] = S22
        U[n, n + 1:] = S23
        U[n + 1:, n + 1:] = S33

        # Now we can solve our linear equation
        X[n + 1] = cho_solve((U, False), b_)

    # Return the matrix
    return X
[ "def", "MaskSolve", "(", "A", ",", "b", ",", "w", "=", "5", ",", "progress", "=", "True", ",", "niter", "=", "None", ")", ":", "# Ensure we have choldate installed\r", "if", "cholupdate", "is", "None", ":", "log", ".", "info", "(", "\"Running the slow version of `MaskSolve`.\"", ")", "log", ".", "info", "(", "\"Install the `choldate` package for better performance.\"", ")", "log", ".", "info", "(", "\"https://github.com/rodluger/choldate\"", ")", "return", "MaskSolveSlow", "(", "A", ",", "b", ",", "w", "=", "w", ",", "progress", "=", "progress", ",", "niter", "=", "niter", ")", "# Number of data points\r", "N", "=", "b", ".", "shape", "[", "0", "]", "# How many iterations? Default is to go through\r", "# the entire dataset\r", "if", "niter", "is", "None", ":", "niter", "=", "N", "-", "w", "+", "1", "# Our result matrix\r", "X", "=", "np", ".", "empty", "(", "(", "niter", ",", "N", "-", "w", ")", ")", "# Solve the first two steps explicitly.\r", "for", "n", "in", "range", "(", "2", ")", ":", "mask", "=", "np", ".", "arange", "(", "n", ",", "w", "+", "n", ")", "A_", "=", "np", ".", "delete", "(", "np", ".", "delete", "(", "A", ",", "mask", ",", "axis", "=", "0", ")", ",", "mask", ",", "axis", "=", "1", ")", "b_", "=", "np", ".", "delete", "(", "b", ",", "mask", ")", "U", "=", "cholesky", "(", "A_", ")", "X", "[", "n", "]", "=", "cho_solve", "(", "(", "U", ",", "False", ")", ",", "b_", ")", "# Iterate!\r", "for", "n", "in", "prange", "(", "1", ",", "niter", "-", "1", ")", ":", "# Update the data vector.\r", "b_", "[", "n", "]", "=", "b", "[", "n", "]", "# Remove a row.\r", "S33", "=", "U", "[", "n", "+", "1", ":", ",", "n", "+", "1", ":", "]", "S23", "=", "U", "[", "n", ",", "n", "+", "1", ":", "]", "cholupdate", "(", "S33", ",", "S23", ")", "# Add a row.\r", "A12", "=", "A", "[", ":", "n", ",", "n", "]", "A22", "=", "A", "[", "n", ",", "n", "]", "A23", "=", "A", "[", "n", ",", "n", "+", "w", "+", "1", ":", "]", "S11", "=", "U", "[", ":", "n", ",", ":", "n", "]", "S12", "=", "solve_triangular", "(", "S11", ".", "T", ",", "A12", ",", "lower", "=", "True", ",", "check_finite", "=", "False", ",", "trans", "=", "0", ",", "overwrite_b", "=", "True", ")", "S22", "=", "np", ".", "sqrt", "(", "A22", "-", "np", ".", "dot", "(", "S12", ".", "T", ",", "S12", ")", ")", "S13", "=", "U", "[", ":", "n", ",", "n", "+", "1", ":", "]", "S23", "=", "(", "A23", "-", "np", ".", "dot", "(", "S12", ".", "T", ",", "S13", ")", ")", "/", "S22", "choldowndate", "(", "S33", ",", "np", ".", "array", "(", "S23", ")", ")", "U", "[", ":", "n", ",", "n", "]", "=", "S12", "U", "[", "n", ",", "n", "]", "=", "S22", "U", "[", "n", ",", "n", "+", "1", ":", "]", "=", "S23", "U", "[", "n", "+", "1", ":", ",", "n", "+", "1", ":", "]", "=", "S33", "# Now we can solve our linear equation\r", "X", "[", "n", "+", "1", "]", "=", "cho_solve", "(", "(", "U", ",", "False", ")", ",", "b_", ")", "# Return the matrix\r", "return", "X" ]
Finds the solution `x` to the linear problem A x = b for all contiguous `w`-sized masks applied to the rows and columns of `A` and to the entries of `b`. Returns an array `X` of shape `(N - w + 1, N - w)`, where the `nth` row is the solution to the equation A[![n,n+w)] x = b[![n,n+w)] where ![n,n+w) indicates that indices in the range [n,n+w) have been masked.
[ "Finds", "the", "solution", "x", "to", "the", "linear", "problem", "A", "x", "=", "b", "for", "all", "contiguous", "w", "-", "sized", "masks", "applied", "to", "the", "rows", "and", "columns", "of", "A", "and", "to", "the", "entries", "of", "b", ".", "Returns", "an", "array", "X", "of", "shape", "(", "N", "-", "w", "+", "1", "N", "-", "w", ")", "where", "the", "nth", "row", "is", "the", "solution", "to", "the", "equation", "A", "[", "!", "[", "n", "n", "+", "w", ")", "]", "x", "=", "b", "[", "!", "[", "n", "n", "+", "w", ")", "]", "where", "!", "[", "n", "n", "+", "w", ")", "indicates", "that", "indices", "in", "the", "range", "[", "n", "n", "+", "w", ")", "have", "been", "masked", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/masksolve.py#L25-L102
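A small driving example for the function above; it assumes only that `MaskSolve` is importable as `everest.masksolve.MaskSolve` (consistent with the file path in this record), and the kernel and data below are made up.

import numpy as np
from everest.masksolve import MaskSolve  # import path assumed

N, w = 50, 5
t = np.linspace(0, 1, N)
# A squared-exponential covariance plus diagonal jitter keeps A symmetric
# positive definite, which the Cholesky machinery above requires.
A = np.exp(-0.5 * (t[:, None] - t[None, :]) ** 2 / 0.1 ** 2)
A[np.diag_indices_from(A)] += 0.1
b = np.sin(10 * t)

X = MaskSolve(A, b, w=w)
# One solution per mask position: rows/columns [n, n + w) are excluded.
assert X.shape == (N - w + 1, N - w)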
rodluger/everest
everest/masksolve.py
MaskSolveSlow
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution
    the brute-force way.

    '''
    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Iterate! The mask at step `n` goes from
    # data index `n` to data index `n+w-1` (inclusive).
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)

    return X
python
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution
    the brute-force way.

    '''
    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Iterate! The mask at step `n` goes from
    # data index `n` to data index `n+w-1` (inclusive).
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)

    return X
[ "def", "MaskSolveSlow", "(", "A", ",", "b", ",", "w", "=", "5", ",", "progress", "=", "True", ",", "niter", "=", "None", ")", ":", "# Number of data points\r", "N", "=", "b", ".", "shape", "[", "0", "]", "# How many iterations? Default is to go through\r", "# the entire dataset\r", "if", "niter", "is", "None", ":", "niter", "=", "N", "-", "w", "+", "1", "# Our result matrix\r", "X", "=", "np", ".", "empty", "(", "(", "niter", ",", "N", "-", "w", ")", ")", "# Iterate! The mask at step `n` goes from\r", "# data index `n` to data index `n+w-1` (inclusive).\r", "for", "n", "in", "prange", "(", "niter", ")", ":", "mask", "=", "np", ".", "arange", "(", "n", ",", "n", "+", "w", ")", "An", "=", "np", ".", "delete", "(", "np", ".", "delete", "(", "A", ",", "mask", ",", "axis", "=", "0", ")", ",", "mask", ",", "axis", "=", "1", ")", "Un", "=", "cholesky", "(", "An", ")", "bn", "=", "np", ".", "delete", "(", "b", ",", "mask", ")", "X", "[", "n", "]", "=", "cho_solve", "(", "(", "Un", ",", "False", ")", ",", "bn", ")", "return", "X" ]
Identical to `MaskSolve`, but computes the solution the brute-force way.
[ "Identical", "to", "MaskSolve", "but", "computes", "the", "solution", "the", "brute", "-", "force", "way", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/masksolve.py#L105-L132
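The brute-force path re-factorizes `A` from scratch at every mask position, so it scales as `niter * O(N^3)`, whereas `MaskSolve` reuses one Cholesky factor via rank-one up/downdates. A quick consistency check between the two, reusing `A`, `b`, and `w` from the sketch after the `MaskSolve` record above (import path assumed, as before):

from everest.masksolve import MaskSolveSlow  # import path assumed

X_fast = MaskSolve(A, b, w=w)
X_slow = MaskSolveSlow(A, b, w=w)
assert np.allclose(X_fast, X_slow)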
rodluger/everest
everest/basecamp.py
Overfitting.unmasked
def unmasked(self, depth=0.01):
    """Return the unmasked overfitting metric for a given transit depth."""
    return 1 - (np.hstack(self._O2) +
                np.hstack(self._O3) / depth) / np.hstack(self._O1)
python
def unmasked(self, depth=0.01):
    """Return the unmasked overfitting metric for a given transit depth."""
    return 1 - (np.hstack(self._O2) +
                np.hstack(self._O3) / depth) / np.hstack(self._O1)
[ "def", "unmasked", "(", "self", ",", "depth", "=", "0.01", ")", ":", "return", "1", "-", "(", "np", ".", "hstack", "(", "self", ".", "_O2", ")", "+", "np", ".", "hstack", "(", "self", ".", "_O3", ")", "/", "depth", ")", "/", "np", ".", "hstack", "(", "self", ".", "_O1", ")" ]
Return the unmasked overfitting metric for a given transit depth.
[ "Return", "the", "unmasked", "overfitting", "metric", "for", "a", "given", "transit", "depth", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L52-L55
rodluger/everest
everest/basecamp.py
Overfitting.show
def show(self):
    """Show the overfitting PDF summary."""
    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', self.pdf])
        elif os.name == 'nt':
            os.startfile(self.pdf)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', self.pdf])
        else:
            raise IOError("")
    except IOError:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(self.pdf)
python
def show(self):
    """Show the overfitting PDF summary."""
    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', self.pdf])
        elif os.name == 'nt':
            os.startfile(self.pdf)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', self.pdf])
        else:
            raise IOError("")
    except IOError:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(self.pdf)
[ "def", "show", "(", "self", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", ".", "startswith", "(", "'darwin'", ")", ":", "subprocess", ".", "call", "(", "[", "'open'", ",", "self", ".", "pdf", "]", ")", "elif", "os", ".", "name", "==", "'nt'", ":", "os", ".", "startfile", "(", "self", ".", "pdf", ")", "elif", "os", ".", "name", "==", "'posix'", ":", "subprocess", ".", "call", "(", "[", "'xdg-open'", ",", "self", ".", "pdf", "]", ")", "else", ":", "raise", "IOError", "(", "\"\"", ")", "except", "IOError", ":", "log", ".", "info", "(", "\"Unable to open the pdf. Try opening it manually:\"", ")", "log", ".", "info", "(", "self", ".", "pdf", ")" ]
Show the overfitting PDF summary.
[ "Show", "the", "overfitting", "PDF", "summary", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L57-L70
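The `show` method above is an instance of a reusable cross-platform "open with the default application" idiom. A standalone sketch of the same pattern, not part of everest's API:

import os
import platform
import subprocess

def open_with_default_app(path):
    """Best-effort open of `path` in the OS default viewer."""
    if platform.system().lower().startswith('darwin'):
        subprocess.call(['open', path])        # macOS
    elif os.name == 'nt':
        os.startfile(path)                     # Windows only
    elif os.name == 'posix':
        subprocess.call(['xdg-open', path])    # Linux/BSD desktops
    else:
        raise IOError("no opener known for this platform")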
rodluger/everest
everest/basecamp.py
Basecamp.season
def season(self):
    """
    Return the current observing season.

    For *K2*, this is the observing campaign, while for *Kepler*,
    it is the current quarter.

    """
    try:
        self._season
    except AttributeError:
        self._season = self._mission.Season(self.ID)
        if hasattr(self._season, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s." %
                self._season)
    return self._season
python
def season(self):
    """
    Return the current observing season.

    For *K2*, this is the observing campaign, while for *Kepler*,
    it is the current quarter.

    """
    try:
        self._season
    except AttributeError:
        self._season = self._mission.Season(self.ID)
        if hasattr(self._season, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s." %
                self._season)
    return self._season
[ "def", "season", "(", "self", ")", ":", "try", ":", "self", ".", "_season", "except", "AttributeError", ":", "self", ".", "_season", "=", "self", ".", "_mission", ".", "Season", "(", "self", ".", "ID", ")", "if", "hasattr", "(", "self", ".", "_season", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "self", ".", "_season", ")", "return", "self", ".", "_season" ]
Return the current observing season. For *K2*, this is the observing campaign, while for *Kepler*, it is the current quarter.
[ "Return", "the", "current", "observing", "season", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L129-L145
rodluger/everest
everest/basecamp.py
Basecamp.fcor
def fcor(self):
    '''
    The CBV-corrected de-trended flux.

    '''
    if self.XCBV is None:
        return None
    else:
        return self.flux - self._mission.FitCBVs(self)
python
def fcor(self):
    '''
    The CBV-corrected de-trended flux.

    '''
    if self.XCBV is None:
        return None
    else:
        return self.flux - self._mission.FitCBVs(self)
[ "def", "fcor", "(", "self", ")", ":", "if", "self", ".", "XCBV", "is", "None", ":", "return", "None", "else", ":", "return", "self", ".", "flux", "-", "self", ".", "_mission", ".", "FitCBVs", "(", "self", ")" ]
The CBV-corrected de-trended flux.
[ "The", "CBV", "-", "corrected", "de", "-", "trended", "flux", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L174-L183
rodluger/everest
everest/basecamp.py
Basecamp.mask
def mask(self):
    '''
    The array of indices to be masked. This is the union of the sets of
    outliers, bad (flagged) cadences, transit cadences, and
    :py:obj:`NaN` cadences.

    '''
    return np.array(list(set(np.concatenate([self.outmask, self.badmask,
                                             self.transitmask,
                                             self.nanmask]))), dtype=int)
python
def mask(self):
    '''
    The array of indices to be masked. This is the union of the sets of
    outliers, bad (flagged) cadences, transit cadences, and
    :py:obj:`NaN` cadences.

    '''
    return np.array(list(set(np.concatenate([self.outmask, self.badmask,
                                             self.transitmask,
                                             self.nanmask]))), dtype=int)
[ "def", "mask", "(", "self", ")", ":", "return", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "outmask", ",", "self", ".", "badmask", ",", "self", ".", "transitmask", ",", "self", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")" ]
The array of indices to be masked. This is the union of the sets of outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN` cadences.
[ "The", "array", "of", "indices", "to", "be", "masked", ".", "This", "is", "the", "union", "of", "the", "sets", "of", "outliers", "bad", "(", "flagged", ")", "cadences", "transit", "cadences", "and", ":", "py", ":", "obj", ":", "NaN", "cadences", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L232-L241
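The union-of-index-sets idiom used by `mask`, in isolation: `set` collapses duplicate indices and the result comes back as a plain integer array. Iteration order of a set is not guaranteed, which is harmless here because the mask is only ever used for indexing and deletion. Toy values, not everest data:

import numpy as np

outmask = np.array([3, 7], dtype=int)
badmask = np.array([7, 12], dtype=int)
transitmask = np.array([], dtype=int)
nanmask = np.array([0], dtype=int)

mask = np.array(list(set(np.concatenate(
    [outmask, badmask, transitmask, nanmask]))), dtype=int)
# Contains {0, 3, 7, 12}, each exactly once.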
rodluger/everest
everest/basecamp.py
Basecamp.X
def X(self, i, j=slice(None, None, None)):
    '''
    Computes the design matrix at the given *PLD* order and the given
    indices. The columns are the *PLD* vectors for the target at the
    corresponding order, computed as the product of the fractional pixel
    flux of all sets of :py:obj:`n` pixels, where :py:obj:`n` is the
    *PLD* order.

    '''
    X1 = self.fpix[j] / self.norm[j].reshape(-1, 1)
    X = np.product(list(multichoose(X1.T, i + 1)), axis=1).T
    if self.X1N is not None:
        return np.hstack([X, self.X1N[j] ** (i + 1)])
    else:
        return X
python
def X(self, i, j=slice(None, None, None)):
    '''
    Computes the design matrix at the given *PLD* order and the given
    indices. The columns are the *PLD* vectors for the target at the
    corresponding order, computed as the product of the fractional pixel
    flux of all sets of :py:obj:`n` pixels, where :py:obj:`n` is the
    *PLD* order.

    '''
    X1 = self.fpix[j] / self.norm[j].reshape(-1, 1)
    X = np.product(list(multichoose(X1.T, i + 1)), axis=1).T
    if self.X1N is not None:
        return np.hstack([X, self.X1N[j] ** (i + 1)])
    else:
        return X
[ "def", "X", "(", "self", ",", "i", ",", "j", "=", "slice", "(", "None", ",", "None", ",", "None", ")", ")", ":", "X1", "=", "self", ".", "fpix", "[", "j", "]", "/", "self", ".", "norm", "[", "j", "]", ".", "reshape", "(", "-", "1", ",", "1", ")", "X", "=", "np", ".", "product", "(", "list", "(", "multichoose", "(", "X1", ".", "T", ",", "i", "+", "1", ")", ")", ",", "axis", "=", "1", ")", ".", "T", "if", "self", ".", "X1N", "is", "not", "None", ":", "return", "np", ".", "hstack", "(", "[", "X", ",", "self", ".", "X1N", "[", "j", "]", "**", "(", "i", "+", "1", ")", "]", ")", "else", ":", "return", "X" ]
Computes the design matrix at the given *PLD* order and the given indices. The columns are the *PLD* vectors for the target at the corresponding order, computed as the product of the fractional pixel flux of all sets of :py:obj:`n` pixels, where :py:obj:`n` is the *PLD* order.
[ "Computes", "the", "design", "matrix", "at", "the", "given", "*", "PLD", "*", "order", "and", "the", "given", "indices", ".", "The", "columns", "are", "the", "*", "PLD", "*", "vectors", "for", "the", "target", "at", "the", "corresponding", "order", "computed", "as", "the", "product", "of", "the", "fractional", "pixel", "flux", "of", "all", "sets", "of", ":", "py", ":", "obj", ":", "n", "pixels", "where", ":", "py", ":", "obj", ":", "n", "is", "the", "*", "PLD", "*", "order", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L312-L327
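What the `multichoose` product in `Basecamp.X` builds, shown at second order on toy data. The sketch assumes `multichoose` is an alias for `itertools.combinations_with_replacement` (consistent with the call signature above, though not stated in this record) and uses `np.prod` in place of the deprecated `np.product`:

import numpy as np
from itertools import combinations_with_replacement as multichoose

fpix = np.random.rand(10, 3)              # 10 cadences, 3 aperture pixels
norm = fpix.sum(axis=1)                   # simple SAP-flux normalization
X1 = fpix / norm.reshape(-1, 1)           # first-order PLD vectors (i = 0)

# Second order (i = 1): one column per unordered pair of pixels,
# i.e. 6 columns for 3 pixels (pairs with replacement).
X2 = np.prod(list(multichoose(X1.T, 2)), axis=1).T
assert X2.shape == (10, 6)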
rodluger/everest
everest/basecamp.py
Basecamp.plot_info
def plot_info(self, dvs):
    '''
    Plots miscellaneous de-trending information on the data
    validation summary figure.

    :param dvs: A :py:class:`dvs.DVS` figure instance

    '''
    axl, axc, axr = dvs.title()
    axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=18)

    axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" %
                 (self.cdppr, self.cdpp),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8,
                 color='k', fontstyle='italic')

    axl.annotate("%s %s%02d: %s" %
                 (self.mission.upper(), self._mission.SEASONCHAR,
                  self.season, self.name),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12, color='k')

    axl.annotate(self.aperture_name if len(self.neighbors) == 0
                 else "%s, %d neighbors" %
                 (self.aperture_name, len(self.neighbors)),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8,
                 color='k', fontstyle='italic')

    axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12, color='k')

    if not np.isnan(self.cdppg) and self.cdppg > 0:
        axr.annotate(r"GP %.3f ppm" % (self.cdppg),
                     xy=(0.5, 0.2), xycoords='axes fraction',
                     ha='center', va='center', fontsize=8,
                     color='k', fontstyle='italic')
python
def plot_info(self, dvs):
    '''
    Plots miscellaneous de-trending information on the data
    validation summary figure.

    :param dvs: A :py:class:`dvs.DVS` figure instance

    '''
    axl, axc, axr = dvs.title()
    axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=18)

    axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" %
                 (self.cdppr, self.cdpp),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8,
                 color='k', fontstyle='italic')

    axl.annotate("%s %s%02d: %s" %
                 (self.mission.upper(), self._mission.SEASONCHAR,
                  self.season, self.name),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12, color='k')

    axl.annotate(self.aperture_name if len(self.neighbors) == 0
                 else "%s, %d neighbors" %
                 (self.aperture_name, len(self.neighbors)),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8,
                 color='k', fontstyle='italic')

    axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12, color='k')

    if not np.isnan(self.cdppg) and self.cdppg > 0:
        axr.annotate(r"GP %.3f ppm" % (self.cdppg),
                     xy=(0.5, 0.2), xycoords='axes fraction',
                     ha='center', va='center', fontsize=8,
                     color='k', fontstyle='italic')
[ "def", "plot_info", "(", "self", ",", "dvs", ")", ":", "axl", ",", "axc", ",", "axr", "=", "dvs", ".", "title", "(", ")", "axc", ".", "annotate", "(", "\"%s %d\"", "%", "(", "self", ".", "_mission", ".", "IDSTRING", ",", "self", ".", "ID", ")", ",", "xy", "=", "(", "0.5", ",", "0.5", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "18", ")", "axc", ".", "annotate", "(", "r\"%.2f ppm $\\rightarrow$ %.2f ppm\"", "%", "(", "self", ".", "cdppr", ",", "self", ".", "cdpp", ")", ",", "xy", "=", "(", "0.5", ",", "0.2", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "8", ",", "color", "=", "'k'", ",", "fontstyle", "=", "'italic'", ")", "axl", ".", "annotate", "(", "\"%s %s%02d: %s\"", "%", "(", "self", ".", "mission", ".", "upper", "(", ")", ",", "self", ".", "_mission", ".", "SEASONCHAR", ",", "self", ".", "season", ",", "self", ".", "name", ")", ",", "xy", "=", "(", "0.5", ",", "0.5", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "12", ",", "color", "=", "'k'", ")", "axl", ".", "annotate", "(", "self", ".", "aperture_name", "if", "len", "(", "self", ".", "neighbors", ")", "==", "0", "else", "\"%s, %d neighbors\"", "%", "(", "self", ".", "aperture_name", ",", "len", "(", "self", ".", "neighbors", ")", ")", ",", "xy", "=", "(", "0.5", ",", "0.2", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "8", ",", "color", "=", "'k'", ",", "fontstyle", "=", "'italic'", ")", "axr", ".", "annotate", "(", "\"%s %.3f\"", "%", "(", "self", ".", "_mission", ".", "MAGSTRING", ",", "self", ".", "mag", ")", ",", "xy", "=", "(", "0.5", ",", "0.5", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "12", ",", "color", "=", "'k'", ")", "if", "not", "np", ".", "isnan", "(", "self", ".", "cdppg", ")", "and", "self", ".", "cdppg", ">", "0", ":", "axr", ".", "annotate", "(", "r\"GP %.3f ppm\"", "%", "(", "self", ".", "cdppg", ")", ",", "xy", "=", "(", "0.5", ",", "0.2", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "fontsize", "=", "8", ",", "color", "=", "'k'", ",", "fontstyle", "=", "'italic'", ")" ]
Plots miscellaneous de-trending information on the data validation summary figure. :param dvs: A :py:class:`dvs.DVS` figure instance
[ "Plots", "miscellaneous", "de", "-", "trending", "information", "on", "the", "data", "validation", "summary", "figure", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L329-L372
rodluger/everest
everest/basecamp.py
Basecamp.compute
def compute(self):
    '''
    Compute the model for the current value of lambda.

    '''
    # Is there a transit model?
    if self.transit_model is not None:
        return self.compute_joint()

    log.info('Computing the model...')

    # Loop over all chunks
    model = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)

        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])

        # Get median
        med = np.nanmedian(self.fraw[m])

        # Normalize the flux
        f = self.fraw[m] - med

        # The X^2 matrices
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))

        # Loop over all orders
        for n in range(self.pld_order):

            # Only compute up to the current PLD order
            if (self.lam_idx >= n) and (self.lam[b][n] is not None):
                XM = self.X(n, m)
                XC = self.X(n, c)
                A += self.lam[b][n] * np.dot(XM, XM.T)
                B += self.lam[b][n] * np.dot(XC, XM.T)
                del XM, XC

        # Compute the model
        W = np.linalg.solve(mK + A, f)
        model[b] = np.dot(B, W)

        # Free up some memory
        del A, B, W

    # Join the chunks after applying the correct offset
    if len(model) > 1:

        # First chunk
        self.model = model[0][:-self.bpad]

        # Center chunks
        for m in model[1:-1]:

            # Join the chunks at the first non-outlier cadence
            i = 1
            while len(self.model) - i in self.mask:
                i += 1
            offset = self.model[-i] - m[self.bpad - i]
            self.model = np.concatenate(
                [self.model, m[self.bpad:-self.bpad] + offset])

        # Last chunk
        i = 1
        while len(self.model) - i in self.mask:
            i += 1
        offset = self.model[-i] - model[-1][self.bpad - i]
        self.model = np.concatenate(
            [self.model, model[-1][self.bpad:] + offset])

    else:
        self.model = model[0]

    # Subtract the global median
    self.model -= np.nanmedian(self.model)

    # Get the CDPP and reset the weights
    self.cdpp_arr = self.get_cdpp_arr()
    self.cdpp = self.get_cdpp()
    self._weights = None
python
def compute(self):
    '''
    Compute the model for the current value of lambda.

    '''
    # Is there a transit model?
    if self.transit_model is not None:
        return self.compute_joint()

    log.info('Computing the model...')

    # Loop over all chunks
    model = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)

        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])

        # Get median
        med = np.nanmedian(self.fraw[m])

        # Normalize the flux
        f = self.fraw[m] - med

        # The X^2 matrices
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))

        # Loop over all orders
        for n in range(self.pld_order):

            # Only compute up to the current PLD order
            if (self.lam_idx >= n) and (self.lam[b][n] is not None):
                XM = self.X(n, m)
                XC = self.X(n, c)
                A += self.lam[b][n] * np.dot(XM, XM.T)
                B += self.lam[b][n] * np.dot(XC, XM.T)
                del XM, XC

        # Compute the model
        W = np.linalg.solve(mK + A, f)
        model[b] = np.dot(B, W)

        # Free up some memory
        del A, B, W

    # Join the chunks after applying the correct offset
    if len(model) > 1:

        # First chunk
        self.model = model[0][:-self.bpad]

        # Center chunks
        for m in model[1:-1]:

            # Join the chunks at the first non-outlier cadence
            i = 1
            while len(self.model) - i in self.mask:
                i += 1
            offset = self.model[-i] - m[self.bpad - i]
            self.model = np.concatenate(
                [self.model, m[self.bpad:-self.bpad] + offset])

        # Last chunk
        i = 1
        while len(self.model) - i in self.mask:
            i += 1
        offset = self.model[-i] - model[-1][self.bpad - i]
        self.model = np.concatenate(
            [self.model, model[-1][self.bpad:] + offset])

    else:
        self.model = model[0]

    # Subtract the global median
    self.model -= np.nanmedian(self.model)

    # Get the CDPP and reset the weights
    self.cdpp_arr = self.get_cdpp_arr()
    self.cdpp = self.get_cdpp()
    self._weights = None
[ "def", "compute", "(", "self", ")", ":", "# Is there a transit model?", "if", "self", ".", "transit_model", "is", "not", "None", ":", "return", "self", ".", "compute_joint", "(", ")", "log", ".", "info", "(", "'Computing the model...'", ")", "# Loop over all chunks", "model", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Masks for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ")", "c", "=", "self", ".", "get_chunk", "(", "b", ")", "# This block of the masked covariance matrix", "mK", "=", "GetCovariance", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "self", ".", "time", "[", "m", "]", ",", "self", ".", "fraw_err", "[", "m", "]", ")", "# Get median", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "fraw", "[", "m", "]", ")", "# Normalize the flux", "f", "=", "self", ".", "fraw", "[", "m", "]", "-", "med", "# The X^2 matrices", "A", "=", "np", ".", "zeros", "(", "(", "len", "(", "m", ")", ",", "len", "(", "m", ")", ")", ")", "B", "=", "np", ".", "zeros", "(", "(", "len", "(", "c", ")", ",", "len", "(", "m", ")", ")", ")", "# Loop over all orders", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "# Only compute up to the current PLD order", "if", "(", "self", ".", "lam_idx", ">=", "n", ")", "and", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "is", "not", "None", ")", ":", "XM", "=", "self", ".", "X", "(", "n", ",", "m", ")", "XC", "=", "self", ".", "X", "(", "n", ",", "c", ")", "A", "+=", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XM", ",", "XM", ".", "T", ")", "B", "+=", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XC", ",", "XM", ".", "T", ")", "del", "XM", ",", "XC", "# Compute the model", "W", "=", "np", ".", "linalg", ".", "solve", "(", "mK", "+", "A", ",", "f", ")", "model", "[", "b", "]", "=", "np", ".", "dot", "(", "B", ",", "W", ")", "# Free up some memory", "del", "A", ",", "B", ",", "W", "# Join the chunks after applying the correct offset", "if", "len", "(", "model", ")", ">", "1", ":", "# First chunk", "self", ".", "model", "=", "model", "[", "0", "]", "[", ":", "-", "self", ".", "bpad", "]", "# Center chunks", "for", "m", "in", "model", "[", "1", ":", "-", "1", "]", ":", "# Join the chunks at the first non-outlier cadence", "i", "=", "1", "while", "len", "(", "self", ".", "model", ")", "-", "i", "in", "self", ".", "mask", ":", "i", "+=", "1", "offset", "=", "self", ".", "model", "[", "-", "i", "]", "-", "m", "[", "self", ".", "bpad", "-", "i", "]", "self", ".", "model", "=", "np", ".", "concatenate", "(", "[", "self", ".", "model", ",", "m", "[", "self", ".", "bpad", ":", "-", "self", ".", "bpad", "]", "+", "offset", "]", ")", "# Last chunk", "i", "=", "1", "while", "len", "(", "self", ".", "model", ")", "-", "i", "in", "self", ".", "mask", ":", "i", "+=", "1", "offset", "=", "self", ".", "model", "[", "-", "i", "]", "-", "model", "[", "-", "1", "]", "[", "self", ".", "bpad", "-", "i", "]", "self", ".", "model", "=", "np", ".", "concatenate", "(", "[", "self", ".", "model", ",", "model", "[", "-", "1", "]", "[", "self", ".", "bpad", ":", "]", "+", "offset", "]", ")", "else", ":", "self", ".", "model", "=", "model", "[", "0", "]", "# Subtract the global median", "self", ".", "model", "-=", "np", ".", "nanmedian", "(", "self", ".", "model", ")", "# Get the CDPP and reset the weights", "self", ".", "cdpp_arr", "=", 
"self", ".", "get_cdpp_arr", "(", ")", "self", ".", "cdpp", "=", "self", ".", "get_cdpp", "(", ")", "self", ".", "_weights", "=", "None" ]
Compute the model for the current value of lambda.
[ "Compute", "the", "model", "for", "the", "current", "value", "of", "lambda", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L374-L460
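The linear algebra at the heart of `compute`, stripped of the chunking and of everest's data structures: with a Gaussian prior of variance `lam` on the PLD weights, the posterior-mean systematics model is `B (K + A)^{-1} f`, where `A = lam * XM XM^T` lives on the unmasked cadences and `B = lam * XC XM^T` maps to every cadence. A self-contained toy version with made-up shapes:

import numpy as np

n_mask, n_all, n_reg = 80, 100, 12
rng = np.random.default_rng(0)
XM = rng.standard_normal((n_mask, n_reg))   # regressors, unmasked cadences
XC = rng.standard_normal((n_all, n_reg))    # regressors, all cadences
f = rng.standard_normal(n_mask)             # median-subtracted flux
K = np.eye(n_mask)                          # stand-in for the GP covariance
lam = 0.5

A = lam * XM @ XM.T
B = lam * XC @ XM.T
model = B @ np.linalg.solve(K + A, f)       # prediction at every cadence
assert model.shape == (n_all,)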
rodluger/everest
everest/basecamp.py
Basecamp.compute_joint
def compute_joint(self):
    '''
    Compute the model in a single step, allowing for a light curve-wide
    transit model. This is a bit more expensive to compute.

    '''
    # Init
    log.info('Computing the joint model...')
    A = [None for b in self.breakpoints]
    B = [None for b in self.breakpoints]

    # We need to make sure that we're not masking the transits we are
    # trying to fit!
    # NOTE: If there happens to be an index that *SHOULD* be masked during
    # a transit (cosmic ray, detector anomaly), update `self.badmask`
    # to include that index.
    # Bad data points are *never* used in the regression.
    if self.transit_model is not None:
        outmask = np.array(self.outmask)
        transitmask = np.array(self.transitmask)
        transit_inds = np.where(
            np.sum([tm(self.time) for tm in self.transit_model],
                   axis=0) < 0)[0]
        self.outmask = np.array(
            [i for i in self.outmask if i not in transit_inds])
        self.transitmask = np.array(
            [i for i in self.transitmask if i not in transit_inds])

    # Loop over all chunks
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b, pad=False)
        c = self.get_chunk(b, pad=False)

        # The X^2 matrices
        A[b] = np.zeros((len(m), len(m)))
        B[b] = np.zeros((len(c), len(m)))

        # Loop over all orders
        for n in range(self.pld_order):

            # Only compute up to the current PLD order
            if (self.lam_idx >= n) and (self.lam[b][n] is not None):
                XM = self.X(n, m)
                XC = self.X(n, c)
                A[b] += self.lam[b][n] * np.dot(XM, XM.T)
                B[b] += self.lam[b][n] * np.dot(XC, XM.T)
                del XM, XC

    # Merge chunks. BIGA and BIGB are sparse, but unfortunately
    # scipy.sparse doesn't handle sparse matrix inversion all that
    # well when the *result* is not itself sparse. So we're sticking
    # with regular np.linalg.
    BIGA = block_diag(*A)
    del A
    BIGB = block_diag(*B)
    del B

    # Compute the full covariance matrix
    mK = GetCovariance(self.kernel, self.kernel_params,
                       self.apply_mask(self.time),
                       self.apply_mask(self.fraw_err))

    # The normalized, masked flux array
    f = self.apply_mask(self.fraw)
    med = np.nanmedian(f)
    f -= med

    # Are we computing a joint transit model?
    if self.transit_model is not None:

        # Get the unmasked indices
        m = self.apply_mask()

        # Subtract off the mean total transit model
        mean_transit_model = med * \
            np.sum([tm.depth * tm(self.time[m])
                    for tm in self.transit_model], axis=0)
        f -= mean_transit_model

        # Now add each transit model to the matrix of regressors
        for tm in self.transit_model:
            XM = tm(self.time[m]).reshape(-1, 1)
            XC = tm(self.time).reshape(-1, 1)
            BIGA += med ** 2 * tm.var_depth * np.dot(XM, XM.T)
            BIGB += med ** 2 * tm.var_depth * np.dot(XC, XM.T)
            del XM, XC

        # Dot the inverse of the covariance matrix
        W = np.linalg.solve(mK + BIGA, f)
        self.model = np.dot(BIGB, W)

        # Compute the transit weights and maximum likelihood transit model
        w_trn = med ** 2 * np.concatenate(
            [tm.var_depth * np.dot(tm(self.time[m]).reshape(1, -1), W)
             for tm in self.transit_model])
        self.transit_depth = np.array(
            [med * tm.depth + w_trn[i]
             for i, tm in enumerate(self.transit_model)]) / med

        # Remove the transit prediction from the model
        self.model -= np.dot(np.hstack([tm(self.time).reshape(-1, 1)
                                        for tm in self.transit_model]),
                             w_trn)

    else:

        # No transit model to worry about
        W = np.linalg.solve(mK + BIGA, f)
        self.model = np.dot(BIGB, W)

    # Subtract the global median
    self.model -= np.nanmedian(self.model)

    # Restore the mask
    if self.transit_model is not None:
        self.outmask = outmask
        self.transitmask = transitmask

    # Get the CDPP and reset the weights
    self.cdpp_arr = self.get_cdpp_arr()
    self.cdpp = self.get_cdpp()
    self._weights = None
python
def compute_joint(self):
    '''
    Compute the model in a single step, allowing for a light curve-wide
    transit model. This is a bit more expensive to compute.

    '''
    # Init
    log.info('Computing the joint model...')
    A = [None for b in self.breakpoints]
    B = [None for b in self.breakpoints]

    # We need to make sure that we're not masking the transits we are
    # trying to fit!
    # NOTE: If there happens to be an index that *SHOULD* be masked during
    # a transit (cosmic ray, detector anomaly), update `self.badmask`
    # to include that index.
    # Bad data points are *never* used in the regression.
    if self.transit_model is not None:
        outmask = np.array(self.outmask)
        transitmask = np.array(self.transitmask)
        transit_inds = np.where(
            np.sum([tm(self.time) for tm in self.transit_model],
                   axis=0) < 0)[0]
        self.outmask = np.array(
            [i for i in self.outmask if i not in transit_inds])
        self.transitmask = np.array(
            [i for i in self.transitmask if i not in transit_inds])

    # Loop over all chunks
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b, pad=False)
        c = self.get_chunk(b, pad=False)

        # The X^2 matrices
        A[b] = np.zeros((len(m), len(m)))
        B[b] = np.zeros((len(c), len(m)))

        # Loop over all orders
        for n in range(self.pld_order):

            # Only compute up to the current PLD order
            if (self.lam_idx >= n) and (self.lam[b][n] is not None):
                XM = self.X(n, m)
                XC = self.X(n, c)
                A[b] += self.lam[b][n] * np.dot(XM, XM.T)
                B[b] += self.lam[b][n] * np.dot(XC, XM.T)
                del XM, XC

    # Merge chunks. BIGA and BIGB are sparse, but unfortunately
    # scipy.sparse doesn't handle sparse matrix inversion all that
    # well when the *result* is not itself sparse. So we're sticking
    # with regular np.linalg.
    BIGA = block_diag(*A)
    del A
    BIGB = block_diag(*B)
    del B

    # Compute the full covariance matrix
    mK = GetCovariance(self.kernel, self.kernel_params,
                       self.apply_mask(self.time),
                       self.apply_mask(self.fraw_err))

    # The normalized, masked flux array
    f = self.apply_mask(self.fraw)
    med = np.nanmedian(f)
    f -= med

    # Are we computing a joint transit model?
    if self.transit_model is not None:

        # Get the unmasked indices
        m = self.apply_mask()

        # Subtract off the mean total transit model
        mean_transit_model = med * \
            np.sum([tm.depth * tm(self.time[m])
                    for tm in self.transit_model], axis=0)
        f -= mean_transit_model

        # Now add each transit model to the matrix of regressors
        for tm in self.transit_model:
            XM = tm(self.time[m]).reshape(-1, 1)
            XC = tm(self.time).reshape(-1, 1)
            BIGA += med ** 2 * tm.var_depth * np.dot(XM, XM.T)
            BIGB += med ** 2 * tm.var_depth * np.dot(XC, XM.T)
            del XM, XC

        # Dot the inverse of the covariance matrix
        W = np.linalg.solve(mK + BIGA, f)
        self.model = np.dot(BIGB, W)

        # Compute the transit weights and maximum likelihood transit model
        w_trn = med ** 2 * np.concatenate(
            [tm.var_depth * np.dot(tm(self.time[m]).reshape(1, -1), W)
             for tm in self.transit_model])
        self.transit_depth = np.array(
            [med * tm.depth + w_trn[i]
             for i, tm in enumerate(self.transit_model)]) / med

        # Remove the transit prediction from the model
        self.model -= np.dot(np.hstack([tm(self.time).reshape(-1, 1)
                                        for tm in self.transit_model]),
                             w_trn)

    else:

        # No transit model to worry about
        W = np.linalg.solve(mK + BIGA, f)
        self.model = np.dot(BIGB, W)

    # Subtract the global median
    self.model -= np.nanmedian(self.model)

    # Restore the mask
    if self.transit_model is not None:
        self.outmask = outmask
        self.transitmask = transitmask

    # Get the CDPP and reset the weights
    self.cdpp_arr = self.get_cdpp_arr()
    self.cdpp = self.get_cdpp()
    self._weights = None
[ "def", "compute_joint", "(", "self", ")", ":", "# Init", "log", ".", "info", "(", "'Computing the joint model...'", ")", "A", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "B", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "# We need to make sure that we're not masking the transits we are", "# trying to fit!", "# NOTE: If there happens to be an index that *SHOULD* be masked during", "# a transit (cosmic ray, detector anomaly), update `self.badmask`", "# to include that index.", "# Bad data points are *never* used in the regression.", "if", "self", ".", "transit_model", "is", "not", "None", ":", "outmask", "=", "np", ".", "array", "(", "self", ".", "outmask", ")", "transitmask", "=", "np", ".", "array", "(", "self", ".", "transitmask", ")", "transit_inds", "=", "np", ".", "where", "(", "np", ".", "sum", "(", "[", "tm", "(", "self", ".", "time", ")", "for", "tm", "in", "self", ".", "transit_model", "]", ",", "axis", "=", "0", ")", "<", "0", ")", "[", "0", "]", "self", ".", "outmask", "=", "np", ".", "array", "(", "[", "i", "for", "i", "in", "self", ".", "outmask", "if", "i", "not", "in", "transit_inds", "]", ")", "self", ".", "transitmask", "=", "np", ".", "array", "(", "[", "i", "for", "i", "in", "self", ".", "transitmask", "if", "i", "not", "in", "transit_inds", "]", ")", "# Loop over all chunks", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Masks for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ",", "pad", "=", "False", ")", "c", "=", "self", ".", "get_chunk", "(", "b", ",", "pad", "=", "False", ")", "# The X^2 matrices", "A", "[", "b", "]", "=", "np", ".", "zeros", "(", "(", "len", "(", "m", ")", ",", "len", "(", "m", ")", ")", ")", "B", "[", "b", "]", "=", "np", ".", "zeros", "(", "(", "len", "(", "c", ")", ",", "len", "(", "m", ")", ")", ")", "# Loop over all orders", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "# Only compute up to the current PLD order", "if", "(", "self", ".", "lam_idx", ">=", "n", ")", "and", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "is", "not", "None", ")", ":", "XM", "=", "self", ".", "X", "(", "n", ",", "m", ")", "XC", "=", "self", ".", "X", "(", "n", ",", "c", ")", "A", "[", "b", "]", "+=", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XM", ",", "XM", ".", "T", ")", "B", "[", "b", "]", "+=", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XC", ",", "XM", ".", "T", ")", "del", "XM", ",", "XC", "# Merge chunks. BIGA and BIGB are sparse, but unfortunately", "# scipy.sparse doesn't handle sparse matrix inversion all that", "# well when the *result* is not itself sparse. 
So we're sticking", "# with regular np.linalg.", "BIGA", "=", "block_diag", "(", "*", "A", ")", "del", "A", "BIGB", "=", "block_diag", "(", "*", "B", ")", "del", "B", "# Compute the full covariance matrix", "mK", "=", "GetCovariance", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "self", ".", "apply_mask", "(", "self", ".", "time", ")", ",", "self", ".", "apply_mask", "(", "self", ".", "fraw_err", ")", ")", "# The normalized, masked flux array", "f", "=", "self", ".", "apply_mask", "(", "self", ".", "fraw", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "f", "-=", "med", "# Are we computing a joint transit model?", "if", "self", ".", "transit_model", "is", "not", "None", ":", "# Get the unmasked indices", "m", "=", "self", ".", "apply_mask", "(", ")", "# Subtract off the mean total transit model", "mean_transit_model", "=", "med", "*", "np", ".", "sum", "(", "[", "tm", ".", "depth", "*", "tm", "(", "self", ".", "time", "[", "m", "]", ")", "for", "tm", "in", "self", ".", "transit_model", "]", ",", "axis", "=", "0", ")", "f", "-=", "mean_transit_model", "# Now add each transit model to the matrix of regressors", "for", "tm", "in", "self", ".", "transit_model", ":", "XM", "=", "tm", "(", "self", ".", "time", "[", "m", "]", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "XC", "=", "tm", "(", "self", ".", "time", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "BIGA", "+=", "med", "**", "2", "*", "tm", ".", "var_depth", "*", "np", ".", "dot", "(", "XM", ",", "XM", ".", "T", ")", "BIGB", "+=", "med", "**", "2", "*", "tm", ".", "var_depth", "*", "np", ".", "dot", "(", "XC", ",", "XM", ".", "T", ")", "del", "XM", ",", "XC", "# Dot the inverse of the covariance matrix", "W", "=", "np", ".", "linalg", ".", "solve", "(", "mK", "+", "BIGA", ",", "f", ")", "self", ".", "model", "=", "np", ".", "dot", "(", "BIGB", ",", "W", ")", "# Compute the transit weights and maximum likelihood transit model", "w_trn", "=", "med", "**", "2", "*", "np", ".", "concatenate", "(", "[", "tm", ".", "var_depth", "*", "np", ".", "dot", "(", "tm", "(", "self", ".", "time", "[", "m", "]", ")", ".", "reshape", "(", "1", ",", "-", "1", ")", ",", "W", ")", "for", "tm", "in", "self", ".", "transit_model", "]", ")", "self", ".", "transit_depth", "=", "np", ".", "array", "(", "[", "med", "*", "tm", ".", "depth", "+", "w_trn", "[", "i", "]", "for", "i", ",", "tm", "in", "enumerate", "(", "self", ".", "transit_model", ")", "]", ")", "/", "med", "# Remove the transit prediction from the model", "self", ".", "model", "-=", "np", ".", "dot", "(", "np", ".", "hstack", "(", "[", "tm", "(", "self", ".", "time", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "for", "tm", "in", "self", ".", "transit_model", "]", ")", ",", "w_trn", ")", "else", ":", "# No transit model to worry about", "W", "=", "np", ".", "linalg", ".", "solve", "(", "mK", "+", "BIGA", ",", "f", ")", "self", ".", "model", "=", "np", ".", "dot", "(", "BIGB", ",", "W", ")", "# Subtract the global median", "self", ".", "model", "-=", "np", ".", "nanmedian", "(", "self", ".", "model", ")", "# Restore the mask", "if", "self", ".", "transit_model", "is", "not", "None", ":", "self", ".", "outmask", "=", "outmask", "self", ".", "transitmask", "=", "transitmask", "# Get the CDPP and reset the weights", "self", ".", "cdpp_arr", "=", "self", ".", "get_cdpp_arr", "(", ")", "self", ".", "cdpp", "=", "self", ".", "get_cdpp", "(", ")", "self", ".", "_weights", "=", "None" ]
Compute the model in a single step, allowing for a light curve-wide transit model. This is a bit more expensive to compute.
[ "Compute", "the", "model", "in", "a", "single", "step", "allowing", "for", "a", "light", "curve", "-", "wide", "transit", "model", ".", "This", "is", "a", "bit", "more", "expensive", "to", "compute", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L462-L585
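On the chunk merging in `compute_joint`: `scipy.linalg.block_diag` stitches the per-chunk `X Lambda X^T` blocks into one dense matrix with zeros off the block diagonal, so a single `np.linalg.solve` then covers all chunks at once. A two-block illustration:

import numpy as np
from scipy.linalg import block_diag

A1 = np.ones((2, 2))
A2 = 2 * np.ones((3, 3))
BIGA = block_diag(A1, A2)     # shape (5, 5); cross-chunk entries are zero
assert BIGA.shape == (5, 5) and BIGA[0, 4] == 0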
rodluger/everest
everest/basecamp.py
Basecamp.apply_mask
def apply_mask(self, x=None):
    '''
    Returns the outlier mask, an array of indices corresponding to the
    non-outliers.

    :param numpy.ndarray x: If specified, returns the masked version of \
    :py:obj:`x` instead. Default :py:obj:`None`

    '''
    if x is None:
        return np.delete(np.arange(len(self.time)), self.mask)
    else:
        return np.delete(x, self.mask, axis=0)
python
def apply_mask(self, x=None):
    '''
    Returns the outlier mask, an array of indices corresponding to the
    non-outliers.

    :param numpy.ndarray x: If specified, returns the masked version of \
    :py:obj:`x` instead. Default :py:obj:`None`

    '''
    if x is None:
        return np.delete(np.arange(len(self.time)), self.mask)
    else:
        return np.delete(x, self.mask, axis=0)
[ "def", "apply_mask", "(", "self", ",", "x", "=", "None", ")", ":", "if", "x", "is", "None", ":", "return", "np", ".", "delete", "(", "np", ".", "arange", "(", "len", "(", "self", ".", "time", ")", ")", ",", "self", ".", "mask", ")", "else", ":", "return", "np", ".", "delete", "(", "x", ",", "self", ".", "mask", ",", "axis", "=", "0", ")" ]
Returns the outlier mask, an array of indices corresponding to the non-outliers. :param numpy.ndarray x: If specified, returns the masked version of \ :py:obj:`x` instead. Default :py:obj:`None`
[ "Returns", "the", "outlier", "mask", "an", "array", "of", "indices", "corresponding", "to", "the", "non", "-", "outliers", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L587-L600
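A short illustration of the `np.delete` behavior `apply_mask` relies on: with no array given it returns the surviving cadence indices themselves, and with an array it removes the same rows from the data (toy values):

import numpy as np

time = np.arange(6, dtype=float)
mask = np.array([1, 4])
good = np.delete(np.arange(len(time)), mask)   # -> array([0, 2, 3, 5])
time_masked = np.delete(time, mask, axis=0)    # same rows removed from data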
rodluger/everest
everest/basecamp.py
Basecamp.get_chunk
def get_chunk(self, b, x=None, pad=True):
    '''
    Returns the indices corresponding to a given light curve chunk.

    :param int b: The index of the chunk to return
    :param numpy.ndarray x: If specified, applies the mask to array \
    :py:obj:`x`. Default :py:obj:`None`

    '''
    M = np.arange(len(self.time))
    if b > 0:
        res = M[(M > self.breakpoints[b - 1] - int(pad) * self.bpad)
                & (M <= self.breakpoints[b] + int(pad) * self.bpad)]
    else:
        res = M[M <= self.breakpoints[b] + int(pad) * self.bpad]
    if x is None:
        return res
    else:
        return x[res]
python
def get_chunk(self, b, x=None, pad=True):
    '''
    Returns the indices corresponding to a given light curve chunk.

    :param int b: The index of the chunk to return
    :param numpy.ndarray x: If specified, applies the mask to array \
    :py:obj:`x`. Default :py:obj:`None`

    '''
    M = np.arange(len(self.time))
    if b > 0:
        res = M[(M > self.breakpoints[b - 1] - int(pad) * self.bpad)
                & (M <= self.breakpoints[b] + int(pad) * self.bpad)]
    else:
        res = M[M <= self.breakpoints[b] + int(pad) * self.bpad]
    if x is None:
        return res
    else:
        return x[res]
[ "def", "get_chunk", "(", "self", ",", "b", ",", "x", "=", "None", ",", "pad", "=", "True", ")", ":", "M", "=", "np", ".", "arange", "(", "len", "(", "self", ".", "time", ")", ")", "if", "b", ">", "0", ":", "res", "=", "M", "[", "(", "M", ">", "self", ".", "breakpoints", "[", "b", "-", "1", "]", "-", "int", "(", "pad", ")", "*", "self", ".", "bpad", ")", "&", "(", "M", "<=", "self", ".", "breakpoints", "[", "b", "]", "+", "int", "(", "pad", ")", "*", "self", ".", "bpad", ")", "]", "else", ":", "res", "=", "M", "[", "M", "<=", "self", ".", "breakpoints", "[", "b", "]", "+", "int", "(", "pad", ")", "*", "self", ".", "bpad", "]", "if", "x", "is", "None", ":", "return", "res", "else", ":", "return", "x", "[", "res", "]" ]
Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return :param numpy.ndarray x: If specified, applies the mask to array \ :py:obj:`x`. Default :py:obj:`None`
[ "Returns", "the", "indices", "corresponding", "to", "a", "given", "light", "curve", "chunk", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L602-L621
rodluger/everest
everest/basecamp.py
Basecamp.get_weights
def get_weights(self):
    '''
    Computes the PLD weights vector :py:obj:`w`.

    ..warning :: Deprecated and not thoroughly tested.

    '''
    log.info("Computing PLD weights...")

    # Loop over all chunks
    weights = [None for i in range(len(self.breakpoints))]
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)

        # This block of the masked covariance matrix
        _mK = GetCovariance(self.kernel, self.kernel_params,
                            self.time[m], self.fraw_err[m])

        # This chunk of the normalized flux
        f = self.fraw[m] - np.nanmedian(self.fraw)

        # Loop over all orders
        _A = [None for i in range(self.pld_order)]
        for n in range(self.pld_order):
            if self.lam_idx >= n:
                X = self.X(n, m)
                _A[n] = np.dot(X, X.T)
                del X

        # Compute the weights
        A = np.sum([l * a for l, a in zip(self.lam[b], _A)
                    if l is not None], axis=0)
        W = np.linalg.solve(_mK + A, f)
        weights[b] = [l * np.dot(self.X(n, m).T, W)
                      for n, l in enumerate(self.lam[b]) if l is not None]

    self._weights = weights
python
def get_weights(self):
    '''
    Computes the PLD weights vector :py:obj:`w`.

    ..warning :: Deprecated and not thoroughly tested.

    '''
    log.info("Computing PLD weights...")

    # Loop over all chunks
    weights = [None for i in range(len(self.breakpoints))]
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)

        # This block of the masked covariance matrix
        _mK = GetCovariance(self.kernel, self.kernel_params,
                            self.time[m], self.fraw_err[m])

        # This chunk of the normalized flux
        f = self.fraw[m] - np.nanmedian(self.fraw)

        # Loop over all orders
        _A = [None for i in range(self.pld_order)]
        for n in range(self.pld_order):
            if self.lam_idx >= n:
                X = self.X(n, m)
                _A[n] = np.dot(X, X.T)
                del X

        # Compute the weights
        A = np.sum([l * a for l, a in zip(self.lam[b], _A)
                    if l is not None], axis=0)
        W = np.linalg.solve(_mK + A, f)
        weights[b] = [l * np.dot(self.X(n, m).T, W)
                      for n, l in enumerate(self.lam[b]) if l is not None]

    self._weights = weights
[ "def", "get_weights", "(", "self", ")", ":", "log", ".", "info", "(", "\"Computing PLD weights...\"", ")", "# Loop over all chunks", "weights", "=", "[", "None", "for", "i", "in", "range", "(", "len", "(", "self", ".", "breakpoints", ")", ")", "]", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Masks for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ")", "c", "=", "self", ".", "get_chunk", "(", "b", ")", "# This block of the masked covariance matrix", "_mK", "=", "GetCovariance", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "self", ".", "time", "[", "m", "]", ",", "self", ".", "fraw_err", "[", "m", "]", ")", "# This chunk of the normalized flux", "f", "=", "self", ".", "fraw", "[", "m", "]", "-", "np", ".", "nanmedian", "(", "self", ".", "fraw", ")", "# Loop over all orders", "_A", "=", "[", "None", "for", "i", "in", "range", "(", "self", ".", "pld_order", ")", "]", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "if", "self", ".", "lam_idx", ">=", "n", ":", "X", "=", "self", ".", "X", "(", "n", ",", "m", ")", "_A", "[", "n", "]", "=", "np", ".", "dot", "(", "X", ",", "X", ".", "T", ")", "del", "X", "# Compute the weights", "A", "=", "np", ".", "sum", "(", "[", "l", "*", "a", "for", "l", ",", "a", "in", "zip", "(", "self", ".", "lam", "[", "b", "]", ",", "_A", ")", "if", "l", "is", "not", "None", "]", ",", "axis", "=", "0", ")", "W", "=", "np", ".", "linalg", ".", "solve", "(", "_mK", "+", "A", ",", "f", ")", "weights", "[", "b", "]", "=", "[", "l", "*", "np", ".", "dot", "(", "self", ".", "X", "(", "n", ",", "m", ")", ".", "T", ",", "W", ")", "for", "n", ",", "l", "in", "enumerate", "(", "self", ".", "lam", "[", "b", "]", ")", "if", "l", "is", "not", "None", "]", "self", ".", "_weights", "=", "weights" ]
Computes the PLD weights vector :py:obj:`w`. ..warning :: Deprecated and not thoroughly tested.
[ "Computes", "the", "PLD", "weights", "vector", ":", "py", ":", "obj", ":", "w", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L643-L683
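The weights computed above are the posterior mean of the PLD coefficients, `w = lam * X^T (K + lam X X^T)^{-1} f`, so `X @ w` reproduces the systematics model of `compute` on the unmasked cadences. A self-contained toy check of that identity (made-up shapes and values):

import numpy as np

rng = np.random.default_rng(1)
XM = rng.standard_normal((80, 12))     # PLD regressors, unmasked cadences
f = rng.standard_normal(80)            # median-subtracted flux
K = np.eye(80)                         # stand-in GP covariance
lam = 0.5
A = lam * XM @ XM.T

w = lam * XM.T @ np.linalg.solve(K + A, f)       # posterior-mean weights
assert np.allclose(XM @ w, A @ np.linalg.solve(K + A, f))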
rodluger/everest
everest/basecamp.py
Basecamp.get_cdpp_arr
def get_cdpp_arr(self, flux=None):
    '''
    Returns the CDPP value in *ppm* for each of the
    chunks in the light curve.

    '''
    if flux is None:
        flux = self.flux
    return np.array([self._mission.CDPP(flux[self.get_masked_chunk(b)],
                                        cadence=self.cadence)
                     for b, _ in enumerate(self.breakpoints)])
python
def get_cdpp_arr(self, flux=None):
    '''
    Returns the CDPP value in *ppm* for each of the
    chunks in the light curve.

    '''
    if flux is None:
        flux = self.flux
    return np.array([self._mission.CDPP(flux[self.get_masked_chunk(b)],
                                        cadence=self.cadence)
                     for b, _ in enumerate(self.breakpoints)])
[ "def", "get_cdpp_arr", "(", "self", ",", "flux", "=", "None", ")", ":", "if", "flux", "is", "None", ":", "flux", "=", "self", ".", "flux", "return", "np", ".", "array", "(", "[", "self", ".", "_mission", ".", "CDPP", "(", "flux", "[", "self", ".", "get_masked_chunk", "(", "b", ")", "]", ",", "cadence", "=", "self", ".", "cadence", ")", "for", "b", ",", "_", "in", "enumerate", "(", "self", ".", "breakpoints", ")", "]", ")" ]
Returns the CDPP value in *ppm* for each of the chunks in the light curve.
[ "Returns", "the", "CDPP", "value", "in", "*", "ppm", "*", "for", "each", "of", "the", "chunks", "in", "the", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L685-L696
rodluger/everest
everest/basecamp.py
Basecamp.get_cdpp
def get_cdpp(self, flux=None):
    '''
    Returns the scalar CDPP for the light curve.

    '''
    if flux is None:
        flux = self.flux
    return self._mission.CDPP(self.apply_mask(flux), cadence=self.cadence)
python
def get_cdpp(self, flux=None):
    '''
    Returns the scalar CDPP for the light curve.

    '''
    if flux is None:
        flux = self.flux
    return self._mission.CDPP(self.apply_mask(flux), cadence=self.cadence)
[ "def", "get_cdpp", "(", "self", ",", "flux", "=", "None", ")", ":", "if", "flux", "is", "None", ":", "flux", "=", "self", ".", "flux", "return", "self", ".", "_mission", ".", "CDPP", "(", "self", ".", "apply_mask", "(", "flux", ")", ",", "cadence", "=", "self", ".", "cadence", ")" ]
Returns the scalar CDPP for the light curve.
[ "Returns", "the", "scalar", "CDPP", "for", "the", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L698-L706
rodluger/everest
everest/basecamp.py
Basecamp.plot_aperture
def plot_aperture(self, axes, labelsize=8):
    '''
    Plots the aperture and the pixel images at the beginning, middle,
    and end of the time series. Also plots a high resolution image of
    the target, if available.

    '''
    log.info('Plotting the aperture...')

    # Get colormap
    plasma = pl.get_cmap('plasma')
    plasma.set_bad(alpha=0)

    # Get aperture contour
    def PadWithZeros(vector, pad_width, iaxis, kwargs):
        vector[:pad_width[0]] = 0
        vector[-pad_width[1]:] = 0
        return vector

    ny, nx = self.pixel_images[0].shape
    contour = np.zeros((ny, nx))
    contour[np.where(self.aperture)] = 1
    contour = np.lib.pad(contour, 1, PadWithZeros)
    highres = zoom(contour, 100, order=0, mode='nearest')
    extent = np.array([-1, nx, -1, ny])

    # Plot first, mid, and last TPF image
    title = ['start', 'mid', 'end']
    for i, image in enumerate(self.pixel_images):
        ax = axes[i]
        ax.imshow(image, aspect='auto',
                  interpolation='nearest', cmap=plasma)
        ax.contour(highres, levels=[0.5], extent=extent,
                   origin='lower', colors='r', linewidths=1)

        # Check for saturated columns
        for x in range(self.aperture.shape[0]):
            for y in range(self.aperture.shape[1]):
                if self.aperture[x][y] == AP_SATURATED_PIXEL:
                    ax.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
                            [x - 0.5, x - 0.5, x + 0.5, x + 0.5],
                            fill=False, hatch='xxxxx', color='r', lw=0)

        ax.axis('off')
        ax.set_xlim(-0.7, nx - 0.3)
        ax.set_ylim(-0.7, ny - 0.3)
        ax.annotate(title[i], xy=(0.5, 0.975), xycoords='axes fraction',
                    ha='center', va='top', size=labelsize, color='w')
        if i == 1:
            for source in self.nearby:
                ax.annotate('%.1f' % source['mag'],
                            xy=(source['x'] - source['x0'],
                                source['y'] - source['y0']),
                            ha='center', va='center', size=labelsize - 2,
                            color='w', fontweight='bold')

    # Plot hi res image
    if self.hires is not None:
        ax = axes[-1]
        ax.imshow(self.hires, aspect='auto',
                  extent=(-0.5, nx - 0.5, -0.5, ny - 0.5),
                  interpolation='bicubic', cmap=plasma)
        ax.contour(highres, levels=[0.5], extent=extent,
                   origin='lower', colors='r', linewidths=1)
        ax.axis('off')
        ax.set_xlim(-0.7, nx - 0.3)
        ax.set_ylim(-0.7, ny - 0.3)
        ax.annotate('hires', xy=(0.5, 0.975), xycoords='axes fraction',
                    ha='center', va='top', size=labelsize, color='w')
    else:
        ax = axes[-1]
        ax.axis('off')
python
def plot_aperture(self, axes, labelsize=8): ''' Plots the aperture and the pixel images at the beginning, middle, and end of the time series. Also plots a high resolution image of the target, if available. ''' log.info('Plotting the aperture...') # Get colormap plasma = pl.get_cmap('plasma') plasma.set_bad(alpha=0) # Get aperture contour def PadWithZeros(vector, pad_width, iaxis, kwargs): vector[:pad_width[0]] = 0 vector[-pad_width[1]:] = 0 return vector ny, nx = self.pixel_images[0].shape contour = np.zeros((ny, nx)) contour[np.where(self.aperture)] = 1 contour = np.lib.pad(contour, 1, PadWithZeros) highres = zoom(contour, 100, order=0, mode='nearest') extent = np.array([-1, nx, -1, ny]) # Plot first, mid, and last TPF image title = ['start', 'mid', 'end'] for i, image in enumerate(self.pixel_images): ax = axes[i] ax.imshow(image, aspect='auto', interpolation='nearest', cmap=plasma) ax.contour(highres, levels=[0.5], extent=extent, origin='lower', colors='r', linewidths=1) # Check for saturated columns for x in range(self.aperture.shape[0]): for y in range(self.aperture.shape[1]): if self.aperture[x][y] == AP_SATURATED_PIXEL: ax.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5], [x - 0.5, x - 0.5, x + 0.5, x + 0.5], fill=False, hatch='xxxxx', color='r', lw=0) ax.axis('off') ax.set_xlim(-0.7, nx - 0.3) ax.set_ylim(-0.7, ny - 0.3) ax.annotate(title[i], xy=(0.5, 0.975), xycoords='axes fraction', ha='center', va='top', size=labelsize, color='w') if i == 1: for source in self.nearby: ax.annotate('%.1f' % source['mag'], xy=(source['x'] - source['x0'], source['y'] - source['y0']), ha='center', va='center', size=labelsize - 2, color='w', fontweight='bold') # Plot hi res image if self.hires is not None: ax = axes[-1] ax.imshow(self.hires, aspect='auto', extent=(-0.5, nx - 0.5, -0.5, ny - 0.5), interpolation='bicubic', cmap=plasma) ax.contour(highres, levels=[0.5], extent=extent, origin='lower', colors='r', linewidths=1) ax.axis('off') ax.set_xlim(-0.7, nx - 0.3) ax.set_ylim(-0.7, ny - 0.3) ax.annotate('hires', xy=(0.5, 0.975), xycoords='axes fraction', ha='center', va='top', size=labelsize, color='w') else: ax = axes[-1] ax.axis('off')
[ "def", "plot_aperture", "(", "self", ",", "axes", ",", "labelsize", "=", "8", ")", ":", "log", ".", "info", "(", "'Plotting the aperture...'", ")", "# Get colormap", "plasma", "=", "pl", ".", "get_cmap", "(", "'plasma'", ")", "plasma", ".", "set_bad", "(", "alpha", "=", "0", ")", "# Get aperture contour", "def", "PadWithZeros", "(", "vector", ",", "pad_width", ",", "iaxis", ",", "kwargs", ")", ":", "vector", "[", ":", "pad_width", "[", "0", "]", "]", "=", "0", "vector", "[", "-", "pad_width", "[", "1", "]", ":", "]", "=", "0", "return", "vector", "ny", ",", "nx", "=", "self", ".", "pixel_images", "[", "0", "]", ".", "shape", "contour", "=", "np", ".", "zeros", "(", "(", "ny", ",", "nx", ")", ")", "contour", "[", "np", ".", "where", "(", "self", ".", "aperture", ")", "]", "=", "1", "contour", "=", "np", ".", "lib", ".", "pad", "(", "contour", ",", "1", ",", "PadWithZeros", ")", "highres", "=", "zoom", "(", "contour", ",", "100", ",", "order", "=", "0", ",", "mode", "=", "'nearest'", ")", "extent", "=", "np", ".", "array", "(", "[", "-", "1", ",", "nx", ",", "-", "1", ",", "ny", "]", ")", "# Plot first, mid, and last TPF image", "title", "=", "[", "'start'", ",", "'mid'", ",", "'end'", "]", "for", "i", ",", "image", "in", "enumerate", "(", "self", ".", "pixel_images", ")", ":", "ax", "=", "axes", "[", "i", "]", "ax", ".", "imshow", "(", "image", ",", "aspect", "=", "'auto'", ",", "interpolation", "=", "'nearest'", ",", "cmap", "=", "plasma", ")", "ax", ".", "contour", "(", "highres", ",", "levels", "=", "[", "0.5", "]", ",", "extent", "=", "extent", ",", "origin", "=", "'lower'", ",", "colors", "=", "'r'", ",", "linewidths", "=", "1", ")", "# Check for saturated columns", "for", "x", "in", "range", "(", "self", ".", "aperture", ".", "shape", "[", "0", "]", ")", ":", "for", "y", "in", "range", "(", "self", ".", "aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "self", ".", "aperture", "[", "x", "]", "[", "y", "]", "==", "AP_SATURATED_PIXEL", ":", "ax", ".", "fill", "(", "[", "y", "-", "0.5", ",", "y", "+", "0.5", ",", "y", "+", "0.5", ",", "y", "-", "0.5", "]", ",", "[", "x", "-", "0.5", ",", "x", "-", "0.5", ",", "x", "+", "0.5", ",", "x", "+", "0.5", "]", ",", "fill", "=", "False", ",", "hatch", "=", "'xxxxx'", ",", "color", "=", "'r'", ",", "lw", "=", "0", ")", "ax", ".", "axis", "(", "'off'", ")", "ax", ".", "set_xlim", "(", "-", "0.7", ",", "nx", "-", "0.3", ")", "ax", ".", "set_ylim", "(", "-", "0.7", ",", "ny", "-", "0.3", ")", "ax", ".", "annotate", "(", "title", "[", "i", "]", ",", "xy", "=", "(", "0.5", ",", "0.975", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'top'", ",", "size", "=", "labelsize", ",", "color", "=", "'w'", ")", "if", "i", "==", "1", ":", "for", "source", "in", "self", ".", "nearby", ":", "ax", ".", "annotate", "(", "'%.1f'", "%", "source", "[", "'mag'", "]", ",", "xy", "=", "(", "source", "[", "'x'", "]", "-", "source", "[", "'x0'", "]", ",", "source", "[", "'y'", "]", "-", "source", "[", "'y0'", "]", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'center'", ",", "size", "=", "labelsize", "-", "2", ",", "color", "=", "'w'", ",", "fontweight", "=", "'bold'", ")", "# Plot hi res image", "if", "self", ".", "hires", "is", "not", "None", ":", "ax", "=", "axes", "[", "-", "1", "]", "ax", ".", "imshow", "(", "self", ".", "hires", ",", "aspect", "=", "'auto'", ",", "extent", "=", "(", "-", "0.5", ",", "nx", "-", "0.5", ",", "-", "0.5", ",", "ny", "-", "0.5", ")", ",", "interpolation", "=", "'bicubic'", ",", "cmap", 
"=", "plasma", ")", "ax", ".", "contour", "(", "highres", ",", "levels", "=", "[", "0.5", "]", ",", "extent", "=", "extent", ",", "origin", "=", "'lower'", ",", "colors", "=", "'r'", ",", "linewidths", "=", "1", ")", "ax", ".", "axis", "(", "'off'", ")", "ax", ".", "set_xlim", "(", "-", "0.7", ",", "nx", "-", "0.3", ")", "ax", ".", "set_ylim", "(", "-", "0.7", ",", "ny", "-", "0.3", ")", "ax", ".", "annotate", "(", "'hires'", ",", "xy", "=", "(", "0.5", ",", "0.975", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'center'", ",", "va", "=", "'top'", ",", "size", "=", "labelsize", ",", "color", "=", "'w'", ")", "else", ":", "ax", "=", "axes", "[", "-", "1", "]", "ax", ".", "axis", "(", "'off'", ")" ]
Plots the aperture and the pixel images at the beginning, middle, and end of the time series. Also plots a high resolution image of the target, if available.
[ "Plots", "the", "aperture", "and", "the", "pixel", "images", "at", "the", "beginning", "middle", "and", "end", "of", "the", "time", "series", ".", "Also", "plots", "a", "high", "resolution", "image", "of", "the", "target", "if", "available", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L708-L779
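The aperture outline in plot_aperture relies on a small trick worth isolating: zero-padding the binary aperture mask guarantees the 0.5-level contour closes around edge pixels, and upsampling with order-0 (nearest-neighbor) zoom keeps the contour hugging exact pixel boundaries instead of being smoothed. A self-contained sketch with a toy aperture (np.pad with mode='constant' plays the role of the PadWithZeros helper):

import numpy as np
import matplotlib.pyplot as pl
from scipy.ndimage import zoom

# Toy 5x5 image with a 3x3 aperture in the middle.
aperture = np.zeros((5, 5), dtype=int)
aperture[1:4, 1:4] = 1
ny, nx = aperture.shape

# Zero-pad so the contour closes at the image edges, then upsample
# without interpolation so the 0.5 level traces pixel borders.
contour = np.pad(aperture.astype(float), 1, mode='constant')
highres = zoom(contour, 100, order=0, mode='nearest')

fig, ax = pl.subplots()
ax.imshow(aperture, aspect='auto', interpolation='nearest', cmap='plasma')
ax.contour(highres, levels=[0.5], extent=[-1, nx, -1, ny],
           origin='lower', colors='r', linewidths=1)
pl.show()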
rodluger/everest
everest/basecamp.py
Basecamp.overfit
def overfit(self, tau=None, plot=True, clobber=False, w=9, **kwargs): r""" Compute the masked & unmasked overfitting metrics for the light curve. This routine injects a transit model given by `tau` at every cadence in the light curve and recovers the transit depth when (1) leaving the transit unmasked and (2) masking the transit prior to performing regression. :param tau: A function or callable that accepts two arguments, \ `time` and `t0`, and returns an array corresponding to a \ zero-mean, unit depth transit model centered at \ `t0` and evaluated at `time`. \ The easiest way to provide this is to use an instance of \ :py:class:`everest.transit.TransitShape`. Default is \ :py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \ with solar-like limb darkening and a duratio of 0.1 days. :param bool plot: Plot the results as a PDF? Default :py:obj:`True` :param bool clobber: Overwrite the results if present? Default \ :py:obj:`False` :param int w: The size of the masking window in cadences for \ computing the masked overfitting metric. Default `9` \ (about 4.5 hours for `K2` long cadence). :returns: An instance of `everest.basecamp.Overfitting`. """ fname = os.path.join(self.dir, self.name + '_overfit.npz') figname = os.path.join(self.dir, self.name) # Compute if not os.path.exists(fname) or clobber: # Baseline med = np.nanmedian(self.fraw) # Default transit model if tau is None: tau = TransitShape(dur=0.1) # The overfitting metrics O1 = [None for brkpt in self.breakpoints] O2 = [None for brkpt in self.breakpoints] O3 = [None for brkpt in self.breakpoints] O4 = [None for brkpt in self.breakpoints] O5 = [None for brkpt in self.breakpoints] # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) time = self.time[m] ferr = self.fraw_err[m] / med y = self.fraw[m] / med - 1 # The metrics we're computing here O1[b] = np.zeros(len(y)) * np.nan O2[b] = np.zeros(len(y)) * np.nan O3[b] = np.zeros(len(y)) * np.nan O4[b] = np.zeros(len(y)) * np.nan O5[b] = np.zeros(len(y)) * np.nan # Compute the astrophysical covariance and its inverse log.info("Computing the covariance...") if self.kernel == 'Basic': wh, am, ta = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ta] elif self.kernel == 'QuasiPeriodic': wh, am, ga, pe = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ga, pe] K = GetCovariance(self.kernel, kernel_params, time, ferr) Kinv = cho_solve((cholesky(K), False), np.eye(len(time))) # Loop over all orders log.info("Computing some large matrices...") X = [None for n in range(self.pld_order)] XL = [None for n in range(self.pld_order)] XLX = [None for n in range(self.pld_order)] for n in range(self.pld_order): if (self.lam_idx >= n) and (self.lam[b][n] is not None): X[n] = self.X(n, m, **kwargs) XL[n] = (self.lam[b][n] / med ** 2) * X[n] XLX[n] = np.dot(XL[n], X[n].T) X = np.hstack(X) XL = np.hstack(XL) XLX = np.sum(XLX, axis=0) # The full covariance C = XLX + K # The unmasked linear problem log.info("Solving the unmasked linear problem...") m = np.dot(XLX, np.linalg.solve(C, y)) m -= np.nanmedian(m) f = y - m R = np.linalg.solve(C, XLX.T).T # The masked linear problem log.info("Solving the masked linear problem...") A = MaskSolve(C, y, w=w) # Now loop through and compute the metric log.info("Computing the overfitting metrics...") for n in prange(len(y)): # # *** Unmasked overfitting metric *** # # Evaluate the sparse transit model TAU = tau(time, t0=time[n]) i = np.where(TAU < 0)[0] TAU 
= TAU.reshape(-1, 1) # Fast sparse algebra AA = np.dot(np.dot(TAU[i].T, Kinv[i, :][:, i]), TAU[i]) BB = np.dot(TAU[i].T, Kinv[i, :]) CC = TAU - np.dot(R[:, i], TAU[i]) O1[b][n] = AA O2[b][n] = np.dot(BB, CC) O3[b][n] = np.dot(BB, f) O4[b][n] = np.dot(BB, y) # # *** Masked overfitting metric *** # # The current mask and mask centerpoint mask = np.arange(n, n + w) j = n + (w + 1) // 2 - 1 if j >= len(y) - w: continue # The regularized design matrix # This is the same as # XLmX[:, n - 1] = \ # np.dot(XL, np.delete(X, mask, axis=0).T)[:, n - 1] if n == 0: XLmX = np.dot(XL, np.delete(X, mask, axis=0).T) else: XLmX[:, n - 1] = np.dot(XL, X[n - 1, :].T) # The linear solution to this step m = np.dot(XLmX, A[n]) # Evaluate the sparse transit model TAU = tau(time, t0=time[j]) i = np.where(TAU < 0)[0] TAU = TAU[i].reshape(-1, 1) # Dot the transit model in den = np.dot(np.dot(TAU.T, Kinv[i, :][:, i]), TAU) num = np.dot(TAU.T, Kinv[i, :]) # Compute the overfitting metric # Divide this number by a depth # to get the overfitting for that # particular depth. O5[b][j] = -np.dot(num, y - m) / den # Save! np.savez(fname, O1=O1, O2=O2, O3=O3, O4=O4, O5=O5) else: data = np.load(fname) O1 = data['O1'] O2 = data['O2'] O3 = data['O3'] O4 = data['O4'] O5 = data['O5'] # Plot if plot and (clobber or not os.path.exists(figname + '_overfit.pdf')): log.info("Plotting the overfitting metrics...") # Masked time array time = self.apply_mask(self.time) # Plot the final corrected light curve ovr = OVERFIT() self.plot_info(ovr) # Loop over the two metrics for kind, axes, axesh in zip(['unmasked', 'masked'], [ovr.axes1, ovr.axes2], [ovr.axes1h, ovr.axes2h]): # Loop over three depths for depth, ax, axh in zip([0.01, 0.001, 0.0001], axes, axesh): # Get the metric if kind == 'unmasked': metric = 1 - (np.hstack(O2) + np.hstack(O3) / depth) / np.hstack(O1) color = 'r' elif kind == 'masked': metric = np.hstack(O5) / depth color = 'b' else: raise ValueError("Invalid metric.") # Median and median absolute deviation med = np.nanmedian(metric) mad = np.nanmedian(np.abs(metric - med)) # Plot the metric as a function of time ax.plot(time, metric, 'k.', alpha=0.5, ms=2) ax.plot(time, metric, 'k-', alpha=0.1, lw=0.5) ylim = (-0.2, 1.0) ax.margins(0, None) ax.axhline(0, color='k', lw=1, alpha=0.5) ax.set_ylim(*ylim) if kind == 'masked' and depth == 0.0001: ax.set_xlabel('Time (days)', fontsize=14) else: ax.set_xticklabels([]) # Plot the histogram rng = (max(ylim[0], np.nanmin(metric)), min(ylim[1], np.nanmax(metric))) axh.hist(metric, bins=30, range=rng, orientation="horizontal", histtype="step", fill=False, color='k') axh.axhline(med, color=color, ls='-', lw=1) axh.axhspan(med - mad, med + mad, color=color, alpha=0.1) axh.axhline(0, color='k', lw=1, alpha=0.5) axh.yaxis.tick_right() axh.set_ylim(*ax.get_ylim()) axh.set_xticklabels([]) bbox = dict(fc="w", ec="1", alpha=0.5) info = r"$\mathrm{med}=%.3f$" % med + \ "\n" + r"$\mathrm{mad}=%.3f$" % mad axh.annotate(info, xy=(0.1, 0.925), xycoords='axes fraction', ha="left", va="top", bbox=bbox, color=color) bbox = dict(fc="w", ec="1", alpha=0.95) ax.annotate("%s overfitting metric" % kind, xy=(1-0.035, 0.92), xycoords='axes fraction', ha='right', va='top', bbox=bbox, color=color) pl.figtext(0.025, 0.77, "depth = 0.01", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.48, "depth = 0.001", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.19, "depth = 0.0001", rotation=90, ha='left', va='center', fontsize=18) ovr.fig.savefig(figname + '_overfit.pdf') 
log.info("Saved plot to %s_overfit.pdf" % figname) pl.close() return Overfitting(O1, O2, O3, O4, O5, figname + '_overfit.pdf')
python
def overfit(self, tau=None, plot=True, clobber=False, w=9, **kwargs): r""" Compute the masked & unmasked overfitting metrics for the light curve. This routine injects a transit model given by `tau` at every cadence in the light curve and recovers the transit depth when (1) leaving the transit unmasked and (2) masking the transit prior to performing regression. :param tau: A function or callable that accepts two arguments, \ `time` and `t0`, and returns an array corresponding to a \ zero-mean, unit depth transit model centered at \ `t0` and evaluated at `time`. \ The easiest way to provide this is to use an instance of \ :py:class:`everest.transit.TransitShape`. Default is \ :py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \ with solar-like limb darkening and a duratio of 0.1 days. :param bool plot: Plot the results as a PDF? Default :py:obj:`True` :param bool clobber: Overwrite the results if present? Default \ :py:obj:`False` :param int w: The size of the masking window in cadences for \ computing the masked overfitting metric. Default `9` \ (about 4.5 hours for `K2` long cadence). :returns: An instance of `everest.basecamp.Overfitting`. """ fname = os.path.join(self.dir, self.name + '_overfit.npz') figname = os.path.join(self.dir, self.name) # Compute if not os.path.exists(fname) or clobber: # Baseline med = np.nanmedian(self.fraw) # Default transit model if tau is None: tau = TransitShape(dur=0.1) # The overfitting metrics O1 = [None for brkpt in self.breakpoints] O2 = [None for brkpt in self.breakpoints] O3 = [None for brkpt in self.breakpoints] O4 = [None for brkpt in self.breakpoints] O5 = [None for brkpt in self.breakpoints] # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) time = self.time[m] ferr = self.fraw_err[m] / med y = self.fraw[m] / med - 1 # The metrics we're computing here O1[b] = np.zeros(len(y)) * np.nan O2[b] = np.zeros(len(y)) * np.nan O3[b] = np.zeros(len(y)) * np.nan O4[b] = np.zeros(len(y)) * np.nan O5[b] = np.zeros(len(y)) * np.nan # Compute the astrophysical covariance and its inverse log.info("Computing the covariance...") if self.kernel == 'Basic': wh, am, ta = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ta] elif self.kernel == 'QuasiPeriodic': wh, am, ga, pe = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ga, pe] K = GetCovariance(self.kernel, kernel_params, time, ferr) Kinv = cho_solve((cholesky(K), False), np.eye(len(time))) # Loop over all orders log.info("Computing some large matrices...") X = [None for n in range(self.pld_order)] XL = [None for n in range(self.pld_order)] XLX = [None for n in range(self.pld_order)] for n in range(self.pld_order): if (self.lam_idx >= n) and (self.lam[b][n] is not None): X[n] = self.X(n, m, **kwargs) XL[n] = (self.lam[b][n] / med ** 2) * X[n] XLX[n] = np.dot(XL[n], X[n].T) X = np.hstack(X) XL = np.hstack(XL) XLX = np.sum(XLX, axis=0) # The full covariance C = XLX + K # The unmasked linear problem log.info("Solving the unmasked linear problem...") m = np.dot(XLX, np.linalg.solve(C, y)) m -= np.nanmedian(m) f = y - m R = np.linalg.solve(C, XLX.T).T # The masked linear problem log.info("Solving the masked linear problem...") A = MaskSolve(C, y, w=w) # Now loop through and compute the metric log.info("Computing the overfitting metrics...") for n in prange(len(y)): # # *** Unmasked overfitting metric *** # # Evaluate the sparse transit model TAU = tau(time, t0=time[n]) i = np.where(TAU < 0)[0] TAU 
= TAU.reshape(-1, 1) # Fast sparse algebra AA = np.dot(np.dot(TAU[i].T, Kinv[i, :][:, i]), TAU[i]) BB = np.dot(TAU[i].T, Kinv[i, :]) CC = TAU - np.dot(R[:, i], TAU[i]) O1[b][n] = AA O2[b][n] = np.dot(BB, CC) O3[b][n] = np.dot(BB, f) O4[b][n] = np.dot(BB, y) # # *** Masked overfitting metric *** # # The current mask and mask centerpoint mask = np.arange(n, n + w) j = n + (w + 1) // 2 - 1 if j >= len(y) - w: continue # The regularized design matrix # This is the same as # XLmX[:, n - 1] = \ # np.dot(XL, np.delete(X, mask, axis=0).T)[:, n - 1] if n == 0: XLmX = np.dot(XL, np.delete(X, mask, axis=0).T) else: XLmX[:, n - 1] = np.dot(XL, X[n - 1, :].T) # The linear solution to this step m = np.dot(XLmX, A[n]) # Evaluate the sparse transit model TAU = tau(time, t0=time[j]) i = np.where(TAU < 0)[0] TAU = TAU[i].reshape(-1, 1) # Dot the transit model in den = np.dot(np.dot(TAU.T, Kinv[i, :][:, i]), TAU) num = np.dot(TAU.T, Kinv[i, :]) # Compute the overfitting metric # Divide this number by a depth # to get the overfitting for that # particular depth. O5[b][j] = -np.dot(num, y - m) / den # Save! np.savez(fname, O1=O1, O2=O2, O3=O3, O4=O4, O5=O5) else: data = np.load(fname) O1 = data['O1'] O2 = data['O2'] O3 = data['O3'] O4 = data['O4'] O5 = data['O5'] # Plot if plot and (clobber or not os.path.exists(figname + '_overfit.pdf')): log.info("Plotting the overfitting metrics...") # Masked time array time = self.apply_mask(self.time) # Plot the final corrected light curve ovr = OVERFIT() self.plot_info(ovr) # Loop over the two metrics for kind, axes, axesh in zip(['unmasked', 'masked'], [ovr.axes1, ovr.axes2], [ovr.axes1h, ovr.axes2h]): # Loop over three depths for depth, ax, axh in zip([0.01, 0.001, 0.0001], axes, axesh): # Get the metric if kind == 'unmasked': metric = 1 - (np.hstack(O2) + np.hstack(O3) / depth) / np.hstack(O1) color = 'r' elif kind == 'masked': metric = np.hstack(O5) / depth color = 'b' else: raise ValueError("Invalid metric.") # Median and median absolute deviation med = np.nanmedian(metric) mad = np.nanmedian(np.abs(metric - med)) # Plot the metric as a function of time ax.plot(time, metric, 'k.', alpha=0.5, ms=2) ax.plot(time, metric, 'k-', alpha=0.1, lw=0.5) ylim = (-0.2, 1.0) ax.margins(0, None) ax.axhline(0, color='k', lw=1, alpha=0.5) ax.set_ylim(*ylim) if kind == 'masked' and depth == 0.0001: ax.set_xlabel('Time (days)', fontsize=14) else: ax.set_xticklabels([]) # Plot the histogram rng = (max(ylim[0], np.nanmin(metric)), min(ylim[1], np.nanmax(metric))) axh.hist(metric, bins=30, range=rng, orientation="horizontal", histtype="step", fill=False, color='k') axh.axhline(med, color=color, ls='-', lw=1) axh.axhspan(med - mad, med + mad, color=color, alpha=0.1) axh.axhline(0, color='k', lw=1, alpha=0.5) axh.yaxis.tick_right() axh.set_ylim(*ax.get_ylim()) axh.set_xticklabels([]) bbox = dict(fc="w", ec="1", alpha=0.5) info = r"$\mathrm{med}=%.3f$" % med + \ "\n" + r"$\mathrm{mad}=%.3f$" % mad axh.annotate(info, xy=(0.1, 0.925), xycoords='axes fraction', ha="left", va="top", bbox=bbox, color=color) bbox = dict(fc="w", ec="1", alpha=0.95) ax.annotate("%s overfitting metric" % kind, xy=(1-0.035, 0.92), xycoords='axes fraction', ha='right', va='top', bbox=bbox, color=color) pl.figtext(0.025, 0.77, "depth = 0.01", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.48, "depth = 0.001", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.19, "depth = 0.0001", rotation=90, ha='left', va='center', fontsize=18) ovr.fig.savefig(figname + '_overfit.pdf') 
log.info("Saved plot to %s_overfit.pdf" % figname) pl.close() return Overfitting(O1, O2, O3, O4, O5, figname + '_overfit.pdf')
[ "def", "overfit", "(", "self", ",", "tau", "=", "None", ",", "plot", "=", "True", ",", "clobber", "=", "False", ",", "w", "=", "9", ",", "*", "*", "kwargs", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'_overfit.npz'", ")", "figname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", ")", "# Compute", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", "or", "clobber", ":", "# Baseline", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "fraw", ")", "# Default transit model", "if", "tau", "is", "None", ":", "tau", "=", "TransitShape", "(", "dur", "=", "0.1", ")", "# The overfitting metrics", "O1", "=", "[", "None", "for", "brkpt", "in", "self", ".", "breakpoints", "]", "O2", "=", "[", "None", "for", "brkpt", "in", "self", ".", "breakpoints", "]", "O3", "=", "[", "None", "for", "brkpt", "in", "self", ".", "breakpoints", "]", "O4", "=", "[", "None", "for", "brkpt", "in", "self", ".", "breakpoints", "]", "O5", "=", "[", "None", "for", "brkpt", "in", "self", ".", "breakpoints", "]", "# Loop over all chunks", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Masks for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ",", "pad", "=", "False", ")", "time", "=", "self", ".", "time", "[", "m", "]", "ferr", "=", "self", ".", "fraw_err", "[", "m", "]", "/", "med", "y", "=", "self", ".", "fraw", "[", "m", "]", "/", "med", "-", "1", "# The metrics we're computing here", "O1", "[", "b", "]", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "*", "np", ".", "nan", "O2", "[", "b", "]", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "*", "np", ".", "nan", "O3", "[", "b", "]", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "*", "np", ".", "nan", "O4", "[", "b", "]", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "*", "np", ".", "nan", "O5", "[", "b", "]", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "*", "np", ".", "nan", "# Compute the astrophysical covariance and its inverse", "log", ".", "info", "(", "\"Computing the covariance...\"", ")", "if", "self", ".", "kernel", "==", "'Basic'", ":", "wh", ",", "am", ",", "ta", "=", "self", ".", "kernel_params", "wh", "/=", "med", "am", "/=", "med", "kernel_params", "=", "[", "wh", ",", "am", ",", "ta", "]", "elif", "self", ".", "kernel", "==", "'QuasiPeriodic'", ":", "wh", ",", "am", ",", "ga", ",", "pe", "=", "self", ".", "kernel_params", "wh", "/=", "med", "am", "/=", "med", "kernel_params", "=", "[", "wh", ",", "am", ",", "ga", ",", "pe", "]", "K", "=", "GetCovariance", "(", "self", ".", "kernel", ",", "kernel_params", ",", "time", ",", "ferr", ")", "Kinv", "=", "cho_solve", "(", "(", "cholesky", "(", "K", ")", ",", "False", ")", ",", "np", ".", "eye", "(", "len", "(", "time", ")", ")", ")", "# Loop over all orders", "log", ".", "info", "(", "\"Computing some large matrices...\"", ")", "X", "=", "[", "None", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", "]", "XL", "=", "[", "None", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", "]", "XLX", "=", "[", "None", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", "]", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "if", "(", "self", ".", "lam_idx", ">=", "n", ")", "and", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "is", "not", "None", ")", ":", "X", "[", "n", "]", "=", "self", ".", "X", "(", "n", ",", "m", 
",", "*", "*", "kwargs", ")", "XL", "[", "n", "]", "=", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "/", "med", "**", "2", ")", "*", "X", "[", "n", "]", "XLX", "[", "n", "]", "=", "np", ".", "dot", "(", "XL", "[", "n", "]", ",", "X", "[", "n", "]", ".", "T", ")", "X", "=", "np", ".", "hstack", "(", "X", ")", "XL", "=", "np", ".", "hstack", "(", "XL", ")", "XLX", "=", "np", ".", "sum", "(", "XLX", ",", "axis", "=", "0", ")", "# The full covariance", "C", "=", "XLX", "+", "K", "# The unmasked linear problem", "log", ".", "info", "(", "\"Solving the unmasked linear problem...\"", ")", "m", "=", "np", ".", "dot", "(", "XLX", ",", "np", ".", "linalg", ".", "solve", "(", "C", ",", "y", ")", ")", "m", "-=", "np", ".", "nanmedian", "(", "m", ")", "f", "=", "y", "-", "m", "R", "=", "np", ".", "linalg", ".", "solve", "(", "C", ",", "XLX", ".", "T", ")", ".", "T", "# The masked linear problem", "log", ".", "info", "(", "\"Solving the masked linear problem...\"", ")", "A", "=", "MaskSolve", "(", "C", ",", "y", ",", "w", "=", "w", ")", "# Now loop through and compute the metric", "log", ".", "info", "(", "\"Computing the overfitting metrics...\"", ")", "for", "n", "in", "prange", "(", "len", "(", "y", ")", ")", ":", "#", "# *** Unmasked overfitting metric ***", "#", "# Evaluate the sparse transit model", "TAU", "=", "tau", "(", "time", ",", "t0", "=", "time", "[", "n", "]", ")", "i", "=", "np", ".", "where", "(", "TAU", "<", "0", ")", "[", "0", "]", "TAU", "=", "TAU", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Fast sparse algebra", "AA", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "TAU", "[", "i", "]", ".", "T", ",", "Kinv", "[", "i", ",", ":", "]", "[", ":", ",", "i", "]", ")", ",", "TAU", "[", "i", "]", ")", "BB", "=", "np", ".", "dot", "(", "TAU", "[", "i", "]", ".", "T", ",", "Kinv", "[", "i", ",", ":", "]", ")", "CC", "=", "TAU", "-", "np", ".", "dot", "(", "R", "[", ":", ",", "i", "]", ",", "TAU", "[", "i", "]", ")", "O1", "[", "b", "]", "[", "n", "]", "=", "AA", "O2", "[", "b", "]", "[", "n", "]", "=", "np", ".", "dot", "(", "BB", ",", "CC", ")", "O3", "[", "b", "]", "[", "n", "]", "=", "np", ".", "dot", "(", "BB", ",", "f", ")", "O4", "[", "b", "]", "[", "n", "]", "=", "np", ".", "dot", "(", "BB", ",", "y", ")", "#", "# *** Masked overfitting metric ***", "#", "# The current mask and mask centerpoint", "mask", "=", "np", ".", "arange", "(", "n", ",", "n", "+", "w", ")", "j", "=", "n", "+", "(", "w", "+", "1", ")", "//", "2", "-", "1", "if", "j", ">=", "len", "(", "y", ")", "-", "w", ":", "continue", "# The regularized design matrix", "# This is the same as", "# XLmX[:, n - 1] = \\", "# np.dot(XL, np.delete(X, mask, axis=0).T)[:, n - 1]", "if", "n", "==", "0", ":", "XLmX", "=", "np", ".", "dot", "(", "XL", ",", "np", ".", "delete", "(", "X", ",", "mask", ",", "axis", "=", "0", ")", ".", "T", ")", "else", ":", "XLmX", "[", ":", ",", "n", "-", "1", "]", "=", "np", ".", "dot", "(", "XL", ",", "X", "[", "n", "-", "1", ",", ":", "]", ".", "T", ")", "# The linear solution to this step", "m", "=", "np", ".", "dot", "(", "XLmX", ",", "A", "[", "n", "]", ")", "# Evaluate the sparse transit model", "TAU", "=", "tau", "(", "time", ",", "t0", "=", "time", "[", "j", "]", ")", "i", "=", "np", ".", "where", "(", "TAU", "<", "0", ")", "[", "0", "]", "TAU", "=", "TAU", "[", "i", "]", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Dot the transit model in", "den", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "TAU", ".", "T", ",", "Kinv", "[", "i", ",", ":", "]", "[", ":", ",", "i", 
"]", ")", ",", "TAU", ")", "num", "=", "np", ".", "dot", "(", "TAU", ".", "T", ",", "Kinv", "[", "i", ",", ":", "]", ")", "# Compute the overfitting metric", "# Divide this number by a depth", "# to get the overfitting for that", "# particular depth.", "O5", "[", "b", "]", "[", "j", "]", "=", "-", "np", ".", "dot", "(", "num", ",", "y", "-", "m", ")", "/", "den", "# Save!", "np", ".", "savez", "(", "fname", ",", "O1", "=", "O1", ",", "O2", "=", "O2", ",", "O3", "=", "O3", ",", "O4", "=", "O4", ",", "O5", "=", "O5", ")", "else", ":", "data", "=", "np", ".", "load", "(", "fname", ")", "O1", "=", "data", "[", "'O1'", "]", "O2", "=", "data", "[", "'O2'", "]", "O3", "=", "data", "[", "'O3'", "]", "O4", "=", "data", "[", "'O4'", "]", "O5", "=", "data", "[", "'O5'", "]", "# Plot", "if", "plot", "and", "(", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "figname", "+", "'_overfit.pdf'", ")", ")", ":", "log", ".", "info", "(", "\"Plotting the overfitting metrics...\"", ")", "# Masked time array", "time", "=", "self", ".", "apply_mask", "(", "self", ".", "time", ")", "# Plot the final corrected light curve", "ovr", "=", "OVERFIT", "(", ")", "self", ".", "plot_info", "(", "ovr", ")", "# Loop over the two metrics", "for", "kind", ",", "axes", ",", "axesh", "in", "zip", "(", "[", "'unmasked'", ",", "'masked'", "]", ",", "[", "ovr", ".", "axes1", ",", "ovr", ".", "axes2", "]", ",", "[", "ovr", ".", "axes1h", ",", "ovr", ".", "axes2h", "]", ")", ":", "# Loop over three depths", "for", "depth", ",", "ax", ",", "axh", "in", "zip", "(", "[", "0.01", ",", "0.001", ",", "0.0001", "]", ",", "axes", ",", "axesh", ")", ":", "# Get the metric", "if", "kind", "==", "'unmasked'", ":", "metric", "=", "1", "-", "(", "np", ".", "hstack", "(", "O2", ")", "+", "np", ".", "hstack", "(", "O3", ")", "/", "depth", ")", "/", "np", ".", "hstack", "(", "O1", ")", "color", "=", "'r'", "elif", "kind", "==", "'masked'", ":", "metric", "=", "np", ".", "hstack", "(", "O5", ")", "/", "depth", "color", "=", "'b'", "else", ":", "raise", "ValueError", "(", "\"Invalid metric.\"", ")", "# Median and median absolute deviation", "med", "=", "np", ".", "nanmedian", "(", "metric", ")", "mad", "=", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "metric", "-", "med", ")", ")", "# Plot the metric as a function of time", "ax", ".", "plot", "(", "time", ",", "metric", ",", "'k.'", ",", "alpha", "=", "0.5", ",", "ms", "=", "2", ")", "ax", ".", "plot", "(", "time", ",", "metric", ",", "'k-'", ",", "alpha", "=", "0.1", ",", "lw", "=", "0.5", ")", "ylim", "=", "(", "-", "0.2", ",", "1.0", ")", "ax", ".", "margins", "(", "0", ",", "None", ")", "ax", ".", "axhline", "(", "0", ",", "color", "=", "'k'", ",", "lw", "=", "1", ",", "alpha", "=", "0.5", ")", "ax", ".", "set_ylim", "(", "*", "ylim", ")", "if", "kind", "==", "'masked'", "and", "depth", "==", "0.0001", ":", "ax", ".", "set_xlabel", "(", "'Time (days)'", ",", "fontsize", "=", "14", ")", "else", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "# Plot the histogram", "rng", "=", "(", "max", "(", "ylim", "[", "0", "]", ",", "np", ".", "nanmin", "(", "metric", ")", ")", ",", "min", "(", "ylim", "[", "1", "]", ",", "np", ".", "nanmax", "(", "metric", ")", ")", ")", "axh", ".", "hist", "(", "metric", ",", "bins", "=", "30", ",", "range", "=", "rng", ",", "orientation", "=", "\"horizontal\"", ",", "histtype", "=", "\"step\"", ",", "fill", "=", "False", ",", "color", "=", "'k'", ")", "axh", ".", "axhline", "(", "med", ",", "color", "=", "color", ",", "ls", "=", "'-'", ",", 
"lw", "=", "1", ")", "axh", ".", "axhspan", "(", "med", "-", "mad", ",", "med", "+", "mad", ",", "color", "=", "color", ",", "alpha", "=", "0.1", ")", "axh", ".", "axhline", "(", "0", ",", "color", "=", "'k'", ",", "lw", "=", "1", ",", "alpha", "=", "0.5", ")", "axh", ".", "yaxis", ".", "tick_right", "(", ")", "axh", ".", "set_ylim", "(", "*", "ax", ".", "get_ylim", "(", ")", ")", "axh", ".", "set_xticklabels", "(", "[", "]", ")", "bbox", "=", "dict", "(", "fc", "=", "\"w\"", ",", "ec", "=", "\"1\"", ",", "alpha", "=", "0.5", ")", "info", "=", "r\"$\\mathrm{med}=%.3f$\"", "%", "med", "+", "\"\\n\"", "+", "r\"$\\mathrm{mad}=%.3f$\"", "%", "mad", "axh", ".", "annotate", "(", "info", ",", "xy", "=", "(", "0.1", ",", "0.925", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "\"left\"", ",", "va", "=", "\"top\"", ",", "bbox", "=", "bbox", ",", "color", "=", "color", ")", "bbox", "=", "dict", "(", "fc", "=", "\"w\"", ",", "ec", "=", "\"1\"", ",", "alpha", "=", "0.95", ")", "ax", ".", "annotate", "(", "\"%s overfitting metric\"", "%", "kind", ",", "xy", "=", "(", "1", "-", "0.035", ",", "0.92", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'top'", ",", "bbox", "=", "bbox", ",", "color", "=", "color", ")", "pl", ".", "figtext", "(", "0.025", ",", "0.77", ",", "\"depth = 0.01\"", ",", "rotation", "=", "90", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "fontsize", "=", "18", ")", "pl", ".", "figtext", "(", "0.025", ",", "0.48", ",", "\"depth = 0.001\"", ",", "rotation", "=", "90", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "fontsize", "=", "18", ")", "pl", ".", "figtext", "(", "0.025", ",", "0.19", ",", "\"depth = 0.0001\"", ",", "rotation", "=", "90", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "fontsize", "=", "18", ")", "ovr", ".", "fig", ".", "savefig", "(", "figname", "+", "'_overfit.pdf'", ")", "log", ".", "info", "(", "\"Saved plot to %s_overfit.pdf\"", "%", "figname", ")", "pl", ".", "close", "(", ")", "return", "Overfitting", "(", "O1", ",", "O2", ",", "O3", ",", "O4", ",", "O5", ",", "figname", "+", "'_overfit.pdf'", ")" ]
r""" Compute the masked & unmasked overfitting metrics for the light curve. This routine injects a transit model given by `tau` at every cadence in the light curve and recovers the transit depth when (1) leaving the transit unmasked and (2) masking the transit prior to performing regression. :param tau: A function or callable that accepts two arguments, \ `time` and `t0`, and returns an array corresponding to a \ zero-mean, unit depth transit model centered at \ `t0` and evaluated at `time`. \ The easiest way to provide this is to use an instance of \ :py:class:`everest.transit.TransitShape`. Default is \ :py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \ with solar-like limb darkening and a duratio of 0.1 days. :param bool plot: Plot the results as a PDF? Default :py:obj:`True` :param bool clobber: Overwrite the results if present? Default \ :py:obj:`False` :param int w: The size of the masking window in cadences for \ computing the masked overfitting metric. Default `9` \ (about 4.5 hours for `K2` long cadence). :returns: An instance of `everest.basecamp.Overfitting`.
[ "r", "Compute", "the", "masked", "&", "unmasked", "overfitting", "metrics", "for", "the", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L814-L1075
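A hedged sketch of driving the overfitting diagnostic and reading back its cached metrics. Everything here mirrors the code above — the .npz filename, and the metric formulas unmasked = 1 - (O2 + O3/depth)/O1 and masked = O5/depth from the plotting branch; `star` is still the assumed model instance from the earlier examples.

import os
import numpy as np

# Run (or load) the diagnostic without producing the PDF.
ovr = star.overfit(plot=False)

# The raw per-cadence metrics are cached next to the model output.
fname = os.path.join(star.dir, star.name + '_overfit.npz')
data = np.load(fname, allow_pickle=True)  # per-chunk lists may be object arrays

depth = 0.001  # evaluate the metrics at a 1000 ppm transit depth
unmasked = 1 - (np.hstack(data['O2']) +
                np.hstack(data['O3']) / depth) / np.hstack(data['O1'])
masked = np.hstack(data['O5']) / depth
print("median unmasked overfitting: %.3f" % np.nanmedian(unmasked))
print("median masked overfitting:   %.3f" % np.nanmedian(masked))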
rodluger/everest
everest/basecamp.py
Basecamp.lnlike
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50., full_output=False): r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`. """ lnl = 0 # Re-factorize the Cholesky decomposition? try: self._ll_info except AttributeError: refactor = True if refactor: # Smooth the light curve and reset the outlier mask t = np.delete(self.time, np.concatenate([self.nanmask, self.badmask])) f = np.delete(self.flux, np.concatenate([self.nanmask, self.badmask])) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(self.time == t[i]) for i in pos_inds]) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(self.time == t[i]) for i in neg_inds]) outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) self.outmask = np.concatenate([neg_inds, pos_inds]) self.transitmask = np.array([], dtype=int) # Now re-factorize the Cholesky decomposition self._ll_info = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(self.pld_order): XM = self.X(n, m) A += self.lam[b][n] * np.dot(XM, XM.T) K += A self._ll_info[b] = [cho_factor(K), m] # Reset the outlier masks self.outmask = outmask self.transitmask = transitmask # Compute the likelihood for each chunk amp = [None for b in self.breakpoints] var = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Get the inverse covariance and the mask CDK = self._ll_info[b][0] m = self._ll_info[b][1] # Compute the maximum likelihood model amplitude # (for transits, this is the transit depth) var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m])) amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m])) # Compute the residual r = self.fraw[m] - amp[b] * model[m] # Finally, compute the likelihood lnl += -0.5 * np.dot(r, cho_solve(CDK, r)) if full_output: # We need to multiply the Gaussians for all chunks to get the # amplitude and amplitude variance for the entire dataset vari = var[0] ampi = amp[0] for v, a in zip(var[1:], amp[1:]): ampi = (ampi * v + a * vari) / (vari + v) vari = vari * v / (vari + v) med = np.nanmedian(self.fraw) return lnl, ampi / med, vari / med ** 2 else: return lnl
python
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50., full_output=False): r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`. """ lnl = 0 # Re-factorize the Cholesky decomposition? try: self._ll_info except AttributeError: refactor = True if refactor: # Smooth the light curve and reset the outlier mask t = np.delete(self.time, np.concatenate([self.nanmask, self.badmask])) f = np.delete(self.flux, np.concatenate([self.nanmask, self.badmask])) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(self.time == t[i]) for i in pos_inds]) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(self.time == t[i]) for i in neg_inds]) outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) self.outmask = np.concatenate([neg_inds, pos_inds]) self.transitmask = np.array([], dtype=int) # Now re-factorize the Cholesky decomposition self._ll_info = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(self.pld_order): XM = self.X(n, m) A += self.lam[b][n] * np.dot(XM, XM.T) K += A self._ll_info[b] = [cho_factor(K), m] # Reset the outlier masks self.outmask = outmask self.transitmask = transitmask # Compute the likelihood for each chunk amp = [None for b in self.breakpoints] var = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Get the inverse covariance and the mask CDK = self._ll_info[b][0] m = self._ll_info[b][1] # Compute the maximum likelihood model amplitude # (for transits, this is the transit depth) var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m])) amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m])) # Compute the residual r = self.fraw[m] - amp[b] * model[m] # Finally, compute the likelihood lnl += -0.5 * np.dot(r, cho_solve(CDK, r)) if full_output: # We need to multiply the Gaussians for all chunks to get the # amplitude and amplitude variance for the entire dataset vari = var[0] ampi = amp[0] for v, a in zip(var[1:], amp[1:]): ampi = (ampi * v + a * vari) / (vari + v) vari = vari * v / (vari + v) med = np.nanmedian(self.fraw) return lnl, ampi / med, vari / med ** 2 else: return lnl
[ "def", "lnlike", "(", "self", ",", "model", ",", "refactor", "=", "False", ",", "pos_tol", "=", "2.5", ",", "neg_tol", "=", "50.", ",", "full_output", "=", "False", ")", ":", "lnl", "=", "0", "# Re-factorize the Cholesky decomposition?", "try", ":", "self", ".", "_ll_info", "except", "AttributeError", ":", "refactor", "=", "True", "if", "refactor", ":", "# Smooth the light curve and reset the outlier mask", "t", "=", "np", ".", "delete", "(", "self", ".", "time", ",", "np", ".", "concatenate", "(", "[", "self", ".", "nanmask", ",", "self", ".", "badmask", "]", ")", ")", "f", "=", "np", ".", "delete", "(", "self", ".", "flux", ",", "np", ".", "concatenate", "(", "[", "self", ".", "nanmask", ",", "self", ".", "badmask", "]", ")", ")", "f", "=", "SavGol", "(", "f", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "pos_inds", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "pos_tol", "*", "MAD", ")", ")", "[", "0", "]", "pos_inds", "=", "np", ".", "array", "(", "[", "np", ".", "argmax", "(", "self", ".", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "pos_inds", "]", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "neg_inds", "=", "np", ".", "where", "(", "(", "f", "<", "med", "-", "neg_tol", "*", "MAD", ")", ")", "[", "0", "]", "neg_inds", "=", "np", ".", "array", "(", "[", "np", ".", "argmax", "(", "self", ".", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "neg_inds", "]", ")", "outmask", "=", "np", ".", "array", "(", "self", ".", "outmask", ")", "transitmask", "=", "np", ".", "array", "(", "self", ".", "transitmask", ")", "self", ".", "outmask", "=", "np", ".", "concatenate", "(", "[", "neg_inds", ",", "pos_inds", "]", ")", "self", ".", "transitmask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "# Now re-factorize the Cholesky decomposition", "self", ".", "_ll_info", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Masks for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ",", "pad", "=", "False", ")", "# This block of the masked covariance matrix", "K", "=", "GetCovariance", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "self", ".", "time", "[", "m", "]", ",", "self", ".", "fraw_err", "[", "m", "]", ")", "# The masked X.L.X^T term", "A", "=", "np", ".", "zeros", "(", "(", "len", "(", "m", ")", ",", "len", "(", "m", ")", ")", ")", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "XM", "=", "self", ".", "X", "(", "n", ",", "m", ")", "A", "+=", "self", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XM", ",", "XM", ".", "T", ")", "K", "+=", "A", "self", ".", "_ll_info", "[", "b", "]", "=", "[", "cho_factor", "(", "K", ")", ",", "m", "]", "# Reset the outlier masks", "self", ".", "outmask", "=", "outmask", "self", ".", "transitmask", "=", "transitmask", "# Compute the likelihood for each chunk", "amp", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "var", "=", "[", "None", "for", "b", "in", "self", ".", "breakpoints", "]", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "# Get the inverse covariance and the mask", "CDK", "=", "self", ".", "_ll_info", "[", "b", "]", "[", "0", "]", "m", "=", "self", ".", 
"_ll_info", "[", "b", "]", "[", "1", "]", "# Compute the maximum likelihood model amplitude", "# (for transits, this is the transit depth)", "var", "[", "b", "]", "=", "1.", "/", "np", ".", "dot", "(", "model", "[", "m", "]", ",", "cho_solve", "(", "CDK", ",", "model", "[", "m", "]", ")", ")", "amp", "[", "b", "]", "=", "var", "[", "b", "]", "*", "np", ".", "dot", "(", "model", "[", "m", "]", ",", "cho_solve", "(", "CDK", ",", "self", ".", "fraw", "[", "m", "]", ")", ")", "# Compute the residual", "r", "=", "self", ".", "fraw", "[", "m", "]", "-", "amp", "[", "b", "]", "*", "model", "[", "m", "]", "# Finally, compute the likelihood", "lnl", "+=", "-", "0.5", "*", "np", ".", "dot", "(", "r", ",", "cho_solve", "(", "CDK", ",", "r", ")", ")", "if", "full_output", ":", "# We need to multiply the Gaussians for all chunks to get the", "# amplitude and amplitude variance for the entire dataset", "vari", "=", "var", "[", "0", "]", "ampi", "=", "amp", "[", "0", "]", "for", "v", ",", "a", "in", "zip", "(", "var", "[", "1", ":", "]", ",", "amp", "[", "1", ":", "]", ")", ":", "ampi", "=", "(", "ampi", "*", "v", "+", "a", "*", "vari", ")", "/", "(", "vari", "+", "v", ")", "vari", "=", "vari", "*", "v", "/", "(", "vari", "+", "v", ")", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "fraw", ")", "return", "lnl", ",", "ampi", "/", "med", ",", "vari", "/", "med", "**", "2", "else", ":", "return", "lnl" ]
r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`.
[ "r", "Return", "the", "likelihood", "of", "the", "astrophysical", "model", "model", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L1077-L1178
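A hedged sketch of measuring a transit depth with lnlike. Per the docstring, the model vector must match self.time, and with full_output=True the maximum-likelihood amplitude is the transit depth. Following the zero-baseline, unit-depth convention used elsewhere in this repo (e.g. in overfit), the shape is built with everest.transit.Transit; the ephemeris values are illustrative only.

import numpy as np
from everest.transit import Transit

# Unit-depth, zero-baseline transit shape on the model's time grid.
d0 = 0.001
trn = (Transit(star.time, t0=2307.5, per=3.5, dur=0.1, depth=d0) - 1) / d0

# The amplitude returned is the transit depth; its variance is the
# depth uncertainty (both in relative flux units).
lnl, depth, depth_var = star.lnlike(trn, full_output=True)
print("ln-like: %.1f, depth: %.5f +/- %.5f" % (lnl, depth, np.sqrt(depth_var)))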
rodluger/everest
everest/inject.py
Inject
def Inject(ID, inj_model='nPLD', t0=None, per=None, dur=0.1, depth=0.001, mask=False, trn_win=5, poly_order=3, make_fits=False, **kwargs): ''' Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`. :param int ID: The target id :param str inj_model: The name of the :py:obj:`everest` model to run. \ Default `"nPLD"` :param float t0: The transit ephemeris in days. Default is to draw from \ the uniform distributon [0., :py:obj:`per`) :param float per: The injected planet period in days. Default is to draw \ from the uniform distribution [2, 10] :param float dur: The transit duration in days. Must be in the range \ [0.05, 0.5]. Default 0.1 :param float depth: The fractional transit depth. Default 0.001 :param bool mask: Explicitly mask the in-transit cadences when computing \ the PLD model? Default :py:obj:`False` :param float trn_win: The size of the transit window in units of the \ transit duration :param int poly_order: The order of the polynomial used to fit the \ continuum ''' # Randomize the planet params if per is None: a = 3. b = 10. per = a + (b - a) * np.random.random() if t0 is None: t0 = per * np.random.random() # Get the actual class _model = eval(inj_model) inject = {'t0': t0, 'per': per, 'dur': dur, 'depth': depth, 'mask': mask, 'poly_order': poly_order, 'trn_win': trn_win} # Define the injection class class Injection(_model): ''' The :py:obj:`Injection` class is a special subclass of a user-selected :py:obj:`everest` model. See :py:func:`Inject` for more details. ''' def __init__(self, *args, **kwargs): ''' ''' self.inject = kwargs.pop('inject', None) self.parent_class = kwargs.pop('parent_class', None) self.kwargs = kwargs super(Injection, self).__init__(*args, **kwargs) @property def name(self): ''' ''' if self.inject['mask']: maskchar = 'M' else: maskchar = 'U' return '%s_Inject_%s%g' % (self.parent_class, maskchar, self.inject['depth']) def load_tpf(self): ''' Loads the target pixel files and injects transits at the pixel level. ''' # Load the TPF super(Injection, self).load_tpf() log.info("Injecting transits...") # Inject the transits into the regular data transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) for i in range(self.fpix.shape[1]): self.fpix[:, i] *= transit_model self.fraw = np.sum(self.fpix, axis=1) if self.inject['mask']: self.transitmask = np.array(list(set(np.concatenate( [self.transitmask, np.where(transit_model < 1.)[0]]))), dtype=int) # Update the PLD normalization self.get_norm() def recover_depth(self): ''' Recovers the injected transit depth from the long cadence data with a simple LLS solver. The results are all stored in the :py:obj:`inject` attribute of the model. 
''' # Control run transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) kwargs = dict(self.kwargs) kwargs.update({'clobber': False}) control = eval(self.parent_class)( self.ID, is_parent=True, **kwargs) control.fraw *= transit_model # Get params log.info("Recovering transit depth...") t0 = self.inject['t0'] per = self.inject['per'] dur = self.inject['dur'] depth = self.inject['depth'] trn_win = self.inject['trn_win'] poly_order = self.inject['poly_order'] for run, tag in zip([self, control], ['', '_control']): # Compute the model mask = np.array( list(set(np.concatenate([run.badmask, run.nanmask]))), dtype=int) flux = np.delete(run.flux / np.nanmedian(run.flux), mask) time = np.delete(run.time, mask) transit_model = (Transit(time, t0=t0, per=per, dur=dur, depth=depth) - 1) / depth # Count the transits t0 += np.ceil((time[0] - dur - t0) / per) * per ttimes0 = np.arange(t0, time[-1] + dur, per) tinds = [] for tt in ttimes0: # Get indices for this chunk inds = np.where(np.abs(time - tt) < trn_win * dur / 2.)[0] # Ensure there's a transit in this chunk, and that # there are enough points for the polynomial fit if np.any(transit_model[inds] < 0.) and \ len(inds) > poly_order: tinds.append(inds) # Our design matrix sz = (poly_order + 1) * len(tinds) X = np.empty((0, 1 + sz), dtype=float) Y = np.array([], dtype=float) T = np.array([], dtype=float) # Loop over all transits for i, inds in enumerate(tinds): # Get the transit model trnvec = transit_model[inds].reshape(-1, 1) # Normalize the time array t = time[inds] t = (t - t[0]) / (t[-1] - t[0]) # Cumulative arrays T = np.append(T, time[inds]) Y = np.append(Y, flux[inds]) # Polynomial vector polyvec = np.array( [t ** o for o in range(0, poly_order + 1)]).T # Update the design matrix with this chunk lzeros = np.zeros((len(t), i * (poly_order + 1))) rzeros = np.zeros( (len(t), sz - (i + 1) * (poly_order + 1))) chunk = np.hstack((trnvec, lzeros, polyvec, rzeros)) X = np.vstack((X, chunk)) # Get the relative depth A = np.dot(X.T, X) B = np.dot(X.T, Y) C = np.linalg.solve(A, B) rec_depth = C[0] # Get the uncertainties sig = 1.4826 * \ np.nanmedian(np.abs(flux - np.nanmedian(flux)) ) / np.nanmedian(flux) cov = sig ** 2 * np.linalg.solve(A, np.eye(A.shape[0])) err = np.sqrt(np.diag(cov)) rec_depth_err = err[0] # Store the results self.inject.update( {'rec_depth%s' % tag: rec_depth, 'rec_depth_err%s' % tag: rec_depth_err}) # Store the detrended, folded data D = (Y - np.dot(C[1:], X[:, 1:].T) + np.nanmedian(Y)) / np.nanmedian(Y) T = (T - t0 - per / 2.) % per - per / 2. self.inject.update( {'fold_time%s' % tag: T, 'fold_flux%s' % tag: D}) def plot_final(self, ax): ''' Plots the injection recovery results. 
''' from mpl_toolkits.axes_grid.inset_locator import inset_axes ax.axis('off') ax1 = inset_axes(ax, width="47%", height="100%", loc=6) ax2 = inset_axes(ax, width="47%", height="100%", loc=7) # Plot the recovered folded transits ax1.plot(self.inject['fold_time'], self.inject['fold_flux'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time']), np.max( self.inject['fold_time']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax1.plot(x, y, 'r-') ax1.annotate('INJECTED', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax1.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.margins(0, None) ax1.ticklabel_format(useOffset=False) # Plot the recovered folded transits (control) ax2.plot(self.inject['fold_time_control'], self.inject['fold_flux_control'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time_control']), np.max( self.inject['fold_time_control']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth_control']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax2.plot(x, y, 'r-') ax2.annotate('CONTROL', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax2.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth_control']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.margins(0, None) ax2.ticklabel_format(useOffset=False) N = int(0.995 * len(self.inject['fold_flux_control'])) hi, lo = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])][[N, -N]] fsort = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])] pad = (hi - lo) * 0.2 ylim = (lo - 2 * pad, hi + pad) ax2.set_ylim(ylim) ax1.set_ylim(ylim) ax2.set_yticklabels([]) for tick in ax1.get_xticklabels() + ax1.get_yticklabels() + \ ax2.get_xticklabels(): tick.set_fontsize(5) def finalize(self): ''' Calls the depth recovery routine at the end of the de-trending step. ''' super(Injection, self).finalize() self.recover_depth() return Injection(ID, inject=inject, parent_class=inj_model, make_fits=make_fits, **kwargs)
python
def Inject(ID, inj_model='nPLD', t0=None, per=None, dur=0.1, depth=0.001, mask=False, trn_win=5, poly_order=3, make_fits=False, **kwargs): ''' Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`. :param int ID: The target id :param str inj_model: The name of the :py:obj:`everest` model to run. \ Default `"nPLD"` :param float t0: The transit ephemeris in days. Default is to draw from \ the uniform distributon [0., :py:obj:`per`) :param float per: The injected planet period in days. Default is to draw \ from the uniform distribution [2, 10] :param float dur: The transit duration in days. Must be in the range \ [0.05, 0.5]. Default 0.1 :param float depth: The fractional transit depth. Default 0.001 :param bool mask: Explicitly mask the in-transit cadences when computing \ the PLD model? Default :py:obj:`False` :param float trn_win: The size of the transit window in units of the \ transit duration :param int poly_order: The order of the polynomial used to fit the \ continuum ''' # Randomize the planet params if per is None: a = 3. b = 10. per = a + (b - a) * np.random.random() if t0 is None: t0 = per * np.random.random() # Get the actual class _model = eval(inj_model) inject = {'t0': t0, 'per': per, 'dur': dur, 'depth': depth, 'mask': mask, 'poly_order': poly_order, 'trn_win': trn_win} # Define the injection class class Injection(_model): ''' The :py:obj:`Injection` class is a special subclass of a user-selected :py:obj:`everest` model. See :py:func:`Inject` for more details. ''' def __init__(self, *args, **kwargs): ''' ''' self.inject = kwargs.pop('inject', None) self.parent_class = kwargs.pop('parent_class', None) self.kwargs = kwargs super(Injection, self).__init__(*args, **kwargs) @property def name(self): ''' ''' if self.inject['mask']: maskchar = 'M' else: maskchar = 'U' return '%s_Inject_%s%g' % (self.parent_class, maskchar, self.inject['depth']) def load_tpf(self): ''' Loads the target pixel files and injects transits at the pixel level. ''' # Load the TPF super(Injection, self).load_tpf() log.info("Injecting transits...") # Inject the transits into the regular data transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) for i in range(self.fpix.shape[1]): self.fpix[:, i] *= transit_model self.fraw = np.sum(self.fpix, axis=1) if self.inject['mask']: self.transitmask = np.array(list(set(np.concatenate( [self.transitmask, np.where(transit_model < 1.)[0]]))), dtype=int) # Update the PLD normalization self.get_norm() def recover_depth(self): ''' Recovers the injected transit depth from the long cadence data with a simple LLS solver. The results are all stored in the :py:obj:`inject` attribute of the model. 
''' # Control run transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) kwargs = dict(self.kwargs) kwargs.update({'clobber': False}) control = eval(self.parent_class)( self.ID, is_parent=True, **kwargs) control.fraw *= transit_model # Get params log.info("Recovering transit depth...") t0 = self.inject['t0'] per = self.inject['per'] dur = self.inject['dur'] depth = self.inject['depth'] trn_win = self.inject['trn_win'] poly_order = self.inject['poly_order'] for run, tag in zip([self, control], ['', '_control']): # Compute the model mask = np.array( list(set(np.concatenate([run.badmask, run.nanmask]))), dtype=int) flux = np.delete(run.flux / np.nanmedian(run.flux), mask) time = np.delete(run.time, mask) transit_model = (Transit(time, t0=t0, per=per, dur=dur, depth=depth) - 1) / depth # Count the transits t0 += np.ceil((time[0] - dur - t0) / per) * per ttimes0 = np.arange(t0, time[-1] + dur, per) tinds = [] for tt in ttimes0: # Get indices for this chunk inds = np.where(np.abs(time - tt) < trn_win * dur / 2.)[0] # Ensure there's a transit in this chunk, and that # there are enough points for the polynomial fit if np.any(transit_model[inds] < 0.) and \ len(inds) > poly_order: tinds.append(inds) # Our design matrix sz = (poly_order + 1) * len(tinds) X = np.empty((0, 1 + sz), dtype=float) Y = np.array([], dtype=float) T = np.array([], dtype=float) # Loop over all transits for i, inds in enumerate(tinds): # Get the transit model trnvec = transit_model[inds].reshape(-1, 1) # Normalize the time array t = time[inds] t = (t - t[0]) / (t[-1] - t[0]) # Cumulative arrays T = np.append(T, time[inds]) Y = np.append(Y, flux[inds]) # Polynomial vector polyvec = np.array( [t ** o for o in range(0, poly_order + 1)]).T # Update the design matrix with this chunk lzeros = np.zeros((len(t), i * (poly_order + 1))) rzeros = np.zeros( (len(t), sz - (i + 1) * (poly_order + 1))) chunk = np.hstack((trnvec, lzeros, polyvec, rzeros)) X = np.vstack((X, chunk)) # Get the relative depth A = np.dot(X.T, X) B = np.dot(X.T, Y) C = np.linalg.solve(A, B) rec_depth = C[0] # Get the uncertainties sig = 1.4826 * \ np.nanmedian(np.abs(flux - np.nanmedian(flux)) ) / np.nanmedian(flux) cov = sig ** 2 * np.linalg.solve(A, np.eye(A.shape[0])) err = np.sqrt(np.diag(cov)) rec_depth_err = err[0] # Store the results self.inject.update( {'rec_depth%s' % tag: rec_depth, 'rec_depth_err%s' % tag: rec_depth_err}) # Store the detrended, folded data D = (Y - np.dot(C[1:], X[:, 1:].T) + np.nanmedian(Y)) / np.nanmedian(Y) T = (T - t0 - per / 2.) % per - per / 2. self.inject.update( {'fold_time%s' % tag: T, 'fold_flux%s' % tag: D}) def plot_final(self, ax): ''' Plots the injection recovery results. 
''' from mpl_toolkits.axes_grid.inset_locator import inset_axes ax.axis('off') ax1 = inset_axes(ax, width="47%", height="100%", loc=6) ax2 = inset_axes(ax, width="47%", height="100%", loc=7) # Plot the recovered folded transits ax1.plot(self.inject['fold_time'], self.inject['fold_flux'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time']), np.max( self.inject['fold_time']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax1.plot(x, y, 'r-') ax1.annotate('INJECTED', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax1.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.margins(0, None) ax1.ticklabel_format(useOffset=False) # Plot the recovered folded transits (control) ax2.plot(self.inject['fold_time_control'], self.inject['fold_flux_control'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time_control']), np.max( self.inject['fold_time_control']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth_control']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax2.plot(x, y, 'r-') ax2.annotate('CONTROL', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax2.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth_control']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.margins(0, None) ax2.ticklabel_format(useOffset=False) N = int(0.995 * len(self.inject['fold_flux_control'])) hi, lo = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])][[N, -N]] fsort = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])] pad = (hi - lo) * 0.2 ylim = (lo - 2 * pad, hi + pad) ax2.set_ylim(ylim) ax1.set_ylim(ylim) ax2.set_yticklabels([]) for tick in ax1.get_xticklabels() + ax1.get_yticklabels() + \ ax2.get_xticklabels(): tick.set_fontsize(5) def finalize(self): ''' Calls the depth recovery routine at the end of the de-trending step. ''' super(Injection, self).finalize() self.recover_depth() return Injection(ID, inject=inject, parent_class=inj_model, make_fits=make_fits, **kwargs)
[ "def", "Inject", "(", "ID", ",", "inj_model", "=", "'nPLD'", ",", "t0", "=", "None", ",", "per", "=", "None", ",", "dur", "=", "0.1", ",", "depth", "=", "0.001", ",", "mask", "=", "False", ",", "trn_win", "=", "5", ",", "poly_order", "=", "3", ",", "make_fits", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Randomize the planet params", "if", "per", "is", "None", ":", "a", "=", "3.", "b", "=", "10.", "per", "=", "a", "+", "(", "b", "-", "a", ")", "*", "np", ".", "random", ".", "random", "(", ")", "if", "t0", "is", "None", ":", "t0", "=", "per", "*", "np", ".", "random", ".", "random", "(", ")", "# Get the actual class", "_model", "=", "eval", "(", "inj_model", ")", "inject", "=", "{", "'t0'", ":", "t0", ",", "'per'", ":", "per", ",", "'dur'", ":", "dur", ",", "'depth'", ":", "depth", ",", "'mask'", ":", "mask", ",", "'poly_order'", ":", "poly_order", ",", "'trn_win'", ":", "trn_win", "}", "# Define the injection class", "class", "Injection", "(", "_model", ")", ":", "'''\n The :py:obj:`Injection` class is a special subclass of a\n user-selected :py:obj:`everest` model.\n See :py:func:`Inject` for more details.\n\n '''", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "'''\n\n '''", "self", ".", "inject", "=", "kwargs", ".", "pop", "(", "'inject'", ",", "None", ")", "self", ".", "parent_class", "=", "kwargs", ".", "pop", "(", "'parent_class'", ",", "None", ")", "self", ".", "kwargs", "=", "kwargs", "super", "(", "Injection", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "@", "property", "def", "name", "(", "self", ")", ":", "'''\n\n '''", "if", "self", ".", "inject", "[", "'mask'", "]", ":", "maskchar", "=", "'M'", "else", ":", "maskchar", "=", "'U'", "return", "'%s_Inject_%s%g'", "%", "(", "self", ".", "parent_class", ",", "maskchar", ",", "self", ".", "inject", "[", "'depth'", "]", ")", "def", "load_tpf", "(", "self", ")", ":", "'''\n Loads the target pixel files and injects transits at the pixel level.\n\n '''", "# Load the TPF", "super", "(", "Injection", ",", "self", ")", ".", "load_tpf", "(", ")", "log", ".", "info", "(", "\"Injecting transits...\"", ")", "# Inject the transits into the regular data", "transit_model", "=", "Transit", "(", "self", ".", "time", ",", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", ")", "for", "i", "in", "range", "(", "self", ".", "fpix", ".", "shape", "[", "1", "]", ")", ":", "self", ".", "fpix", "[", ":", ",", "i", "]", "*=", "transit_model", "self", ".", "fraw", "=", "np", ".", "sum", "(", "self", ".", "fpix", ",", "axis", "=", "1", ")", "if", "self", ".", "inject", "[", "'mask'", "]", ":", "self", ".", "transitmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "transitmask", ",", "np", ".", "where", "(", "transit_model", "<", "1.", ")", "[", "0", "]", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "# Update the PLD normalization", "self", ".", "get_norm", "(", ")", "def", "recover_depth", "(", "self", ")", ":", "'''\n Recovers the injected transit depth from the long\n cadence data with a simple LLS solver.\n The results are all stored in the :py:obj:`inject`\n attribute of the model.\n\n '''", "# Control run", "transit_model", "=", "Transit", "(", "self", ".", "time", ",", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", 
",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", ")", "kwargs", "=", "dict", "(", "self", ".", "kwargs", ")", "kwargs", ".", "update", "(", "{", "'clobber'", ":", "False", "}", ")", "control", "=", "eval", "(", "self", ".", "parent_class", ")", "(", "self", ".", "ID", ",", "is_parent", "=", "True", ",", "*", "*", "kwargs", ")", "control", ".", "fraw", "*=", "transit_model", "# Get params", "log", ".", "info", "(", "\"Recovering transit depth...\"", ")", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", "per", "=", "self", ".", "inject", "[", "'per'", "]", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", "trn_win", "=", "self", ".", "inject", "[", "'trn_win'", "]", "poly_order", "=", "self", ".", "inject", "[", "'poly_order'", "]", "for", "run", ",", "tag", "in", "zip", "(", "[", "self", ",", "control", "]", ",", "[", "''", ",", "'_control'", "]", ")", ":", "# Compute the model", "mask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "run", ".", "badmask", ",", "run", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "flux", "=", "np", ".", "delete", "(", "run", ".", "flux", "/", "np", ".", "nanmedian", "(", "run", ".", "flux", ")", ",", "mask", ")", "time", "=", "np", ".", "delete", "(", "run", ".", "time", ",", "mask", ")", "transit_model", "=", "(", "Transit", "(", "time", ",", "t0", "=", "t0", ",", "per", "=", "per", ",", "dur", "=", "dur", ",", "depth", "=", "depth", ")", "-", "1", ")", "/", "depth", "# Count the transits", "t0", "+=", "np", ".", "ceil", "(", "(", "time", "[", "0", "]", "-", "dur", "-", "t0", ")", "/", "per", ")", "*", "per", "ttimes0", "=", "np", ".", "arange", "(", "t0", ",", "time", "[", "-", "1", "]", "+", "dur", ",", "per", ")", "tinds", "=", "[", "]", "for", "tt", "in", "ttimes0", ":", "# Get indices for this chunk", "inds", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "time", "-", "tt", ")", "<", "trn_win", "*", "dur", "/", "2.", ")", "[", "0", "]", "# Ensure there's a transit in this chunk, and that", "# there are enough points for the polynomial fit", "if", "np", ".", "any", "(", "transit_model", "[", "inds", "]", "<", "0.", ")", "and", "len", "(", "inds", ")", ">", "poly_order", ":", "tinds", ".", "append", "(", "inds", ")", "# Our design matrix", "sz", "=", "(", "poly_order", "+", "1", ")", "*", "len", "(", "tinds", ")", "X", "=", "np", ".", "empty", "(", "(", "0", ",", "1", "+", "sz", ")", ",", "dtype", "=", "float", ")", "Y", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "float", ")", "T", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "float", ")", "# Loop over all transits", "for", "i", ",", "inds", "in", "enumerate", "(", "tinds", ")", ":", "# Get the transit model", "trnvec", "=", "transit_model", "[", "inds", "]", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Normalize the time array", "t", "=", "time", "[", "inds", "]", "t", "=", "(", "t", "-", "t", "[", "0", "]", ")", "/", "(", "t", "[", "-", "1", "]", "-", "t", "[", "0", "]", ")", "# Cumulative arrays", "T", "=", "np", ".", "append", "(", "T", ",", "time", "[", "inds", "]", ")", "Y", "=", "np", ".", "append", "(", "Y", ",", "flux", "[", "inds", "]", ")", "# Polynomial vector", "polyvec", "=", "np", ".", "array", "(", "[", "t", "**", "o", "for", "o", "in", "range", "(", "0", ",", "poly_order", "+", "1", ")", 
"]", ")", ".", "T", "# Update the design matrix with this chunk", "lzeros", "=", "np", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "i", "*", "(", "poly_order", "+", "1", ")", ")", ")", "rzeros", "=", "np", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "sz", "-", "(", "i", "+", "1", ")", "*", "(", "poly_order", "+", "1", ")", ")", ")", "chunk", "=", "np", ".", "hstack", "(", "(", "trnvec", ",", "lzeros", ",", "polyvec", ",", "rzeros", ")", ")", "X", "=", "np", ".", "vstack", "(", "(", "X", ",", "chunk", ")", ")", "# Get the relative depth", "A", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", "B", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "Y", ")", "C", "=", "np", ".", "linalg", ".", "solve", "(", "A", ",", "B", ")", "rec_depth", "=", "C", "[", "0", "]", "# Get the uncertainties", "sig", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "flux", "-", "np", ".", "nanmedian", "(", "flux", ")", ")", ")", "/", "np", ".", "nanmedian", "(", "flux", ")", "cov", "=", "sig", "**", "2", "*", "np", ".", "linalg", ".", "solve", "(", "A", ",", "np", ".", "eye", "(", "A", ".", "shape", "[", "0", "]", ")", ")", "err", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "cov", ")", ")", "rec_depth_err", "=", "err", "[", "0", "]", "# Store the results", "self", ".", "inject", ".", "update", "(", "{", "'rec_depth%s'", "%", "tag", ":", "rec_depth", ",", "'rec_depth_err%s'", "%", "tag", ":", "rec_depth_err", "}", ")", "# Store the detrended, folded data", "D", "=", "(", "Y", "-", "np", ".", "dot", "(", "C", "[", "1", ":", "]", ",", "X", "[", ":", ",", "1", ":", "]", ".", "T", ")", "+", "np", ".", "nanmedian", "(", "Y", ")", ")", "/", "np", ".", "nanmedian", "(", "Y", ")", "T", "=", "(", "T", "-", "t0", "-", "per", "/", "2.", ")", "%", "per", "-", "per", "/", "2.", "self", ".", "inject", ".", "update", "(", "{", "'fold_time%s'", "%", "tag", ":", "T", ",", "'fold_flux%s'", "%", "tag", ":", "D", "}", ")", "def", "plot_final", "(", "self", ",", "ax", ")", ":", "'''\n Plots the injection recovery results.\n\n '''", "from", "mpl_toolkits", ".", "axes_grid", ".", "inset_locator", "import", "inset_axes", "ax", ".", "axis", "(", "'off'", ")", "ax1", "=", "inset_axes", "(", "ax", ",", "width", "=", "\"47%\"", ",", "height", "=", "\"100%\"", ",", "loc", "=", "6", ")", "ax2", "=", "inset_axes", "(", "ax", ",", "width", "=", "\"47%\"", ",", "height", "=", "\"100%\"", ",", "loc", "=", "7", ")", "# Plot the recovered folded transits", "ax1", ".", "plot", "(", "self", ".", "inject", "[", "'fold_time'", "]", ",", "self", ".", "inject", "[", "'fold_flux'", "]", ",", "'k.'", ",", "alpha", "=", "0.3", ")", "x", "=", "np", ".", "linspace", "(", "np", ".", "min", "(", "self", ".", "inject", "[", "'fold_time'", "]", ")", ",", "np", ".", "max", "(", "self", ".", "inject", "[", "'fold_time'", "]", ")", ",", "500", ")", "try", ":", "y", "=", "Transit", "(", "x", ",", "t0", "=", "0.", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'rec_depth'", "]", ")", "except", ":", "# Log the error, and carry on", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "l", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "l", ")", "y", "=", "np", ".", "ones_like", "(", "x", ")", "*", "np", ".", "nan", "ax1", ".", "plot", "(", 
"x", ",", "y", ",", "'r-'", ")", "ax1", ".", "annotate", "(", "'INJECTED'", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax1", ".", "annotate", "(", "'True depth:\\nRecovered depth:'", ",", "xy", "=", "(", "0.02", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax1", ".", "annotate", "(", "'%.6f\\n%.6f'", "%", "(", "self", ".", "inject", "[", "'depth'", "]", ",", "self", ".", "inject", "[", "'rec_depth'", "]", ")", ",", "xy", "=", "(", "0.4", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax1", ".", "margins", "(", "0", ",", "None", ")", "ax1", ".", "ticklabel_format", "(", "useOffset", "=", "False", ")", "# Plot the recovered folded transits (control)", "ax2", ".", "plot", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ",", "self", ".", "inject", "[", "'fold_flux_control'", "]", ",", "'k.'", ",", "alpha", "=", "0.3", ")", "x", "=", "np", ".", "linspace", "(", "np", ".", "min", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ")", ",", "np", ".", "max", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ")", ",", "500", ")", "try", ":", "y", "=", "Transit", "(", "x", ",", "t0", "=", "0.", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'rec_depth_control'", "]", ")", "except", ":", "# Log the error, and carry on", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "l", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "l", ")", "y", "=", "np", ".", "ones_like", "(", "x", ")", "*", "np", ".", "nan", "ax2", ".", "plot", "(", "x", ",", "y", ",", "'r-'", ")", "ax2", ".", "annotate", "(", "'CONTROL'", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax2", ".", "annotate", "(", "'True depth:\\nRecovered depth:'", ",", "xy", "=", "(", "0.02", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax2", ".", "annotate", "(", "'%.6f\\n%.6f'", "%", "(", "self", ".", "inject", "[", "'depth'", "]", ",", "self", ".", "inject", "[", "'rec_depth_control'", "]", ")", ",", "xy", "=", "(", "0.4", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax2", ".", "margins", "(", "0", ",", "None", ")", "ax2", ".", "ticklabel_format", "(", "useOffset", "=", "False", ")", "N", "=", "int", "(", "0.995", "*", "len", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", ")", ")", "hi", ",", "lo", "=", "self", ".", "inject", "[", "'fold_flux_control'", "]", "[", "np", ".", "argsort", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", 
")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "fsort", "=", "self", ".", "inject", "[", "'fold_flux_control'", "]", "[", "np", ".", "argsort", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", ")", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.2", "ylim", "=", "(", "lo", "-", "2", "*", "pad", ",", "hi", "+", "pad", ")", "ax2", ".", "set_ylim", "(", "ylim", ")", "ax1", ".", "set_ylim", "(", "ylim", ")", "ax2", ".", "set_yticklabels", "(", "[", "]", ")", "for", "tick", "in", "ax1", ".", "get_xticklabels", "(", ")", "+", "ax1", ".", "get_yticklabels", "(", ")", "+", "ax2", ".", "get_xticklabels", "(", ")", ":", "tick", ".", "set_fontsize", "(", "5", ")", "def", "finalize", "(", "self", ")", ":", "'''\n Calls the depth recovery routine at the end\n of the de-trending step.\n\n '''", "super", "(", "Injection", ",", "self", ")", ".", "finalize", "(", ")", "self", ".", "recover_depth", "(", ")", "return", "Injection", "(", "ID", ",", "inject", "=", "inject", ",", "parent_class", "=", "inj_model", ",", "make_fits", "=", "make_fits", ",", "*", "*", "kwargs", ")" ]
Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`. :param int ID: The target id :param str inj_model: The name of the :py:obj:`everest` model to run. \ Default `"nPLD"` :param float t0: The transit ephemeris in days. Default is to draw from \ the uniform distribution [0., :py:obj:`per`) :param float per: The injected planet period in days. Default is to draw \ from the uniform distribution [3, 10] :param float dur: The transit duration in days. Must be in the range \ [0.05, 0.5]. Default 0.1 :param float depth: The fractional transit depth. Default 0.001 :param bool mask: Explicitly mask the in-transit cadences when computing \ the PLD model? Default :py:obj:`False` :param float trn_win: The size of the transit window in units of the \ transit duration :param int poly_order: The order of the polynomial used to fit the \ continuum
[ "Run", "one", "of", "the", ":", "py", ":", "obj", ":", "everest", "models", "with", "injected", "transits", "and", "attempt", "to", "recover", "the", "transit", "depth", "at", "the", "end", "with", "a", "simple", "linear", "regression", "with", "a", "polynomial", "baseline", ".", "The", "depth", "is", "stored", "in", "the", ":", "py", ":", "obj", ":", "inject", "attribute", "of", "the", "model", "(", "a", "dictionary", ")", "as", ":", "py", ":", "obj", ":", "rec_depth", ".", "A", "control", "injection", "is", "also", "performed", "in", "which", "the", "transits", "are", "injected", "into", "the", "de", "-", "trended", "data", ";", "the", "recovered", "depth", "in", "the", "control", "run", "is", "stored", "in", ":", "py", ":", "obj", ":", "inject", "as", ":", "py", ":", "obj", ":", "rec_depth_control", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/inject.py#L28-L334
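The heart of recover_depth above is an ordinary least-squares problem: the design matrix has one shared column holding the depth-normalized transit shape, plus a separate (poly_order + 1)-column polynomial block per transit window, so every transit gets its own local baseline while all transits share a single depth coefficient. Below is a self-contained sketch of that construction, using synthetic cadences and a hypothetical box-shaped transit as a stand-in for everest's Transit model.

import numpy as np

np.random.seed(0)  # reproducible noise for the sketch

# Synthetic cadences and a box transit (stand-in for everest's Transit)
time = np.linspace(0., 20., 2000)
t0, per, dur, true_depth, poly_order, trn_win = 1.3, 4.7, 0.2, 1e-3, 3, 5
tc_all = np.arange(t0, time[-1], per)
trn = np.zeros_like(time)
for tc in tc_all:
    trn[np.abs(time - tc) < dur / 2.] = -1.      # depth-normalized transit shape
flux = 1. + true_depth * trn + 1e-4 * np.random.randn(len(time))

# One polynomial block per transit window, one shared transit column
tinds = [np.where(np.abs(time - tc) < trn_win * dur / 2.)[0] for tc in tc_all]
sz = (poly_order + 1) * len(tinds)
X = np.empty((0, 1 + sz))
Y = np.array([])
for i, inds in enumerate(tinds):
    t = (time[inds] - time[inds][0]) / (time[inds][-1] - time[inds][0])
    polyvec = np.array([t ** o for o in range(poly_order + 1)]).T
    lzeros = np.zeros((len(t), i * (poly_order + 1)))
    rzeros = np.zeros((len(t), sz - (i + 1) * (poly_order + 1)))
    X = np.vstack((X, np.hstack((trn[inds].reshape(-1, 1), lzeros, polyvec, rzeros))))
    Y = np.append(Y, flux[inds])

# Normal equations: the first coefficient is the recovered depth
C = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Y))
print("recovered depth: %.6f (true %.6f)" % (C[0], true_depth))

Solving the normal equations then returns the depth as the first coefficient, exactly as C[0] does in recover_depth above.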
lsbardel/python-stdnet
stdnet/apps/searchengine/models.py
WordItem.object
def object(self, session): '''Instance of :attr:`model_type` with id :attr:`object_id`.''' if not hasattr(self, '_object'): pkname = self.model_type._meta.pkname() query = session.query(self.model_type).filter(**{pkname: self.object_id}) return query.items(callback=self.__set_object) else: return self._object
python
def object(self, session): '''Instance of :attr:`model_type` with id :attr:`object_id`.''' if not hasattr(self, '_object'): pkname = self.model_type._meta.pkname() query = session.query(self.model_type).filter(**{pkname: self.object_id}) return query.items(callback=self.__set_object) else: return self._object
[ "def", "object", "(", "self", ",", "session", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_object'", ")", ":", "pkname", "=", "self", ".", "model_type", ".", "_meta", ".", "pkname", "(", ")", "query", "=", "session", ".", "query", "(", "self", ".", "model_type", ")", ".", "filter", "(", "*", "*", "{", "pkname", ":", "self", ".", "object_id", "}", ")", "return", "query", ".", "items", "(", "callback", "=", "self", ".", "__set_object", ")", "else", ":", "return", "self", ".", "_object" ]
Instance of :attr:`model_type` with id :attr:`object_id`.
[ "Instance", "of", ":", "attr", ":", "model_type", "with", "id", ":", "attr", ":", "object_id", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/models.py#L36-L44
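The property above implements a memoize-on-first-access pattern: the first call issues the query and lets a private callback cache the loaded instance on self._object; later calls short-circuit to the cache. Here is a minimal sketch of the same pattern, independent of stdnet's session machinery; fake_query is a stand-in for session.query(...).filter(...).items(callback=...).

class LazyRef(object):
    """Cache a queried object on first access (sketch of the WordItem.object pattern)."""

    def __init__(self, loader):
        self._loader = loader                    # callable taking a `callback` kwarg

    def get(self):
        if not hasattr(self, '_object'):
            # First access: run the query; the callback stores the result
            return self._loader(callback=self.__set_object)
        return self._object                      # subsequent accesses hit the cache

    def __set_object(self, obj):
        self._object = obj
        return obj


def fake_query(callback):
    # Stand-in for session.query(model).filter(pk=id).items(callback=...)
    return callback({'pk': 42})


ref = LazyRef(fake_query)
print(ref.get())   # runs the query and caches the result
print(ref.get())   # served from the cache, no second query

Returning the query result from the first branch, rather than the cached attribute, likely matters because stdnet backends can be asynchronous: the callback fires only once the items actually arrive.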
rodluger/everest
everest/missions/tess/tess.py
GetData
def GetData(ID, season = None, cadence = 'lc', clobber = False, delete_raw = False, aperture_name = None, saturated_aperture_name = None, max_pixels = None, download_only = False, saturation_tolerance = None, bad_bits = None, **kwargs): ''' Returns a :py:obj:`DataContainer` instance with the raw data for the target. :param int ID: The target ID number :param int season: The observing season. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param bool clobber: Overwrite existing files? Default :py:obj:`False` :param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False` :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str saturated_aperture_name: The name of the aperture to use if the target is \ saturated. Default :py:obj:`None` :param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None` :param bool download_only: Download raw TPF and return? Default :py:obj:`False` :param float saturation_tolerance: Target is considered saturated if flux is within \ this fraction of the pixel well depth. Default :py:obj:`None` :param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider outliers when \ computing the model. Default :py:obj:`None` ''' raise NotImplementedError('This mission is not yet supported.')
python
def GetData(ID, season = None, cadence = 'lc', clobber = False, delete_raw = False, aperture_name = None, saturated_aperture_name = None, max_pixels = None, download_only = False, saturation_tolerance = None, bad_bits = None, **kwargs): ''' Returns a :py:obj:`DataContainer` instance with the raw data for the target. :param int ID: The target ID number :param int season: The observing season. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param bool clobber: Overwrite existing files? Default :py:obj:`False` :param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False` :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str saturated_aperture_name: The name of the aperture to use if the target is \ saturated. Default :py:obj:`None` :param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None` :param bool download_only: Download raw TPF and return? Default :py:obj:`False` :param float saturation_tolerance: Target is considered saturated if flux is within \ this fraction of the pixel well depth. Default :py:obj:`None` :param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider outliers when \ computing the model. Default :py:obj:`None` ''' raise NotImplementedError('This mission is not yet supported.')
[ "def", "GetData", "(", "ID", ",", "season", "=", "None", ",", "cadence", "=", "'lc'", ",", "clobber", "=", "False", ",", "delete_raw", "=", "False", ",", "aperture_name", "=", "None", ",", "saturated_aperture_name", "=", "None", ",", "max_pixels", "=", "None", ",", "download_only", "=", "False", ",", "saturation_tolerance", "=", "None", ",", "bad_bits", "=", "None", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "'This mission is not yet supported.'", ")" ]
Returns a :py:obj:`DataContainer` instance with the raw data for the target. :param int ID: The target ID number :param int season: The observing season. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param bool clobber: Overwrite existing files? Default :py:obj:`False` :param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False` :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str saturated_aperture_name: The name of the aperture to use if the target is \ saturated. Default :py:obj:`None` :param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None` :param bool download_only: Download raw TPF and return? Default :py:obj:`False` :param float saturation_tolerance: Target is considered saturated if flux is within \ this fraction of the pixel well depth. Default :py:obj:`None` :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider outliers when \ computing the model. Default :py:obj:`None`
[ "Returns", "a", ":", "py", ":", "obj", ":", "DataContainer", "instance", "with", "the", "raw", "data", "for", "the", "target", ".", ":", "param", "int", "ID", ":", "The", "target", "ID", "number", ":", "param", "int", "season", ":", "The", "observing", "season", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "str", "cadence", ":", "The", "light", "curve", "cadence", ".", "Default", "lc", ":", "param", "bool", "clobber", ":", "Overwrite", "existing", "files?", "Default", ":", "py", ":", "obj", ":", "False", ":", "param", "bool", "delete_raw", ":", "Delete", "the", "FITS", "TPF", "after", "processing", "it?", "Default", ":", "py", ":", "obj", ":", "False", ":", "param", "str", "aperture_name", ":", "The", "name", "of", "the", "aperture", "to", "use", ".", "Select", "custom", "to", "call", "\\", ":", "py", ":", "func", ":", "GetCustomAperture", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "str", "saturated_aperture_name", ":", "The", "name", "of", "the", "aperture", "to", "use", "if", "the", "target", "is", "\\", "saturated", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "int", "max_pixels", ":", "Maximum", "number", "of", "pixels", "in", "the", "TPF", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "bool", "download_only", ":", "Download", "raw", "TPF", "and", "return?", "Default", ":", "py", ":", "obj", ":", "False", ":", "param", "float", "saturation_tolerance", ":", "Target", "is", "considered", "saturated", "if", "flux", "is", "within", "\\", "this", "fraction", "of", "the", "pixel", "well", "depth", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "array_like", "bad_bits", ":", "Flagged", ":", "py", ":", "obj", "QUALITY", "bits", "to", "consider", "outliers", "when", "\\", "computing", "the", "model", ".", "Default", ":", "py", ":", "obj", ":", "None" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/tess/tess.py#L65-L90
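Since every code path in this stub raises, any caller has to be prepared for NotImplementedError. A hedged sketch of the intended call, importing from the module path shown above (everest/missions/tess/tess.py) and using an arbitrary placeholder target ID:

from everest.missions.tess.tess import GetData

try:
    # Hypothetical call once TESS support lands; arguments mirror the K2 GetData
    data = GetData(123456789, season=1, cadence='lc', download_only=True)
except NotImplementedError:
    # For now, every invocation ends up here
    print("TESS is not yet supported by everest")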
rodluger/everest
everest/missions/tess/tess.py
GetNeighbors
def GetNeighbors(ID, model = None, neighbors = None, mag_range = None, cdpp_range = None, aperture_name = None, cadence = 'lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int ID: The target ID number :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default None :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None` :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None` ''' raise NotImplementedError('This mission is not yet supported.')
python
def GetNeighbors(ID, model = None, neighbors = None, mag_range = None, cdpp_range = None, aperture_name = None, cadence = 'lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int ID: The target ID number :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default None :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None` :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None` ''' raise NotImplementedError('This mission is not yet supported.')
[ "def", "GetNeighbors", "(", "ID", ",", "model", "=", "None", ",", "neighbors", "=", "None", ",", "mag_range", "=", "None", ",", "cdpp_range", "=", "None", ",", "aperture_name", "=", "None", ",", "cadence", "=", "'lc'", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "'This mission is not yet supported.'", ")" ]
Return `neighbors` random bright stars on the same module as `EPIC`. :param int ID: The target ID number :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default None :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None` :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None`
[ "Return", "neighbors", "random", "bright", "stars", "on", "the", "same", "module", "as", "EPIC", ".", ":", "param", "int", "ID", ":", "The", "target", "ID", "number", ":", "param", "str", "model", ":", "The", ":", "py", ":", "obj", ":", "everest", "model", "name", ".", "Only", "used", "when", "imposing", "CDPP", "bounds", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "int", "neighbors", ":", "Number", "of", "neighbors", "to", "return", ".", "Default", "None", ":", "param", "str", "aperture_name", ":", "The", "name", "of", "the", "aperture", "to", "use", ".", "Select", "custom", "to", "call", "\\", ":", "py", ":", "func", ":", "GetCustomAperture", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "str", "cadence", ":", "The", "light", "curve", "cadence", ".", "Default", "lc", ":", "param", "tuple", "mag_range", ":", "(", "low", "high", ")", "values", "for", "the", "Kepler", "magnitude", ".", "Default", ":", "py", ":", "obj", ":", "None", ":", "param", "tuple", "cdpp_range", ":", "(", "low", "high", ")", "values", "for", "the", "de", "-", "trended", "CDPP", ".", "Default", ":", "py", ":", "obj", ":", "None" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/tess/tess.py#L92-L109
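The mag_range and cdpp_range parameters are documented as (low, high) tuples, though the stub never specifies whether the bounds are inclusive. A small sketch of the filtering they describe, on made-up data and with inclusive bounds assumed:

import numpy as np

# Made-up neighbor catalog: magnitudes and de-trended CDPPs
mags = np.array([9.1, 11.4, 12.0, 13.7, 15.2])
cdpps = np.array([25., 40., 80., 120., 300.])
mag_range, cdpp_range = (10., 13.), (30., 100.)

keep = ((mags >= mag_range[0]) & (mags <= mag_range[1]) &
        (cdpps >= cdpp_range[0]) & (cdpps <= cdpp_range[1]))
print(np.where(keep)[0])   # -> [1 2]: the only targets inside both ranges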
rodluger/everest
everest/missions/k2/pipelines.py
get
def get(ID, pipeline='everest2', campaign=None): ''' Returns the `time` and `flux` for a given EPIC `ID` and a given `pipeline` name. ''' log.info('Downloading %s light curve for %d...' % (pipeline, ID)) # Dev version hack if EVEREST_DEV: if pipeline.lower() == 'everest2' or pipeline.lower() == 'k2sff': from . import Season, TargetDirectory, FITSFile if campaign is None: campaign = Season(ID) fits = os.path.join(TargetDirectory( ID, campaign), FITSFile(ID, campaign)) newdir = os.path.join(KPLR_ROOT, "data", "everest", str(ID)) if not os.path.exists(newdir): os.makedirs(newdir) if os.path.exists(fits): shutil.copy(fits, newdir) if pipeline.lower() == 'everest2': s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'everest1': s = k2plr.EVEREST(ID, version=1, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'k2sff': s = k2plr.K2SFF(ID, sci_campaign=campaign) time = s.time flux = s.fcor # Normalize to the median flux s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) flux *= np.nanmedian(s.flux) elif pipeline.lower() == 'k2sc': s = k2plr.K2SC(ID, sci_campaign=campaign) time = s.time flux = s.pdcflux elif pipeline.lower() == 'raw': s = k2plr.EVEREST(ID, version=2, raw=True, sci_campaign=campaign) time = s.time flux = s.flux else: raise ValueError('Invalid pipeline: `%s`.' % pipeline) return time, flux
python
def get(ID, pipeline='everest2', campaign=None): ''' Returns the `time` and `flux` for a given EPIC `ID` and a given `pipeline` name. ''' log.info('Downloading %s light curve for %d...' % (pipeline, ID)) # Dev version hack if EVEREST_DEV: if pipeline.lower() == 'everest2' or pipeline.lower() == 'k2sff': from . import Season, TargetDirectory, FITSFile if campaign is None: campaign = Season(ID) fits = os.path.join(TargetDirectory( ID, campaign), FITSFile(ID, campaign)) newdir = os.path.join(KPLR_ROOT, "data", "everest", str(ID)) if not os.path.exists(newdir): os.makedirs(newdir) if os.path.exists(fits): shutil.copy(fits, newdir) if pipeline.lower() == 'everest2': s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'everest1': s = k2plr.EVEREST(ID, version=1, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'k2sff': s = k2plr.K2SFF(ID, sci_campaign=campaign) time = s.time flux = s.fcor # Normalize to the median flux s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) flux *= np.nanmedian(s.flux) elif pipeline.lower() == 'k2sc': s = k2plr.K2SC(ID, sci_campaign=campaign) time = s.time flux = s.pdcflux elif pipeline.lower() == 'raw': s = k2plr.EVEREST(ID, version=2, raw=True, sci_campaign=campaign) time = s.time flux = s.flux else: raise ValueError('Invalid pipeline: `%s`.' % pipeline) return time, flux
[ "def", "get", "(", "ID", ",", "pipeline", "=", "'everest2'", ",", "campaign", "=", "None", ")", ":", "log", ".", "info", "(", "'Downloading %s light curve for %d...'", "%", "(", "pipeline", ",", "ID", ")", ")", "# Dev version hack", "if", "EVEREST_DEV", ":", "if", "pipeline", ".", "lower", "(", ")", "==", "'everest2'", "or", "pipeline", ".", "lower", "(", ")", "==", "'k2sff'", ":", "from", ".", "import", "Season", ",", "TargetDirectory", ",", "FITSFile", "if", "campaign", "is", "None", ":", "campaign", "=", "Season", "(", "ID", ")", "fits", "=", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "ID", ",", "campaign", ")", ",", "FITSFile", "(", "ID", ",", "campaign", ")", ")", "newdir", "=", "os", ".", "path", ".", "join", "(", "KPLR_ROOT", ",", "\"data\"", ",", "\"everest\"", ",", "str", "(", "ID", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "newdir", ")", ":", "os", ".", "makedirs", "(", "newdir", ")", "if", "os", ".", "path", ".", "exists", "(", "fits", ")", ":", "shutil", ".", "copy", "(", "fits", ",", "newdir", ")", "if", "pipeline", ".", "lower", "(", ")", "==", "'everest2'", ":", "s", "=", "k2plr", ".", "EVEREST", "(", "ID", ",", "version", "=", "2", ",", "sci_campaign", "=", "campaign", ")", "time", "=", "s", ".", "time", "flux", "=", "s", ".", "flux", "elif", "pipeline", ".", "lower", "(", ")", "==", "'everest1'", ":", "s", "=", "k2plr", ".", "EVEREST", "(", "ID", ",", "version", "=", "1", ",", "sci_campaign", "=", "campaign", ")", "time", "=", "s", ".", "time", "flux", "=", "s", ".", "flux", "elif", "pipeline", ".", "lower", "(", ")", "==", "'k2sff'", ":", "s", "=", "k2plr", ".", "K2SFF", "(", "ID", ",", "sci_campaign", "=", "campaign", ")", "time", "=", "s", ".", "time", "flux", "=", "s", ".", "fcor", "# Normalize to the median flux", "s", "=", "k2plr", ".", "EVEREST", "(", "ID", ",", "version", "=", "2", ",", "sci_campaign", "=", "campaign", ")", "flux", "*=", "np", ".", "nanmedian", "(", "s", ".", "flux", ")", "elif", "pipeline", ".", "lower", "(", ")", "==", "'k2sc'", ":", "s", "=", "k2plr", ".", "K2SC", "(", "ID", ",", "sci_campaign", "=", "campaign", ")", "time", "=", "s", ".", "time", "flux", "=", "s", ".", "pdcflux", "elif", "pipeline", ".", "lower", "(", ")", "==", "'raw'", ":", "s", "=", "k2plr", ".", "EVEREST", "(", "ID", ",", "version", "=", "2", ",", "raw", "=", "True", ",", "sci_campaign", "=", "campaign", ")", "time", "=", "s", ".", "time", "flux", "=", "s", ".", "flux", "else", ":", "raise", "ValueError", "(", "'Invalid pipeline: `%s`.'", "%", "pipeline", ")", "return", "time", ",", "flux" ]
Returns the `time` and `flux` for a given EPIC `ID` and a given `pipeline` name.
[ "Returns", "the", "time", "and", "flux", "for", "a", "given", "EPIC", "ID", "and", "a", "given", "pipeline", "name", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L39-L88
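A short usage sketch for get, comparing the point-to-point scatter of two pipelines on the same target. The EPIC ID is an arbitrary placeholder, and the call downloads data through k2plr, so it needs network access:

import numpy as np
from everest.missions.k2.pipelines import get

epic = 201367065   # placeholder EPIC ID
for name in ['everest2', 'k2sff']:          # k2sff is rescaled to the EVEREST median
    t, f = get(epic, pipeline=name)
    f = f[~np.isnan(f)]
    scatter = 1e6 * np.nanmedian(np.abs(np.diff(f))) / np.nanmedian(f)
    print("%-8s %.1f ppm point-to-point scatter" % (name, scatter))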
rodluger/everest
everest/missions/k2/pipelines.py
plot
def plot(ID, pipeline='everest2', show=True, campaign=None): ''' Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`. ''' # Get the data time, flux = get(ID, pipeline=pipeline, campaign=campaign) # Remove nans mask = np.where(np.isnan(flux))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) # Plot it fig, ax = pl.subplots(1, figsize=(10, 4)) fig.subplots_adjust(bottom=0.15) ax.plot(time, flux, "k.", markersize=3, alpha=0.5) # Axis limits N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) # Show the CDPP from .k2 import CDPP ax.annotate('%.2f ppm' % CDPP(flux), xy=(0.98, 0.975), xycoords='axes fraction', ha='right', va='top', fontsize=12, color='r', zorder=99) # Appearance ax.margins(0, None) ax.set_xlabel("Time (BJD - 2454833)", fontsize=16) ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16) fig.canvas.set_window_title("%s: EPIC %d" % (pipeline.upper(), ID)) if show: pl.show() pl.close() else: return fig, ax
python
def plot(ID, pipeline='everest2', show=True, campaign=None): ''' Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`. ''' # Get the data time, flux = get(ID, pipeline=pipeline, campaign=campaign) # Remove nans mask = np.where(np.isnan(flux))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) # Plot it fig, ax = pl.subplots(1, figsize=(10, 4)) fig.subplots_adjust(bottom=0.15) ax.plot(time, flux, "k.", markersize=3, alpha=0.5) # Axis limits N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) # Show the CDPP from .k2 import CDPP ax.annotate('%.2f ppm' % CDPP(flux), xy=(0.98, 0.975), xycoords='axes fraction', ha='right', va='top', fontsize=12, color='r', zorder=99) # Appearance ax.margins(0, None) ax.set_xlabel("Time (BJD - 2454833)", fontsize=16) ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16) fig.canvas.set_window_title("%s: EPIC %d" % (pipeline.upper(), ID)) if show: pl.show() pl.close() else: return fig, ax
[ "def", "plot", "(", "ID", ",", "pipeline", "=", "'everest2'", ",", "show", "=", "True", ",", "campaign", "=", "None", ")", ":", "# Get the data", "time", ",", "flux", "=", "get", "(", "ID", ",", "pipeline", "=", "pipeline", ",", "campaign", "=", "campaign", ")", "# Remove nans", "mask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", ")", "[", "0", "]", "time", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "# Plot it", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "10", ",", "4", ")", ")", "fig", ".", "subplots_adjust", "(", "bottom", "=", "0.15", ")", "ax", ".", "plot", "(", "time", ",", "flux", ",", "\"k.\"", ",", "markersize", "=", "3", ",", "alpha", "=", "0.5", ")", "# Axis limits", "N", "=", "int", "(", "0.995", "*", "len", "(", "flux", ")", ")", "hi", ",", "lo", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.1", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", "+", "pad", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "# Show the CDPP", "from", ".", "k2", "import", "CDPP", "ax", ".", "annotate", "(", "'%.2f ppm'", "%", "CDPP", "(", "flux", ")", ",", "xy", "=", "(", "0.98", ",", "0.975", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'top'", ",", "fontsize", "=", "12", ",", "color", "=", "'r'", ",", "zorder", "=", "99", ")", "# Appearance", "ax", ".", "margins", "(", "0", ",", "None", ")", "ax", ".", "set_xlabel", "(", "\"Time (BJD - 2454833)\"", ",", "fontsize", "=", "16", ")", "ax", ".", "set_ylabel", "(", "\"%s Flux\"", "%", "pipeline", ".", "upper", "(", ")", ",", "fontsize", "=", "16", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "\"%s: EPIC %d\"", "%", "(", "pipeline", ".", "upper", "(", ")", ",", "ID", ")", ")", "if", "show", ":", "pl", ".", "show", "(", ")", "pl", ".", "close", "(", ")", "else", ":", "return", "fig", ",", "ax" ]
Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`.
[ "Plots", "the", "de", "-", "trended", "flux", "for", "the", "given", "EPIC", "ID", "and", "for", "the", "specified", "pipeline", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L91-L134
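Because plot returns (fig, ax) when show=False, it composes with further matplotlib calls; a minimal sketch, again with a placeholder EPIC ID:

import matplotlib.pyplot as pl
from everest.missions.k2.pipelines import plot

fig, ax = plot(201367065, pipeline='everest2', show=False)
ax.set_title('EPIC 201367065 (EVEREST 2)')        # tweak before saving
fig.savefig('epic201367065_everest2.png', dpi=150)
pl.close(fig)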
rodluger/everest
everest/missions/k2/pipelines.py
get_cdpp
def get_cdpp(campaign, pipeline='everest2'): ''' Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". ''' # Imports from .k2 import CDPP from .utils import GetK2Campaign # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' % (n, nstars)) sys.stdout.flush() n += 1 # Get the CDPP try: _, flux = get(EPIC, pipeline=pipeline, campaign=campaign) mask = np.where(np.isnan(flux))[0] flux = np.delete(flux, mask) cdpp = CDPP(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>15.3f}".format(EPIC, 0), file=outfile) continue # Log to file print("{:>09d} {:>15.3f}".format(EPIC, cdpp), file=outfile)
python
def get_cdpp(campaign, pipeline='everest2'): ''' Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". ''' # Imports from .k2 import CDPP from .utils import GetK2Campaign # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' % (n, nstars)) sys.stdout.flush() n += 1 # Get the CDPP try: _, flux = get(EPIC, pipeline=pipeline, campaign=campaign) mask = np.where(np.isnan(flux))[0] flux = np.delete(flux, mask) cdpp = CDPP(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>15.3f}".format(EPIC, 0), file=outfile) continue # Log to file print("{:>09d} {:>15.3f}".format(EPIC, cdpp), file=outfile)
[ "def", "get_cdpp", "(", "campaign", ",", "pipeline", "=", "'everest2'", ")", ":", "# Imports", "from", ".", "k2", "import", "CDPP", "from", ".", "utils", "import", "GetK2Campaign", "# Check pipeline", "assert", "pipeline", ".", "lower", "(", ")", "in", "Pipelines", ",", "'Invalid pipeline: `%s`.'", "%", "pipeline", "# Create file if it doesn't exist", "file", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "campaign", ")", ",", "pipeline", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "open", "(", "file", ",", "'a'", ")", ".", "close", "(", ")", "# Get all EPIC stars", "stars", "=", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ")", "nstars", "=", "len", "(", "stars", ")", "# Remove ones we've done", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "done", "=", "np", ".", "loadtxt", "(", "file", ",", "dtype", "=", "float", ")", "if", "len", "(", "done", ")", ":", "done", "=", "[", "int", "(", "s", ")", "for", "s", "in", "done", "[", ":", ",", "0", "]", "]", "stars", "=", "list", "(", "set", "(", "stars", ")", "-", "set", "(", "done", ")", ")", "n", "=", "len", "(", "done", ")", "+", "1", "# Open the output file", "with", "open", "(", "file", ",", "'a'", ",", "1", ")", "as", "outfile", ":", "# Loop over all to get the CDPP", "for", "EPIC", "in", "stars", ":", "# Progress", "sys", ".", "stdout", ".", "write", "(", "'\\rRunning target %d/%d...'", "%", "(", "n", ",", "nstars", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "n", "+=", "1", "# Get the CDPP", "try", ":", "_", ",", "flux", "=", "get", "(", "EPIC", ",", "pipeline", "=", "pipeline", ",", "campaign", "=", "campaign", ")", "mask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", ")", "[", "0", "]", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "cdpp", "=", "CDPP", "(", "flux", ")", "except", "(", "urllib", ".", "error", ".", "HTTPError", ",", "urllib", ".", "error", ".", "URLError", ",", "TypeError", ",", "ValueError", ",", "IndexError", ")", ":", "print", "(", "\"{:>09d} {:>15.3f}\"", ".", "format", "(", "EPIC", ",", "0", ")", ",", "file", "=", "outfile", ")", "continue", "# Log to file", "print", "(", "\"{:>09d} {:>15.3f}\"", ".", "format", "(", "EPIC", ",", "cdpp", ")", ",", "file", "=", "outfile", ")" ]
Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/".
[ "Computes", "the", "CDPP", "for", "a", "given", "campaign", "and", "a", "given", "pipeline", ".", "Stores", "the", "results", "in", "a", "file", "under", "/", "missions", "/", "k2", "/", "tables", "/", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L137-L193
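The table written by get_cdpp is two whitespace-separated columns, a zero-padded EPIC ID and the CDPP in ppm, with 0 recorded for targets that failed to download or process. A sketch of reading one back; the filename follows the 'c%02d_%s.cdpp' pattern under missions/k2/tables/, shown here for campaign 5 and everest2, so adjust the path to your install:

import numpy as np

epic, cdpp = np.loadtxt('c05_everest2.cdpp', unpack=True)
good = cdpp > 0                     # zeros mark failed targets
print("median CDPP: %.2f ppm over %d of %d targets"
      % (np.median(cdpp[good]), good.sum(), len(cdpp)))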
rodluger/everest
everest/missions/k2/pipelines.py
get_outliers
def get_outliers(campaign, pipeline='everest2', sigma=5): ''' Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". :param int sigma: The sigma level at which to clip outliers. Default 5 ''' # Imports from .utils import GetK2Campaign client = k2plr.API() # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.out' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' % (n, nstars)) sys.stdout.flush() n += 1 # Get the number of outliers try: time, flux = get(EPIC, pipeline=pipeline, campaign=campaign) # Get the raw K2 data tpf = os.path.join(KPLR_ROOT, "data", "k2", "target_pixel_files", "%09d" % EPIC, "ktwo%09d-c%02d_lpd-targ.fits.gz" % (EPIC, campaign)) if not os.path.exists(tpf): client.k2_star(EPIC).get_target_pixel_files(fetch=True) with pyfits.open(tpf) as f: k2_qual = np.array(f[1].data.field('QUALITY'), dtype=int) k2_time = np.array( f[1].data.field('TIME'), dtype='float64') mask = [] for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]: mask += list(np.where(k2_qual & 2 ** (b - 1))[0]) mask = np.array(sorted(list(set(mask)))) # Fill in missing cadences, if any tol = 0.005 if not ((len(time) == len(k2_time)) and (np.abs(time[0] - k2_time[0]) < tol) and (np.abs(time[-1] - k2_time[-1]) < tol)): ftmp = np.zeros_like(k2_time) * np.nan j = 0 for i, t in enumerate(k2_time): if np.abs(time[j] - t) < tol: ftmp[i] = flux[j] j += 1 if j == len(time) - 1: break flux = ftmp # Remove flagged cadences flux = np.delete(flux, mask) # Remove nans nanmask = np.where(np.isnan(flux))[0] flux = np.delete(flux, nanmask) # Iterative sigma clipping inds = np.array([], dtype=int) m = 1 while len(inds) < m: m = len(inds) f = SavGol(np.delete(flux, inds)) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) inds = np.append(inds, np.where( (f > med + sigma * MAD) | (f < med - sigma * MAD))[0]) nout = len(inds) ntot = len(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>5d} {:>5d}".format( EPIC, -1, -1), file=outfile) continue # Log to file print("{:>09d} {:>5d} {:>5d}".format( EPIC, nout, ntot), file=outfile)
python
def get_outliers(campaign, pipeline='everest2', sigma=5):
    '''
    Computes the number of outliers for a given `campaign` and
    a given `pipeline`. Stores the results in a file under
    "/missions/k2/tables/".

    :param int sigma: The sigma level at which to clip outliers. Default 5

    '''

    # Imports
    from .utils import GetK2Campaign
    client = k2plr.API()

    # Check pipeline
    assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline

    # Create file if it doesn't exist
    file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                        'c%02d_%s.out' % (int(campaign), pipeline))
    if not os.path.exists(file):
        open(file, 'a').close()

    # Get all EPIC stars
    stars = GetK2Campaign(campaign, epics_only=True)
    nstars = len(stars)

    # Remove ones we've done
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        done = np.loadtxt(file, dtype=float)
    if len(done):
        done = [int(s) for s in done[:, 0]]
    stars = list(set(stars) - set(done))
    n = len(done) + 1

    # Open the output file
    with open(file, 'a', 1) as outfile:

        # Loop over all to get the CDPP
        for EPIC in stars:

            # Progress
            sys.stdout.write('\rRunning target %d/%d...' % (n, nstars))
            sys.stdout.flush()
            n += 1

            # Get the number of outliers
            try:
                time, flux = get(EPIC, pipeline=pipeline, campaign=campaign)

                # Get the raw K2 data
                tpf = os.path.join(KPLR_ROOT, "data", "k2",
                                   "target_pixel_files", "%09d" % EPIC,
                                   "ktwo%09d-c%02d_lpd-targ.fits.gz"
                                   % (EPIC, campaign))
                if not os.path.exists(tpf):
                    client.k2_star(EPIC).get_target_pixel_files(fetch=True)
                with pyfits.open(tpf) as f:
                    k2_qual = np.array(f[1].data.field('QUALITY'), dtype=int)
                    k2_time = np.array(
                        f[1].data.field('TIME'), dtype='float64')
                mask = []
                for b in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]:
                    mask += list(np.where(k2_qual & 2 ** (b - 1))[0])
                mask = np.array(sorted(list(set(mask))))

                # Fill in missing cadences, if any
                tol = 0.005
                if not ((len(time) == len(k2_time)) and
                        (np.abs(time[0] - k2_time[0]) < tol) and
                        (np.abs(time[-1] - k2_time[-1]) < tol)):
                    ftmp = np.zeros_like(k2_time) * np.nan
                    j = 0
                    for i, t in enumerate(k2_time):
                        if np.abs(time[j] - t) < tol:
                            ftmp[i] = flux[j]
                            j += 1
                            if j == len(time) - 1:
                                break
                    flux = ftmp

                # Remove flagged cadences
                flux = np.delete(flux, mask)

                # Remove nans
                nanmask = np.where(np.isnan(flux))[0]
                flux = np.delete(flux, nanmask)

                # Iterative sigma clipping
                inds = np.array([], dtype=int)
                m = 1
                while len(inds) < m:
                    m = len(inds)
                    f = SavGol(np.delete(flux, inds))
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    inds = np.append(inds, np.where(
                        (f > med + sigma * MAD) |
                        (f < med - sigma * MAD))[0])
                nout = len(inds)
                ntot = len(flux)
            except (urllib.error.HTTPError, urllib.error.URLError,
                    TypeError, ValueError, IndexError):
                print("{:>09d} {:>5d} {:>5d}".format(
                    EPIC, -1, -1), file=outfile)
                continue

            # Log to file
            print("{:>09d} {:>5d} {:>5d}".format(
                EPIC, nout, ntot), file=outfile)
[ "def", "get_outliers", "(", "campaign", ",", "pipeline", "=", "'everest2'", ",", "sigma", "=", "5", ")", ":", "# Imports", "from", ".", "utils", "import", "GetK2Campaign", "client", "=", "k2plr", ".", "API", "(", ")", "# Check pipeline", "assert", "pipeline", ".", "lower", "(", ")", "in", "Pipelines", ",", "'Invalid pipeline: `%s`.'", "%", "pipeline", "# Create file if it doesn't exist", "file", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.out'", "%", "(", "int", "(", "campaign", ")", ",", "pipeline", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "open", "(", "file", ",", "'a'", ")", ".", "close", "(", ")", "# Get all EPIC stars", "stars", "=", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ")", "nstars", "=", "len", "(", "stars", ")", "# Remove ones we've done", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "done", "=", "np", ".", "loadtxt", "(", "file", ",", "dtype", "=", "float", ")", "if", "len", "(", "done", ")", ":", "done", "=", "[", "int", "(", "s", ")", "for", "s", "in", "done", "[", ":", ",", "0", "]", "]", "stars", "=", "list", "(", "set", "(", "stars", ")", "-", "set", "(", "done", ")", ")", "n", "=", "len", "(", "done", ")", "+", "1", "# Open the output file", "with", "open", "(", "file", ",", "'a'", ",", "1", ")", "as", "outfile", ":", "# Loop over all to get the CDPP", "for", "EPIC", "in", "stars", ":", "# Progress", "sys", ".", "stdout", ".", "write", "(", "'\\rRunning target %d/%d...'", "%", "(", "n", ",", "nstars", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "n", "+=", "1", "# Get the number of outliers", "try", ":", "time", ",", "flux", "=", "get", "(", "EPIC", ",", "pipeline", "=", "pipeline", ",", "campaign", "=", "campaign", ")", "# Get the raw K2 data", "tpf", "=", "os", ".", "path", ".", "join", "(", "KPLR_ROOT", ",", "\"data\"", ",", "\"k2\"", ",", "\"target_pixel_files\"", ",", "\"%09d\"", "%", "EPIC", ",", "\"ktwo%09d-c%02d_lpd-targ.fits.gz\"", "%", "(", "EPIC", ",", "campaign", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "tpf", ")", ":", "client", ".", "k2_star", "(", "EPIC", ")", ".", "get_target_pixel_files", "(", "fetch", "=", "True", ")", "with", "pyfits", ".", "open", "(", "tpf", ")", "as", "f", ":", "k2_qual", "=", "np", ".", "array", "(", "f", "[", "1", "]", ".", "data", ".", "field", "(", "'QUALITY'", ")", ",", "dtype", "=", "int", ")", "k2_time", "=", "np", ".", "array", "(", "f", "[", "1", "]", ".", "data", ".", "field", "(", "'TIME'", ")", ",", "dtype", "=", "'float64'", ")", "mask", "=", "[", "]", "for", "b", "in", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "11", ",", "12", ",", "13", ",", "14", ",", "16", ",", "17", "]", ":", "mask", "+=", "list", "(", "np", ".", "where", "(", "k2_qual", "&", "2", "**", "(", "b", "-", "1", ")", ")", "[", "0", "]", ")", "mask", "=", "np", ".", "array", "(", "sorted", "(", "list", "(", "set", "(", "mask", ")", ")", ")", ")", "# Fill in missing cadences, if any", "tol", "=", "0.005", "if", "not", "(", "(", "len", "(", "time", ")", "==", "len", "(", "k2_time", ")", ")", "and", "(", "np", ".", "abs", "(", "time", "[", "0", "]", "-", "k2_time", "[", "0", "]", ")", "<", "tol", ")", "and", "(", "np", ".", "abs", "(", "time", "[", "-", "1", "]", "-", "k2_time", "[", "-", "1", "]", ")", "<", "tol", ")", ")", ":", "ftmp", "=", "np", ".", 
"zeros_like", "(", "k2_time", ")", "*", "np", ".", "nan", "j", "=", "0", "for", "i", ",", "t", "in", "enumerate", "(", "k2_time", ")", ":", "if", "np", ".", "abs", "(", "time", "[", "j", "]", "-", "t", ")", "<", "tol", ":", "ftmp", "[", "i", "]", "=", "flux", "[", "j", "]", "j", "+=", "1", "if", "j", "==", "len", "(", "time", ")", "-", "1", ":", "break", "flux", "=", "ftmp", "# Remove flagged cadences", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "# Remove nans", "nanmask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", ")", "[", "0", "]", "flux", "=", "np", ".", "delete", "(", "flux", ",", "nanmask", ")", "# Iterative sigma clipping", "inds", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "m", "=", "1", "while", "len", "(", "inds", ")", "<", "m", ":", "m", "=", "len", "(", "inds", ")", "f", "=", "SavGol", "(", "np", ".", "delete", "(", "flux", ",", "inds", ")", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "inds", "=", "np", ".", "append", "(", "inds", ",", "np", ".", "where", "(", "(", "f", ">", "med", "+", "sigma", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "sigma", "*", "MAD", ")", ")", "[", "0", "]", ")", "nout", "=", "len", "(", "inds", ")", "ntot", "=", "len", "(", "flux", ")", "except", "(", "urllib", ".", "error", ".", "HTTPError", ",", "urllib", ".", "error", ".", "URLError", ",", "TypeError", ",", "ValueError", ",", "IndexError", ")", ":", "print", "(", "\"{:>09d} {:>5d} {:>5d}\"", ".", "format", "(", "EPIC", ",", "-", "1", ",", "-", "1", ")", ",", "file", "=", "outfile", ")", "continue", "# Log to file", "print", "(", "\"{:>09d} {:>5d} {:>5d}\"", ".", "format", "(", "EPIC", ",", "nout", ",", "ntot", ")", ",", "file", "=", "outfile", ")" ]
Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/".

:param int sigma: The sigma level at which to clip outliers. Default 5
[ "Computes", "the", "number", "of", "outliers", "for", "a", "given", "campaign", "and", "a", "given", "pipeline", ".", "Stores", "the", "results", "in", "a", "file", "under", "/", "missions", "/", "k2", "/", "tables", "/", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L196-L308
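The analysis step at the heart of get_outliers is a median/MAD sigma clip of high-pass-filtered flux. Note that, as recorded, the `while len(inds) < m` loop with `m = 1` executes its body exactly once, since len(inds) can never drop below the updated m afterwards; an iterative clip would instead stop when no new points are flagged. The sketch below uses that fixed-point condition. It is a minimal sketch under stated assumptions: it takes everest's SavGol helper to act as a high-pass filter (data minus a second-order Savitzky-Golay smooth, with the median restored), and the window size is an illustrative choice; neither detail is confirmed by this record.

import numpy as np
from scipy.signal import savgol_filter


def count_outliers(flux, sigma=5, win=49, order=2):
    """Iterative median/MAD sigma clip of high-pass-filtered flux.

    Returns (nout, ntot): the number of clipped points and the number
    of finite points considered. Iterates until no new outliers appear.
    """
    flux = np.asarray(flux, dtype=float)
    good = np.isfinite(flux)                          # surviving cadences
    ntot = int(good.sum())
    while True:
        keep = np.where(good)[0]
        y = flux[keep]
        # High-pass filter: subtract the smooth trend, restore the median
        # (assumed behavior of everest's SavGol; skip if too few points)
        if len(y) >= win:
            f = y - savgol_filter(y, win, order) + np.nanmedian(y)
        else:
            f = y
        med = np.nanmedian(f)
        mad = 1.4826 * np.nanmedian(np.abs(f - med))  # MAD to Gaussian sigma
        bad = (f > med + sigma * mad) | (f < med - sigma * mad)
        if not bad.any():
            break                                     # fixed point reached
        good[keep[bad]] = False                       # map back to full array
    return ntot - int(good.sum()), ntot

Calling nout, ntot = count_outliers(flux) yields the same two counters the record logs per EPIC, with the -1/-1 row remaining the failure sentinel.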