signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
@property<EOL><INDENT>def source_filename_rel_python_root(self) -> str:<DEDENT>
return relpath(self.source_filename,<EOL>start=self.python_package_root_dir)<EOL>
Returns the name of the source filename, relative to the Python package root. Used to calculate the name of Python modules.
f14657:c1:m5
@property<EOL><INDENT>def rst_dir(self) -> str:<DEDENT>
return dirname(self.target_rst_filename)<EOL>
Returns the directory of the target RST file.
f14657:c1:m6
@property<EOL><INDENT>def source_filename_rel_rst_file(self) -> str:<DEDENT>
return relpath(self.source_filename, start=self.rst_dir)<EOL>
Returns the source filename as seen from the RST filename that we will generate. Used for ``.. include::`` commands.
f14657:c1:m7
@property<EOL><INDENT>def rst_filename_rel_project_root(self) -> str:<DEDENT>
return relpath(self.target_rst_filename, start=self.project_root_dir)<EOL>
Returns the filename of the target RST file, relative to the project root directory. Used for labelling the RST file itself.
f14657:c1:m8
def rst_filename_rel_autodoc_index(self, index_filename: str) -> str:
index_dir = dirname(abspath(expanduser(index_filename)))<EOL>return relpath(self.target_rst_filename, start=index_dir)<EOL>
Returns the filename of the target RST file, relative to a specified index file. Used to make the index refer to the RST.
f14657:c1:m9
@property<EOL><INDENT>def python_module_name(self) -> str:<DEDENT>
if not self.is_python:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>filepath = self.source_filename_rel_python_root<EOL>dirs_and_base = splitext(filepath)[<NUM_LIT:0>]<EOL>dir_and_file_parts = dirs_and_base.split(sep)<EOL>return "<STR_LIT:.>".join(dir_and_file_parts)<EOL>
Returns the name of the Python module that this instance refers to, in dotted Python module notation, or a blank string if it doesn't.
f14657:c1:m10
@property<EOL><INDENT>def pygments_language(self) -> str:<DEDENT>
extension = splitext(self.source_filename)[<NUM_LIT:1>]<EOL>if extension in self.pygments_language_override:<EOL><INDENT>return self.pygments_language_override[extension]<EOL><DEDENT>try:<EOL><INDENT>lexer = get_lexer_for_filename(self.source_filename) <EOL>return lexer.name<EOL><DEDENT>except ClassNotFound:<EOL><INDENT>log.warning("<STR_LIT>",<EOL>self.source_extension)<EOL>return CODE_TYPE_NONE<EOL><DEDENT>
Returns the code type annotation for Pygments; e.g. ``python`` for Python, ``cpp`` for C++, etc.
f14657:c1:m11
def rst_content(self,<EOL>prefix: str = "<STR_LIT>",<EOL>suffix: str = "<STR_LIT>",<EOL>heading_underline_char: str = "<STR_LIT:=>",<EOL>method: AutodocMethod = None) -> str:
spacer = "<STR_LIT:U+0020>"<EOL>if method is None:<EOL><INDENT>method = self.method<EOL><DEDENT>is_python = self.is_python<EOL>if method == AutodocMethod.BEST:<EOL><INDENT>if is_python:<EOL><INDENT>method = AutodocMethod.AUTOMODULE<EOL><DEDENT>else:<EOL><INDENT>method = AutodocMethod.CONTENTS<EOL><DEDENT><DEDENT>elif method == AutodocMethod.AUTOMODULE:<EOL><INDENT>if not is_python:<EOL><INDENT>method = AutodocMethod.CONTENTS<EOL><DEDENT><DEDENT>if method == AutodocMethod.AUTOMODULE:<EOL><INDENT>if self.source_rst_title_style_python:<EOL><INDENT>title = self.python_module_name<EOL><DEDENT>else:<EOL><INDENT>title = self.source_filename_rel_project_root<EOL><DEDENT>instruction = "<STR_LIT>".format(<EOL>modulename=self.python_module_name<EOL>)<EOL><DEDENT>elif method == AutodocMethod.CONTENTS:<EOL><INDENT>title = self.source_filename_rel_project_root<EOL>instruction = (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(<EOL>filename=self.source_filename_rel_rst_file,<EOL>spacer=spacer,<EOL>language=self.pygments_language<EOL>)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>content = """<STR_LIT>""".format(<EOL>filename=self.rst_filename_rel_project_root,<EOL>AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,<EOL>prefix=prefix,<EOL>underlined_title=rst_underline(<EOL>title, underline_char=heading_underline_char),<EOL>instruction=instruction,<EOL>suffix=suffix,<EOL>).strip() + "<STR_LIT:\n>"<EOL>return content<EOL>
Returns the text contents of an RST file that will automatically document our source file. Args: prefix: prefix, e.g. RST copyright comment suffix: suffix, after the part we're creating heading_underline_char: RST character to use to underline the heading method: optional method to override ``self.method``; see constructor Returns: the RST contents
f14657:c1:m12
def write_rst(self,<EOL>prefix: str = "<STR_LIT>",<EOL>suffix: str = "<STR_LIT>",<EOL>heading_underline_char: str = "<STR_LIT:=>",<EOL>method: AutodocMethod = None,<EOL>overwrite: bool = False,<EOL>mock: bool = False) -> None:
content = self.rst_content(<EOL>prefix=prefix,<EOL>suffix=suffix,<EOL>heading_underline_char=heading_underline_char,<EOL>method=method<EOL>)<EOL>write_if_allowed(self.target_rst_filename, content,<EOL>overwrite=overwrite, mock=mock)<EOL>
Writes the RST file to our destination RST filename, making any necessary directories. Args: prefix: as for :func:`rst_content` suffix: as for :func:`rst_content` heading_underline_char: as for :func:`rst_content` method: as for :func:`rst_content` overwrite: overwrite the file if it exists already? mock: pretend to write, but don't
f14657:c1:m13
def __init__(self,<EOL>index_filename: str,<EOL>project_root_dir: str,<EOL>autodoc_rst_root_dir: str,<EOL>highest_code_dir: str,<EOL>python_package_root_dir: str = None,<EOL>source_filenames_or_globs: Union[str, Iterable[str]] = None,<EOL>index_heading_underline_char: str = "<STR_LIT:->",<EOL>source_rst_heading_underline_char: str = "<STR_LIT>",<EOL>title: str = DEFAULT_INDEX_TITLE,<EOL>introductory_rst: str = "<STR_LIT>",<EOL>recursive: bool = True,<EOL>skip_globs: List[str] = None,<EOL>toctree_maxdepth: int = <NUM_LIT:1>,<EOL>method: AutodocMethod = AutodocMethod.BEST,<EOL>rst_prefix: str = "<STR_LIT>",<EOL>rst_suffix: str = "<STR_LIT>",<EOL>source_rst_title_style_python: bool = True,<EOL>pygments_language_override: Dict[str, str] = None) -> None:
assert index_filename<EOL>assert project_root_dir<EOL>assert autodoc_rst_root_dir<EOL>assert isinstance(toctree_maxdepth, int)<EOL>assert isinstance(method, AutodocMethod)<EOL>self.index_filename = abspath(expanduser(index_filename))<EOL>self.title = title<EOL>self.introductory_rst = introductory_rst<EOL>self.project_root_dir = abspath(expanduser(project_root_dir))<EOL>self.autodoc_rst_root_dir = abspath(expanduser(autodoc_rst_root_dir))<EOL>self.highest_code_dir = abspath(expanduser(highest_code_dir))<EOL>self.python_package_root_dir = (<EOL>abspath(expanduser(python_package_root_dir))<EOL>if python_package_root_dir else self.project_root_dir<EOL>)<EOL>self.index_heading_underline_char = index_heading_underline_char<EOL>self.source_rst_heading_underline_char = source_rst_heading_underline_char <EOL>self.recursive = recursive<EOL>self.skip_globs = skip_globs if skip_globs is not None else DEFAULT_SKIP_GLOBS <EOL>self.toctree_maxdepth = toctree_maxdepth<EOL>self.method = method<EOL>self.rst_prefix = rst_prefix<EOL>self.rst_suffix = rst_suffix<EOL>self.source_rst_title_style_python = source_rst_title_style_python<EOL>self.pygments_language_override = pygments_language_override or {} <EOL>assert isdir(self.project_root_dir), (<EOL>"<STR_LIT>".format(<EOL>self.project_root_dir))<EOL>assert relative_filename_within_dir(<EOL>filename=self.index_filename,<EOL>directory=self.project_root_dir<EOL>), (<EOL>"<STR_LIT>".format(<EOL>self.index_filename, self.project_root_dir)<EOL>)<EOL>assert relative_filename_within_dir(<EOL>filename=self.highest_code_dir,<EOL>directory=self.project_root_dir<EOL>), (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(self.highest_code_dir, self.project_root_dir)<EOL>)<EOL>assert relative_filename_within_dir(<EOL>filename=self.autodoc_rst_root_dir,<EOL>directory=self.project_root_dir<EOL>), (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(<EOL>self.autodoc_rst_root_dir, self.project_root_dir)<EOL>)<EOL>assert isinstance(method, AutodocMethod)<EOL>assert 
isinstance(recursive, bool)<EOL>self.files_to_index = [] <EOL>if source_filenames_or_globs:<EOL><INDENT>self.add_source_files(source_filenames_or_globs)<EOL><DEDENT>
Args: index_filename: filename of the index ``.RST`` (ReStructured Text) file to create project_root_dir: top-level directory for the whole project autodoc_rst_root_dir: directory within which all automatically generated ``.RST`` files (each to document a specific source file) will be placed. A directory hierarchy within this directory will be created, reflecting the structure of the code relative to ``highest_code_dir`` (q.v.). highest_code_dir: the "lowest" directory such that all code is found within it; the directory structure within ``autodoc_rst_root_dir`` is to ``.RST`` files what the directory structure is of the source files, relative to ``highest_code_dir``. python_package_root_dir: if your Python modules live in a directory other than ``project_root_dir``, specify it here source_filenames_or_globs: optional string, or list of strings, each describing a file or glob-style file specification; these are the source filenames to create automatic RST for. If you don't specify them here, you can use :func:`add_source_files`. To add sub-indexes, use :func:`add_index` and :func:`add_indexes`. index_heading_underline_char: the character used to underline the title in the index file source_rst_heading_underline_char: the character used to underline the heading in each of the source files title: title for the index introductory_rst: extra RST for the index, which goes between the title and the table of contents recursive: use :func:`glob.glob` in recursive mode? skip_globs: list of file names or file specifications to skip; e.g. ``['__init__.py']`` toctree_maxdepth: ``maxdepth`` parameter for the ``toctree`` command generated in the index file method: see :class:`FileToAutodocument` rst_prefix: optional RST content (e.g. 
copyright comment) to put early on in each of the RST files rst_suffix: optional RST content to put late on in each of the RST files source_rst_title_style_python: make the individual RST files use titles in the style of Python modules, ``x.y.z``, rather than path style (``x/y/z``); path style will be used for non-Python files in any case. pygments_language_override: if specified, a dictionary mapping file extensions to Pygments languages (for example: a ``.pro`` file will be autodetected as Prolog, but you might want to map that to ``none`` for Qt project files).
f14657:c2:m0
def add_source_files(<EOL>self,<EOL>source_filenames_or_globs: Union[str, List[str]],<EOL>method: AutodocMethod = None,<EOL>recursive: bool = None,<EOL>source_rst_title_style_python: bool = None,<EOL>pygments_language_override: Dict[str, str] = None) -> None:
if not source_filenames_or_globs:<EOL><INDENT>return<EOL><DEDENT>if method is None:<EOL><INDENT>method = self.method<EOL><DEDENT>if recursive is None:<EOL><INDENT>recursive = self.recursive<EOL><DEDENT>if source_rst_title_style_python is None:<EOL><INDENT>source_rst_title_style_python = self.source_rst_title_style_python<EOL><DEDENT>if pygments_language_override is None:<EOL><INDENT>pygments_language_override = self.pygments_language_override<EOL><DEDENT>final_filenames = self.get_sorted_source_files(<EOL>source_filenames_or_globs,<EOL>recursive=recursive<EOL>)<EOL>for source_filename in final_filenames:<EOL><INDENT>self.files_to_index.append(FileToAutodocument(<EOL>source_filename=source_filename,<EOL>project_root_dir=self.project_root_dir,<EOL>python_package_root_dir=self.python_package_root_dir,<EOL>target_rst_filename=self.specific_file_rst_filename(<EOL>source_filename<EOL>),<EOL>method=method,<EOL>source_rst_title_style_python=source_rst_title_style_python,<EOL>pygments_language_override=pygments_language_override,<EOL>))<EOL><DEDENT>
Adds source files to the index. Args: source_filenames_or_globs: string containing a filename or a glob, describing the file(s) to be added, or a list of such strings method: optional method to override ``self.method`` recursive: use :func:`glob.glob` in recursive mode? (If ``None``, the default, uses the version from the constructor.) source_rst_title_style_python: optional to override ``self.source_rst_title_style_python`` pygments_language_override: optional to override ``self.pygments_language_override``
f14657:c2:m2
def get_sorted_source_files(<EOL>self,<EOL>source_filenames_or_globs: Union[str, List[str]],<EOL>recursive: bool = True) -> List[str]:
if isinstance(source_filenames_or_globs, str):<EOL><INDENT>source_filenames_or_globs = [source_filenames_or_globs]<EOL><DEDENT>final_filenames = [] <EOL>for sfg in source_filenames_or_globs:<EOL><INDENT>sfg_expanded = expanduser(sfg)<EOL>log.debug("<STR_LIT>", sfg_expanded)<EOL>for filename in glob.glob(sfg_expanded, recursive=recursive):<EOL><INDENT>log.debug("<STR_LIT>", filename)<EOL>if self.should_exclude(filename):<EOL><INDENT>log.info("<STR_LIT>", filename)<EOL>continue<EOL><DEDENT>final_filenames.append(filename)<EOL><DEDENT><DEDENT>final_filenames.sort()<EOL>return final_filenames<EOL>
Returns a sorted list of filenames to process, from a filename, a glob string, or a list of filenames/globs. Args: source_filenames_or_globs: filename/glob, or list of them recursive: use :func:`glob.glob` in recursive mode? Returns: sorted list of files to process
f14657:c2:m3
@staticmethod<EOL><INDENT>def filename_matches_glob(filename: str, globtext: str) -> bool:<DEDENT>
<EOL>if fnmatch(filename, globtext):<EOL><INDENT>log.debug("<STR_LIT>", filename, globtext)<EOL>return True<EOL><DEDENT>bname = basename(filename)<EOL>if fnmatch(bname, globtext):<EOL><INDENT>log.debug("<STR_LIT>", bname, globtext)<EOL>return True<EOL><DEDENT>return False<EOL>
The ``glob.glob`` function doesn't do exclusion very well. We don't want to have to specify root directories for exclusion patterns. We don't want to have to trawl a massive set of files to find exclusion files. So let's implement a glob match. Args: filename: filename globtext: glob Returns: does the filename match the glob? See also: - https://stackoverflow.com/questions/20638040/glob-exclude-pattern
f14657:c2:m4
def should_exclude(self, filename) -> bool:
for skip_glob in self.skip_globs:<EOL><INDENT>if self.filename_matches_glob(filename, skip_glob):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>
Should we exclude this file from consideration?
f14657:c2:m5
def add_index(self, index: "<STR_LIT>") -> None:
self.files_to_index.append(index)<EOL>
Add a sub-index file to this index. Args: index: index file to add, as an instance of :class:`AutodocIndex`
f14657:c2:m6
def add_indexes(self, indexes: List["<STR_LIT>"]) -> None:
for index in indexes:<EOL><INDENT>self.add_index(index)<EOL><DEDENT>
Adds multiple sub-indexes to this index. Args: indexes: list of sub-indexes
f14657:c2:m7
def specific_file_rst_filename(self, source_filename: str) -> str:
highest_code_to_target = relative_filename_within_dir(<EOL>source_filename, self.highest_code_dir)<EOL>bname = basename(source_filename)<EOL>result = join(self.autodoc_rst_root_dir,<EOL>dirname(highest_code_to_target),<EOL>bname + EXT_RST)<EOL>log.debug("<STR_LIT>", source_filename, result)<EOL>return result<EOL>
Gets the RST filename corresponding to a source filename. See the help for the constructor for more details. Args: source_filename: source filename within current project Returns: RST filename Note in particular: the way we structure the directories means that we won't get clashes between files with identical names in two different directories. However, we must also incorporate the original source filename, in particular for C++ where ``thing.h`` and ``thing.cpp`` must not generate the same RST filename. So we just add ``.rst``.
f14657:c2:m8
def write_index_and_rst_files(self, overwrite: bool = False,<EOL>mock: bool = False) -> None:
for f in self.files_to_index:<EOL><INDENT>if isinstance(f, FileToAutodocument):<EOL><INDENT>f.write_rst(<EOL>prefix=self.rst_prefix,<EOL>suffix=self.rst_suffix,<EOL>heading_underline_char=self.source_rst_heading_underline_char, <EOL>overwrite=overwrite,<EOL>mock=mock,<EOL>)<EOL><DEDENT>elif isinstance(f, AutodocIndex):<EOL><INDENT>f.write_index_and_rst_files(overwrite=overwrite, mock=mock)<EOL><DEDENT>else:<EOL><INDENT>fail("<STR_LIT>".format(f))<EOL><DEDENT><DEDENT>self.write_index(overwrite=overwrite, mock=mock)<EOL>
Writes both the individual RST files and the index. Args: overwrite: allow existing files to be overwritten? mock: pretend to write, but don't
f14657:c2:m9
@property<EOL><INDENT>def index_filename_rel_project_root(self) -> str:<DEDENT>
return relpath(self.index_filename, start=self.project_root_dir)<EOL>
Returns the name of the index filename, relative to the project root. Used for labelling the index file.
f14657:c2:m10
def index_filename_rel_other_index(self, other: str) -> str:
return relpath(self.index_filename, start=dirname(other))<EOL>
Returns the filename of this index, relative to the directory of another index. (For inserting a reference to this index into ``other``.) Args: other: the other index Returns: relative filename of our index
f14657:c2:m11
def index_content(self) -> str:
<EOL>index_filename = self.index_filename<EOL>spacer = "<STR_LIT:U+0020>"<EOL>toctree_lines = [<EOL>"<STR_LIT>",<EOL>spacer + "<STR_LIT>".format(self.toctree_maxdepth),<EOL>"<STR_LIT>"<EOL>]<EOL>for f in self.files_to_index:<EOL><INDENT>if isinstance(f, FileToAutodocument):<EOL><INDENT>rst_filename = spacer + f.rst_filename_rel_autodoc_index(<EOL>index_filename)<EOL><DEDENT>elif isinstance(f, AutodocIndex):<EOL><INDENT>rst_filename = (<EOL>spacer + f.index_filename_rel_other_index(index_filename)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>fail("<STR_LIT>".format(f))<EOL>rst_filename = "<STR_LIT>" <EOL><DEDENT>toctree_lines.append(rst_filename)<EOL><DEDENT>toctree = "<STR_LIT:\n>".join(toctree_lines)<EOL>content = """<STR_LIT>""".format(<EOL>filename=self.index_filename_rel_project_root,<EOL>AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,<EOL>prefix=self.rst_prefix,<EOL>underlined_title=rst_underline(<EOL>self.title, underline_char=self.index_heading_underline_char),<EOL>introductory_rst=self.introductory_rst,<EOL>toctree=toctree,<EOL>suffix=self.rst_suffix,<EOL>).strip() + "<STR_LIT:\n>"<EOL>return content<EOL>
Returns the contents of the index RST file.
f14657:c2:m12
def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
write_if_allowed(self.index_filename, self.index_content(),<EOL>overwrite=overwrite, mock=mock)<EOL>
Writes the index file, if permitted. Args: overwrite: allow existing files to be overwritten? mock: pretend to write, but don't
f14657:c2:m13
def get_values_and_permissible(values: Iterable[Tuple[Any, str]],<EOL>add_none: bool = False,<EOL>none_description: str = "<STR_LIT>")-> Tuple[List[Tuple[Any, str]], List[Any]]:
permissible_values = list(x[<NUM_LIT:0>] for x in values)<EOL>if add_none:<EOL><INDENT>none_tuple = (SERIALIZED_NONE, none_description)<EOL>values = [none_tuple] + list(values)<EOL><DEDENT>return values, permissible_values<EOL>
Used when building Colander nodes. Args: values: an iterable of tuples like ``(value, description)`` used in HTML forms add_none: add a tuple ``(None, none_description)`` at the start of ``values`` in the result? none_description: the description used for ``None`` if ``add_none`` is set Returns: a tuple ``(values, permissible_values)``, where - ``values`` is what was passed in (perhaps with the addition of the "None" tuple at the start) - ``permissible_values`` is a list of all the ``value`` elements of the original ``values``
f14658:m0
def serialize(self,<EOL>node: SchemaNode,<EOL>appstruct: Union[PotentialDatetimeType,<EOL>ColanderNullType])-> Union[str, ColanderNullType]:
if not appstruct:<EOL><INDENT>return colander.null<EOL><DEDENT>try:<EOL><INDENT>appstruct = coerce_to_pendulum(appstruct,<EOL>assume_local=self.use_local_tz)<EOL><DEDENT>except (ValueError, ParserError) as e:<EOL><INDENT>raise Invalid(<EOL>node,<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(appstruct, e))<EOL><DEDENT>return appstruct.isoformat()<EOL>
Serializes Python object to string representation.
f14658:c0:m1
def deserialize(self,<EOL>node: SchemaNode,<EOL>cstruct: Union[str, ColanderNullType])-> Optional[Pendulum]:
if not cstruct:<EOL><INDENT>return colander.null<EOL><DEDENT>try:<EOL><INDENT>result = coerce_to_pendulum(cstruct,<EOL>assume_local=self.use_local_tz)<EOL><DEDENT>except (ValueError, ParserError) as e:<EOL><INDENT>raise Invalid(node, "<STR_LIT>"<EOL>"<STR_LIT>".format(cstruct, e))<EOL><DEDENT>return result<EOL>
Deserializes string representation to Python object.
f14658:c0:m2
def serialize(self, node: SchemaNode,<EOL>value: Any) -> Union[str, ColanderNullType]:
if value is None:<EOL><INDENT>retval = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>retval = self.type_.serialize(node, value)<EOL><DEDENT>return retval<EOL>
Serializes Python object to string representation.
f14658:c1:m1
def deserialize(self, node: SchemaNode,<EOL>value: Union[str, ColanderNullType]) -> Any:
if value is None or value == '<STR_LIT>':<EOL><INDENT>retval = None<EOL><DEDENT>else:<EOL><INDENT>retval = self.type_.deserialize(node, value)<EOL><DEDENT>return retval<EOL>
Deserializes string representation to Python object.
f14658:c1:m2
def __init__(self, *args, length: int = <NUM_LIT:4>, allowed_chars: str = None,<EOL>**kwargs) -> None:
self.allowed_chars = allowed_chars or "<STR_LIT>"<EOL>self.length = length<EOL>super().__init__(*args, **kwargs)<EOL>
Args: length: code length required from the user allowed_chars: string containing the permitted characters (by default, digits)
f14658:c13:m0
def generic_service_main(cls: Type[WindowsService], name: str) -> None:
argc = len(sys.argv)<EOL>if argc == <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>print("<STR_LIT>")<EOL>evtsrc_dll = os.path.abspath(servicemanager.__file__)<EOL>servicemanager.PrepareToHostSingle(cls) <EOL>servicemanager.Initialize(name, evtsrc_dll)<EOL>servicemanager.StartServiceCtrlDispatcher()<EOL><DEDENT>except win32service.error as details:<EOL><INDENT>print("<STR_LIT>".format(details))<EOL>errnum = details.winerror<EOL>if errnum == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:<EOL><INDENT>win32serviceutil.usage()<EOL><DEDENT><DEDENT><DEDENT>elif argc == <NUM_LIT:2> and sys.argv[<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>s = cls()<EOL>s.run_debug()<EOL><DEDENT>else:<EOL><INDENT>win32serviceutil.HandleCommandLine(cls)<EOL><DEDENT>
Call this from your command-line entry point to manage a service. - Via inherited functions, enables you to ``install``, ``update``, ``remove``, ``start``, ``stop``, and ``restart`` the service. - Via our additional code, allows you to run the service function directly from the command line in debug mode, using the ``debug`` command. - Run with an invalid command like ``help`` to see help (!). See https://mail.python.org/pipermail/python-win32/2008-April/007299.html Args: cls: class deriving from :class:`WindowsService` name: name of this service
f14659:m0
def __init__(self,<EOL>name: str,<EOL>procargs: List[str],<EOL>logfile_out: str = '<STR_LIT>',<EOL>logfile_err: str = '<STR_LIT>') -> None:
self.name = name<EOL>self.procargs = procargs<EOL>self.logfile_out = logfile_out<EOL>self.logfile_err = logfile_err<EOL>
Args: name: cosmetic name of the process procargs: command-line arguments logfile_out: filename to write ``stdout`` to logfile_err: filename to write ``stderr`` to
f14659:c0:m0
def __init__(self,<EOL>details: ProcessDetails,<EOL>procnum: int,<EOL>nprocs: int,<EOL>kill_timeout_sec: float = <NUM_LIT:5>,<EOL>debugging: bool = False):
self.details = details<EOL>self.procnum = procnum<EOL>self.nprocs = nprocs<EOL>self.kill_timeout_sec = kill_timeout_sec<EOL>self.process = None <EOL>self.running = False<EOL>self.stdout = None <EOL>self.stderr = None <EOL>self.debugging = debugging<EOL>
Args: details: description of the process as a :class:`ProcessDetails` object procnum: for cosmetic purposes only: the process sequence number of this process nprocs: for cosmetic purposes only: the total number of processes (including others not managed by this instance) kill_timeout_sec: how long (in seconds) will we wait for the process to end peacefully, before we try to kill it? debugging: be verbose?
f14659:c1:m0
@property<EOL><INDENT>def fullname(self) -> str:<DEDENT>
fullname = "<STR_LIT>".format(self.procnum, self.nprocs,<EOL>self.details.name)<EOL>if self.running:<EOL><INDENT>fullname += "<STR_LIT>".format(self.process.pid)<EOL><DEDENT>return fullname<EOL>
Description of the process.
f14659:c1:m1
def debug(self, msg: str) -> None:
if self.debugging:<EOL><INDENT>s = "<STR_LIT>".format(self.fullname, msg)<EOL>log.debug(s)<EOL><DEDENT>
If we are being verbose, write a debug message to the Python disk log.
f14659:c1:m2
def info(self, msg: str) -> None:
<EOL>s = "<STR_LIT>".format(self.fullname, msg)<EOL>servicemanager.LogInfoMsg(s)<EOL>if self.debugging:<EOL><INDENT>log.info(s)<EOL><DEDENT>
Write an info message to the Windows Application log (± to the Python disk log).
f14659:c1:m3
def warning(self, msg: str) -> None:
<EOL>s = "<STR_LIT>".format(self.fullname, msg)<EOL>servicemanager.LogWarningMsg(s)<EOL>if self.debugging:<EOL><INDENT>log.warning(s)<EOL><DEDENT>
Write a warning message to the Windows Application log (± to the Python disk log).
f14659:c1:m4
def error(self, msg: str) -> None:
<EOL>s = "<STR_LIT>".format(self.fullname, msg)<EOL>servicemanager.LogErrorMsg(s)<EOL>if self.debugging:<EOL><INDENT>log.warning(s)<EOL><DEDENT>
Write an error message to the Windows Application log (± to the Python disk log).
f14659:c1:m5
def open_logs(self) -> None:
if self.details.logfile_out:<EOL><INDENT>self.stdout = open(self.details.logfile_out, '<STR_LIT:a>')<EOL><DEDENT>else:<EOL><INDENT>self.stdout = None<EOL><DEDENT>if self.details.logfile_err:<EOL><INDENT>if self.details.logfile_err == self.details.logfile_out:<EOL><INDENT>self.stderr = subprocess.STDOUT<EOL><DEDENT>else:<EOL><INDENT>self.stderr = open(self.details.logfile_err, '<STR_LIT:a>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.stderr = None<EOL><DEDENT>
Open Python disk logs.
f14659:c1:m6
def close_logs(self) -> None:
if self.stdout is not None:<EOL><INDENT>self.stdout.close()<EOL>self.stdout = None<EOL><DEDENT>if self.stderr is not None and self.stderr != subprocess.STDOUT:<EOL><INDENT>self.stderr.close()<EOL>self.stderr = None<EOL><DEDENT>
Close Python disk logs.
f14659:c1:m7
def start(self) -> None:
if self.running:<EOL><INDENT>return<EOL><DEDENT>self.info("<STR_LIT>".format(<EOL>self.details.procargs,<EOL>self.details.logfile_out,<EOL>self.details.logfile_err))<EOL>self.open_logs()<EOL>creationflags = CREATE_NEW_PROCESS_GROUP if WINDOWS else <NUM_LIT:0><EOL>self.process = subprocess.Popen(self.details.procargs, stdin=None,<EOL>stdout=self.stdout, stderr=self.stderr,<EOL>creationflags=creationflags)<EOL>self.running = True<EOL>
Starts a subprocess. Optionally routes its output to our disk logs.
f14659:c1:m8
def stop(self) -> None:
if not self.running:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>self.wait(timeout_s=<NUM_LIT:0>)<EOL><DEDENT>except subprocess.TimeoutExpired: <EOL><INDENT>for kill_level in self.ALL_KILL_LEVELS:<EOL><INDENT>tried_to_kill = self._terminate(level=kill_level) <EOL>if tried_to_kill:<EOL><INDENT>try:<EOL><INDENT>self.wait(timeout_s=self.kill_timeout_sec)<EOL>break<EOL><DEDENT>except subprocess.TimeoutExpired: <EOL><INDENT>self.warning("<STR_LIT>")<EOL>pass <EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.close_logs()<EOL>self.running = False<EOL>
Stops a subprocess. Asks nicely. Waits. Asks less nicely. Repeat until subprocess is dead. .. todo:: cardinal_pythonlib.winservice.ProcessManager._kill: make it reliable under Windows
f14659:c1:m9
def _terminate(self, level: int) -> bool:
if not self.running:<EOL><INDENT>return True<EOL><DEDENT>try:<EOL><INDENT>self.wait(<NUM_LIT:0>)<EOL>return True<EOL><DEDENT>except subprocess.TimeoutExpired: <EOL><INDENT>pass<EOL><DEDENT>suffix = "<STR_LIT>".format(self.process.pid)<EOL>if level == self.KILL_LEVEL_CTRL_C_OR_SOFT_KILL:<EOL><INDENT>if WINDOWS:<EOL><INDENT>success = <NUM_LIT:0> != ctypes.windll.kernel32.GenerateConsoleCtrlEvent(<EOL>CTRL_C_EVENT, self.process.pid)<EOL>if success:<EOL><INDENT>self.info("<STR_LIT>" + suffix)<EOL><DEDENT>else:<EOL><INDENT>self.info("<STR_LIT>" + suffix)<EOL><DEDENT>return success<EOL><DEDENT>else:<EOL><INDENT>self.warning("<STR_LIT>" + suffix)<EOL>self.process.terminate() <EOL>return True<EOL><DEDENT><DEDENT>elif level == self.KILL_LEVEL_CTRL_BREAK:<EOL><INDENT>if not WINDOWS:<EOL><INDENT>return False<EOL><DEDENT>success = <NUM_LIT:0> != ctypes.windll.kernel32.GenerateConsoleCtrlEvent(<EOL>CTRL_BREAK_EVENT, self.process.pid)<EOL>if success:<EOL><INDENT>self.info("<STR_LIT>" + suffix)<EOL><DEDENT>else:<EOL><INDENT>self.info("<STR_LIT>" + suffix)<EOL><DEDENT>return success<EOL><DEDENT>elif level == self.KILL_LEVEL_TASKKILL:<EOL><INDENT>if not WINDOWS:<EOL><INDENT>return False<EOL><DEDENT>retcode = self._taskkill(force=False) <EOL>return retcode == winerror.ERROR_SUCCESS<EOL><DEDENT>elif level == self.KILL_LEVEL_TASKKILL_FORCE:<EOL><INDENT>if not WINDOWS:<EOL><INDENT>return False<EOL><DEDENT>retcode = self._taskkill(force=True) <EOL>return retcode == winerror.ERROR_SUCCESS<EOL><DEDENT>elif level == self.KILL_LEVEL_HARD_KILL:<EOL><INDENT>self._kill() <EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>
Returns: succeeded in *attempting* a kill?
f14659:c1:m10
def _taskkill(self, force: bool = False) -> int:
<EOL>args = [<EOL>"<STR_LIT>", <EOL>"<STR_LIT>", str(self.process.pid),<EOL>"<STR_LIT>", <EOL>]<EOL>if force:<EOL><INDENT>args.append("<STR_LIT>") <EOL><DEDENT>callname = "<STR_LIT:U+0020>".join(args)<EOL>retcode = subprocess.call(args)<EOL>if retcode == winerror.ERROR_SUCCESS: <EOL><INDENT>self.info("<STR_LIT>" + repr(callname))<EOL><DEDENT>elif retcode == winerror.ERROR_INVALID_FUNCTION: <EOL><INDENT>self.warning(<EOL>repr(callname) +<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>elif retcode == winerror.ERROR_WAIT_NO_CHILDREN: <EOL><INDENT>self.warning(<EOL>repr(callname) +<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>elif retcode == winerror.ERROR_EA_LIST_INCONSISTENT: <EOL><INDENT>self.warning(<EOL>repr(callname) +<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>self.warning(callname + "<STR_LIT>".format(retcode))<EOL><DEDENT>return retcode<EOL>
Executes a Windows ``TASKKILL /pid PROCESS_ID /t`` command (``/t`` for "tree kill" = "kill all children"). Args: force: also add ``/f`` (forcefully) Returns: return code from ``TASKKILL`` **Test code:** Firstly we need a program that won't let itself be killed. Save this as ``nokill.py``: .. code-block:: python #!/usr/bin/env python import logging import time import os from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger from cardinal_pythonlib.signalfunc import trap_ctrl_c_ctrl_break main_only_quicksetup_rootlogger(level=logging.DEBUG) trap_ctrl_c_ctrl_break() while True: print("Process ID is {}; time is {} s".format(os.getpid(), time.clock())) time.sleep(1) Now run that with ``python nokill.py``. It should resist CTRL-C and CTRL-BREAK. Start another command prompt in which to play with ``TASKKILL``. .. code-block:: bat REM Firstly, avoid this single-ampersand syntax: REM taskkill /im notepad.exe & echo %errorlevel% REM ... as it prints the WRONG (previous?) errorlevel. notepad.exe taskkill /im notepad.exe echo %errorlevel% REM ... 0 for success (Windows 10), e.g. REM 'SUCCESS: Sent termination signal to the process "notepad.exe" with PID 6988.' taskkill /im notepad.exe echo %errorlevel% REM ... 128 for "not found" (Windows 10), e.g. REM 'ERROR: The process "notepad.exe" not found.' REM Now run notepad.exe as Administrator taskkill /im notepad.exe & echo %errorlevel% REM ... 1 for "access denied" (Windows 10) REM Now kill the nokill.py process by its PID (e.g. 11892 here): taskkill /pid 11892 echo %errorlevel% REM ... 1 for "not allowed" (Windows 10), e.g. REM 'ERROR: The process with PID 11892 could not be terminated.' REM 'Reason: This process can only be terminated forcefully (with /F option).' REM Now forcefully: taskkill /pid 11892 /f echo %errorlevel% REM ... 0 for success (Windows 10), e.g. REM 'SUCCESS: The process with PID 11892 has been terminated.'
f14659:c1:m11
def _kill(self) -> None:
<EOL>self.warning("<STR_LIT>")<EOL>pid = self.process.pid<EOL>gone, still_alive = kill_proc_tree(pid, including_parent=True,<EOL>timeout_s=self.kill_timeout_sec)<EOL>self.debug("<STR_LIT>".format(gone))<EOL>self.warning("<STR_LIT>".format(still_alive))<EOL>
Hard kill. - PROBLEM: originally, via ``self.process.kill()``, could leave orphans under Windows. - SOLUTION: see https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows, which uses ``psutil``.
f14659:c1:m12
def wait(self, timeout_s: float = None) -> int:
if not self.running:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>retcode = self.process.wait(timeout=timeout_s)<EOL>if retcode is None:<EOL><INDENT>self.error("<STR_LIT>")<EOL>retcode = <NUM_LIT:1> <EOL><DEDENT>elif retcode == <NUM_LIT:0>:<EOL><INDENT>self.info("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>self.error(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(<EOL>retcode,<EOL>self.details.logfile_out,<EOL>self.details.logfile_err))<EOL><DEDENT>self.running = False<EOL>return retcode<EOL>
Wait for up to ``timeout_s`` for the child process to finish. Args: timeout_s: maximum time to wait or ``None`` to wait forever Returns: process return code; or ``0`` if it wasn't running, or ``1`` if it managed to exit without a return code Raises: subprocess.TimeoutExpired: if the process continues to run
f14659:c1:m13
def debug(self, msg: str) -> None:
if self.debugging:<EOL><INDENT>log.debug(msg)<EOL><DEDENT>
If we are being verbose, write a debug message to the Python log.
f14659:c2:m1
def info(self, msg: str) -> None:
<EOL>servicemanager.LogInfoMsg(str(msg))<EOL>if self.debugging:<EOL><INDENT>log.info(msg)<EOL><DEDENT>
Write an info message to the Windows Application log (± to the Python disk log).
f14659:c2:m2
def error(self, msg: str) -> None:
<EOL>servicemanager.LogErrorMsg(str(msg))<EOL>if self.debugging:<EOL><INDENT>log.error(msg)<EOL><DEDENT>
Write an error message to the Windows Application log (± to the Python disk log).
f14659:c2:m3
def SvcStop(self) -> None:
<EOL>self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)<EOL>win32event.SetEvent(self.h_stop_event)<EOL>
Called when the service is being shut down.
f14659:c2:m4
def SvcDoRun(self) -> None:
<EOL>self.debug("<STR_LIT>")<EOL>servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,<EOL>servicemanager.PYS_SERVICE_STARTED,<EOL>(self._svc_name_, '<STR_LIT>'))<EOL>self.main() <EOL>servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,<EOL>servicemanager.PYS_SERVICE_STOPPED,<EOL>(self._svc_name_, '<STR_LIT>'))<EOL>self.ReportServiceStatus(win32service.SERVICE_STOPPED)<EOL>
Called when the service is started.
f14659:c2:m5
def run_debug(self) -> None:
self.debugging = True<EOL>self.main()<EOL>
Enable verbose mode and call :func:`main`.
f14659:c2:m7
def main(self) -> None:
<EOL>try:<EOL><INDENT>self.service()<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.error("<STR_LIT>".format(<EOL>e=e, t=traceback.format_exc()))<EOL><DEDENT>
Main entry point. Runs :func:`service`.
f14659:c2:m8
def service(self) -> None:
raise NotImplementedError()<EOL>
Service function. Must be overridden by derived classes.
f14659:c2:m9
def run_processes(self,<EOL>procdetails: List[ProcessDetails],<EOL>subproc_run_timeout_sec: float = <NUM_LIT:1>,<EOL>stop_event_timeout_ms: int = <NUM_LIT:1000>,<EOL>kill_timeout_sec: float = <NUM_LIT:5>) -> None:
<EOL>def cleanup():<EOL><INDENT>self.debug("<STR_LIT>")<EOL>for pmgr_ in self.process_managers:<EOL><INDENT>pmgr_.stop()<EOL><DEDENT><DEDENT>atexit.register(cleanup)<EOL>self.process_managers = [] <EOL>n = len(procdetails)<EOL>for i, details in enumerate(procdetails):<EOL><INDENT>pmgr = ProcessManager(details, i + <NUM_LIT:1>, n,<EOL>kill_timeout_sec=kill_timeout_sec,<EOL>debugging=self.debugging)<EOL>self.process_managers.append(pmgr)<EOL><DEDENT>for pmgr in self.process_managers:<EOL><INDENT>pmgr.start()<EOL><DEDENT>self.info("<STR_LIT>")<EOL>something_running = True<EOL>stop_requested = False<EOL>subproc_failed = False<EOL>while something_running and not stop_requested and not subproc_failed:<EOL><INDENT>if (win32event.WaitForSingleObject(<EOL>self.h_stop_event,<EOL>stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):<EOL><INDENT>stop_requested = True<EOL>self.info("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>something_running = False<EOL>for pmgr in self.process_managers:<EOL><INDENT>if subproc_failed:<EOL><INDENT>break<EOL><DEDENT>try:<EOL><INDENT>retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)<EOL>if retcode != <NUM_LIT:0>:<EOL><INDENT>subproc_failed = True<EOL><DEDENT><DEDENT>except subprocess.TimeoutExpired:<EOL><INDENT>something_running = True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for pmgr in self.process_managers:<EOL><INDENT>pmgr.stop()<EOL><DEDENT>self.info("<STR_LIT>")<EOL>
Run multiple child processes. Args: procdetails: list of :class:`ProcessDetails` objects (q.v.) subproc_run_timeout_sec: time (in seconds) to wait for each process when polling child processes to see how they're getting on (default ``1``) stop_event_timeout_ms: time to wait (in ms) while checking the Windows stop event for this service (default ``1000``) kill_timeout_sec: how long (in seconds) will we wait for the subprocesses to end peacefully, before we try to kill them? .. todo:: cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED: Windows service autorestart
f14659:c2:m10
def repr_parameter(param: inspect.Parameter) -> str:
return (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(<EOL>name=param.name, annotation=param.annotation, kind=param.kind,<EOL>default=param.default)<EOL>)<EOL>
Provides a ``repr``-style representation of a function parameter.
f14660:m0
def get_namespace(fn: Callable, namespace: Optional[str]) -> str:
<EOL><INDENT>hidden attributes with dir(fn)<EOL><DEDENT>nspection PyUnresolvedReferences<EOL>n "<STR_LIT>".format(<EOL>odule=fn.__module__,<EOL>ame=fn.__qualname__, <EOL>xtra="<STR_LIT>".format(namespace) if namespace is not None else "<STR_LIT>",<EOL>
Returns a representation of a function's name (perhaps within a namespace), like .. code-block:: none mymodule:MyClass.myclassfunc # with no namespace mymodule:MyClass.myclassfunc|somenamespace # with a namespace Args: fn: a function namespace: an optional namespace, which can be of any type but is normally a ``str``; if not ``None``, ``str(namespace)`` will be added to the result. See https://dogpilecache.readthedocs.io/en/latest/api.html#dogpile.cache.region.CacheRegion.cache_on_arguments
f14660:m1
def fkg_allowing_type_hints(<EOL>namespace: Optional[str],<EOL>fn: Callable,<EOL>to_str: Callable[[Any], str] = repr) -> Callable[[Any], str]:
namespace = get_namespace(fn, namespace)<EOL>sig = inspect.signature(fn)<EOL>argnames = [p.name for p in sig.parameters.values()<EOL>if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]<EOL>has_self = bool(argnames and argnames[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT>'))<EOL>def generate_key(*args: Any, **kw: Any) -> str:<EOL><INDENT>"""<STR_LIT>"""<EOL>if kw:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if has_self:<EOL><INDENT>args = [hex(id(args[<NUM_LIT:0>]))] + list(args[<NUM_LIT:1>:])<EOL><DEDENT>key = namespace + "<STR_LIT:|>" + "<STR_LIT:U+0020>".join(map(to_str, args))<EOL>if DEBUG_INTERNALS:<EOL><INDENT>log.debug("<STR_LIT>", key)<EOL><DEDENT>return key<EOL><DEDENT>return generate_key<EOL>
Replacement for :func:`dogpile.cache.util.function_key_generator` that handles type-hinted functions like .. code-block:: python def testfunc(param: str) -> str: return param + "hello" ... at which :func:`inspect.getargspec` balks; plus :func:`inspect.getargspec` is deprecated in Python 3. Used as an argument to e.g. ``@cache_region_static.cache_on_arguments()``. Also modified to make the cached function unique per INSTANCE for normal methods of a class. Args: namespace: optional namespace, as per :func:`get_namespace` fn: function to generate a key for (usually the function being decorated) to_str: function to apply to map arguments to a string (to make a unique key for a particular call to the function); by default it is :func:`repr` Returns: a function that generates a string key, based on a given function as well as arguments to the returned function itself.
f14660:m2
def multikey_fkg_allowing_type_hints(<EOL>namespace: Optional[str],<EOL>fn: Callable,<EOL>to_str: Callable[[Any], str] = repr) -> Callable[[Any], List[str]]:
namespace = get_namespace(fn, namespace)<EOL>sig = inspect.signature(fn)<EOL>argnames = [p.name for p in sig.parameters.values()<EOL>if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]<EOL>has_self = bool(argnames and argnames[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT>'))<EOL>def generate_keys(*args: Any, **kw: Any) -> List[str]:<EOL><INDENT>if kw:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if has_self:<EOL><INDENT>args = [hex(id(args[<NUM_LIT:0>]))] + list(args[<NUM_LIT:1>:])<EOL><DEDENT>keys = [namespace + "<STR_LIT:|>" + key for key in map(to_str, args)]<EOL>if DEBUG_INTERNALS:<EOL><INDENT>log.debug(<EOL>"<STR_LIT>",<EOL>keys)<EOL><DEDENT>return keys<EOL><DEDENT>return generate_keys<EOL>
Equivalent of :func:`dogpile.cache.util.function_multi_key_generator`, but using :func:`inspect.signature` instead. Also modified to make the cached function unique per INSTANCE for normal methods of a class.
f14660:m3
def kw_fkg_allowing_type_hints(<EOL>namespace: Optional[str],<EOL>fn: Callable,<EOL>to_str: Callable[[Any], str] = repr) -> Callable[[Any], str]:
namespace = get_namespace(fn, namespace)<EOL>sig = inspect.signature(fn)<EOL>parameters = list(sig.parameters.values()) <EOL>argnames = [p.name for p in parameters<EOL>if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]<EOL>has_self = bool(argnames and argnames[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT>'))<EOL>if DEBUG_INTERNALS:<EOL><INDENT>log.debug(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>namespace=namespace,<EOL>parameters="<STR_LIT:U+002CU+0020>".join(repr_parameter(p) for p in parameters),<EOL>argnames=repr(argnames),<EOL>has_self=has_self,<EOL>fn=repr(fn),<EOL>)<EOL><DEDENT>def generate_key(*args: Any, **kwargs: Any) -> str:<EOL><INDENT>as_kwargs = {} <EOL>loose_args = [] <EOL>for idx, arg in enumerate(args):<EOL><INDENT>if idx >= len(argnames):<EOL><INDENT>loose_args.append(arg)<EOL><DEDENT>else:<EOL><INDENT>if has_self and idx == <NUM_LIT:0>: <EOL><INDENT>argvalue = hex(id(arg))<EOL><DEDENT>else:<EOL><INDENT>argvalue = arg<EOL><DEDENT>as_kwargs[argnames[idx]] = argvalue<EOL><DEDENT><DEDENT>if loose_args:<EOL><INDENT>as_kwargs['<STR_LIT>'] = loose_args<EOL><DEDENT>as_kwargs.update(kwargs)<EOL>for param in parameters:<EOL><INDENT>if param.default != inspect.Parameter.empty:<EOL><INDENT>if param.name not in as_kwargs:<EOL><INDENT>as_kwargs[param.name] = param.default<EOL><DEDENT><DEDENT><DEDENT>argument_values = ["<STR_LIT>".format(k=key, v=to_str(as_kwargs[key]))<EOL>for key in sorted(as_kwargs.keys())]<EOL>key = namespace + '<STR_LIT:|>' + "<STR_LIT:U+0020>".join(argument_values)<EOL>if DEBUG_INTERNALS:<EOL><INDENT>log.debug("<STR_LIT>", key)<EOL><DEDENT>return key<EOL><DEDENT>return generate_key<EOL>
As for :func:`fkg_allowing_type_hints`, but allowing keyword arguments. For ``kwargs`` passed in, we will build a ``dict`` of all argname (key) to argvalue (values) pairs, including default args from the argspec, and then alphabetize the list before generating the key. NOTE ALSO that once we have keyword arguments, we should be using :func:`repr`, because we need to distinguish .. code-block:: python kwargs = {'p': 'another', 'q': 'thing'} # ... which compat.string_type will make into # p=another q=thing # ... from kwargs = {'p': 'another q=thing'} Also modified to make the cached function unique per INSTANCE for normal methods of a class.
f14660:m4
def nhs_check_digit(ninedigits: Union[str, List[Union[str, int]]]) -> int:
if len(ninedigits) != <NUM_LIT:9> or not all(str(x).isdigit() for x in ninedigits):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>check_digit = <NUM_LIT:11> - (sum([<EOL>int(d) * f<EOL>for (d, f) in zip(ninedigits, NHS_DIGIT_WEIGHTINGS)<EOL>]) % <NUM_LIT:11>)<EOL>if check_digit == <NUM_LIT:11>:<EOL><INDENT>check_digit = <NUM_LIT:0><EOL><DEDENT>return check_digit<EOL>
Calculates an NHS number check digit. Args: ninedigits: string or list Returns: check digit Method: 1. Multiply each of the first nine digits by the corresponding digit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`). 2. Sum the results. 3. Take remainder after division by 11. 4. Subtract the remainder from 11 5. If this is 11, use 0 instead If it's 10, the number is invalid If it doesn't match the actual check digit, the number is invalid
f14661:m0
def is_valid_nhs_number(n: int) -> bool:
<EOL>t isinstance(n, int):<EOL>og.debug("<STR_LIT>")<EOL>eturn False<EOL>tr(n)<EOL><INDENT><NUM_LIT:10> digits long?<EOL><DEDENT>n(s) != <NUM_LIT:10>:<EOL>og.debug("<STR_LIT>")<EOL>eturn False<EOL>digits = [int(s[i]) for i in range(<NUM_LIT:9>)]<EOL>l_check_digit = int(s[<NUM_LIT:9>]) <EOL>ted_check_digit = nhs_check_digit(main_digits)<EOL>pected_check_digit == <NUM_LIT:10>:<EOL>og.debug("<STR_LIT>")<EOL>eturn False<EOL>pected_check_digit != actual_check_digit:<EOL>og.debug("<STR_LIT>")<EOL>eturn False<EOL>ray!<EOL>n True<EOL>
Validates an integer as an NHS number. Args: n: NHS number Returns: valid? Checksum details are at http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp
f14661:m1
def generate_random_nhs_number() -> int:
check_digit = <NUM_LIT:10> <EOL>while check_digit == <NUM_LIT:10>:<EOL><INDENT>digits = [random.randint(<NUM_LIT:1>, <NUM_LIT:9>)] <EOL>digits.extend([random.randint(<NUM_LIT:0>, <NUM_LIT:9>) for _ in range(<NUM_LIT:8>)])<EOL>check_digit = nhs_check_digit(digits)<EOL><DEDENT>digits.append(check_digit)<EOL>return int("<STR_LIT>".join([str(d) for d in digits]))<EOL>
Returns a random valid NHS number, as an ``int``.
f14661:m2
def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
if len(first9digits) != <NUM_LIT:9>:<EOL><INDENT>log.warning("<STR_LIT>")<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>first9int = int(first9digits)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>log.warning("<STR_LIT>")<EOL>return None <EOL><DEDENT>if len(str(first9int)) != len(first9digits):<EOL><INDENT>log.warning("<STR_LIT>")<EOL>return None<EOL><DEDENT>check_digit = nhs_check_digit(first9digits)<EOL>if check_digit == <NUM_LIT:10>: <EOL><INDENT>log.warning("<STR_LIT>")<EOL>return None<EOL><DEDENT>return int(first9digits + str(check_digit))<EOL>
Returns a valid NHS number, as an ``int``, given the first 9 digits. The particular purpose is to make NHS numbers that *look* fake (rather than truly random NHS numbers which might accidentally be real). For example: .. code-block:: none 123456789_ : no; checksum 10 987654321_ : yes, valid if completed to 9876543210 999999999_ : yes, valid if completed to 9999999999
f14661:m4
def nhs_number_from_text_or_none(s: str) -> Optional[int]:
<EOL>e in, None out.<EOL>ame = "<STR_LIT>"<EOL>t s:<EOL>og.debug(funcname + "<STR_LIT>")<EOL>eturn None<EOL><INDENT>If it's not a <NUM_LIT:10>-digit number, bye-bye.<EOL><DEDENT>ove whitespace<EOL>HITESPACE_REGEX.sub("<STR_LIT>", s) <EOL>tains non-numeric characters?<EOL>N_NUMERIC_REGEX.search(s):<EOL>og.debug(funcname + "<STR_LIT>")<EOL>eturn None<EOL><INDENT><NUM_LIT:10> digits long?<EOL><DEDENT>n(s) != <NUM_LIT:10>:<EOL>og.debug(funcname + "<STR_LIT>")<EOL>eturn None<EOL><INDENT>Validation<EOL><DEDENT>nt(s)<EOL>t is_valid_nhs_number(n):<EOL>og.debug(funcname + "<STR_LIT>")<EOL>eturn None<EOL>py!<EOL>n n<EOL>
Returns a validated NHS number (as an integer) from a string, or ``None`` if it is not valid. It's a 10-digit number, so note that database 32-bit INT values are insufficient; use BIGINT. Python will handle large integers happily. NHS number rules: http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp?shownav=0
f14661:m5
def tee(infile: IO, *files: IO) -> Thread:
<EOL>anout(_infile: IO, *_files: IO):<EOL>or line in iter(_infile.readline, '<STR_LIT>'):<EOL><INDENT>for f in _files:<EOL><INDENT>f.write(line)<EOL><DEDENT><DEDENT>nfile.close()<EOL>hread(target=fanout, args=(infile,) + files)<EOL>mon = True<EOL>rt()<EOL>n t<EOL>
r""" Print the file-like object ``infile`` to the file-like object(s) ``files`` in a separate thread. Starts and returns that thread. The type (text, binary) must MATCH across all files. From https://stackoverflow.com/questions/4984428/python-subprocess-get-childrens-output-to-file-and-terminal A note on text versus binary IO: TEXT files include: - files opened in text mode (``"r"``, ``"rt"``, ``"w"``, ``"wt"``) - ``sys.stdin``, ``sys.stdout`` - ``io.StringIO()``; see https://docs.python.org/3/glossary.html#term-text-file BINARY files include: - files opened in binary mode (``"rb"``, ``"wb"``, ``"rb+"``...) - ``sys.stdin.buffer``, ``sys.stdout.buffer`` - ``io.BytesIO()`` - ``gzip.GzipFile()``; see https://docs.python.org/3/glossary.html#term-binary-file .. code-block:: bash $ python3 # don't get confused and use Python 2 by mistake! .. code-block:: python t = open("/tmp/text.txt", "r+t") # text mode is default b = open("/tmp/bin.bin", "r+b") t.write("hello\n") # OK # b.write("hello\n") # raises TypeError # t.write(b"world\n") # raises TypeError b.write(b"world\n") # OK t.flush() b.flush() t.seek(0) b.seek(0) x = t.readline() # "hello\n" y = b.readline() # b"world\n"
f14662:m0
def teed_call(cmd_args,<EOL>stdout_targets: List[TextIO] = None,<EOL>stderr_targets: List[TextIO] = None,<EOL>encoding: str = sys.getdefaultencoding(),<EOL>**kwargs):
<EOL>e a copy so we can append without damaging the original:<EOL>t_targets = stdout_targets.copy() if stdout_targets else [] <EOL>r_targets = stderr_targets.copy() if stderr_targets else [] <EOL>open(cmd_args, stdout=PIPE, stderr=PIPE, **kwargs)<EOL>ds = [] <EOL>open(os.devnull, "<STR_LIT:w>") as null: <EOL>f not stdout_targets:<EOL><INDENT>stdout_targets.append(null)<EOL><DEDENT>f not stderr_targets:<EOL><INDENT>stderr_targets.append(null)<EOL>
Runs a command and captures its output via :func:`tee` to one or more destinations. The output is always captured (otherwise we would lose control of the output and ability to ``tee`` it); if no destination is specified, we add a null handler. We insist on ``TextIO`` output files to match ``sys.stdout`` (etc.). A variation on: https://stackoverflow.com/questions/4984428/python-subprocess-get-childrens-output-to-file-and-terminal Args: cmd_args: arguments for the command to run stdout_targets: file-like objects to write ``stdout`` to stderr_targets: file-like objects to write ``stderr`` to encoding: encoding to apply to ``stdout`` and ``stderr`` kwargs: additional arguments for :class:`subprocess.Popen`
f14662:m1
@contextmanager<EOL>def tee_log(tee_file: TextIO, loglevel: int) -> None:
handler = get_monochrome_handler(stream=tee_file)<EOL>handler.setLevel(loglevel)<EOL>rootlogger = logging.getLogger()<EOL>rootlogger.addHandler(handler)<EOL>with TeeContextManager(tee_file, capture_stdout=True):<EOL><INDENT>with TeeContextManager(tee_file, capture_stderr=True):<EOL><INDENT>try:<EOL><INDENT>yield<EOL><DEDENT>except Exception:<EOL><INDENT>exc_type, exc_value, exc_traceback = sys.exc_info()<EOL>lines = traceback.format_exception(exc_type, exc_value,<EOL>exc_traceback)<EOL>log.critical("<STR_LIT:\n>" + "<STR_LIT>".join(lines))<EOL>raise<EOL><DEDENT><DEDENT><DEDENT>
Context manager to add a file output stream to our logging system. Args: tee_file: file-like object to write to loglevel: log level (e.g. ``logging.DEBUG``) to use for this stream
f14662:m2
def __init__(self,<EOL>file: TextIO,<EOL>capture_stdout: bool = False,<EOL>capture_stderr: bool = False) -> None:
<EOL>assert capture_stdout != capture_stderr, (<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL>self.using_stdout = capture_stdout<EOL>self.file = file<EOL>self.filename = file.name<EOL>self.output_description = "<STR_LIT>" if capture_stdout else "<STR_LIT>"<EOL>log.debug("<STR_LIT>",<EOL>self.output_description, self.filename)<EOL>if self.using_stdout:<EOL><INDENT>self.underlying_stream = sys.stdout<EOL>sys.stdout = self <EOL><DEDENT>else:<EOL><INDENT>self.underlying_stream = sys.stderr<EOL>sys.stderr = self<EOL><DEDENT>
Args: file: file-like object to write to. We take a file object, not a filename, so we can apply multiple tee filters going to the same file. capture_stdout: capture ``stdout``? Use this or ``capture_stderr`` capture_stderr: capture ``stderr``? Use this or ``capture_stdout`` We read the filename from ``file.name`` but this is purely cosmetic.
f14662:c0:m0
def __enter__(self) -> None:
pass<EOL>
To act as a context manager.
f14662:c0:m1
def __exit__(self, *args) -> None:
self.close()<EOL>
To act as a context manager.
f14662:c0:m2
def write(self, message: str) -> None:
self.underlying_stream.write(message)<EOL>self.file.write(message)<EOL>
To act as a file.
f14662:c0:m3
def flush(self) -> None:
self.underlying_stream.flush()<EOL>self.file.flush()<EOL>os.fsync(self.file.fileno())<EOL>
To act as a file.
f14662:c0:m4
def close(self) -> None:
if self.underlying_stream:<EOL><INDENT>if self.using_stdout:<EOL><INDENT>sys.stdout = self.underlying_stream<EOL><DEDENT>else:<EOL><INDENT>sys.stderr = self.underlying_stream<EOL><DEDENT>self.underlying_stream = None<EOL><DEDENT>if self.file:<EOL><INDENT>self.file = None<EOL>log.debug("<STR_LIT>",<EOL>self.output_description, self.filename)<EOL><DEDENT>
To act as a file.
f14662:c0:m5
def __init__(self, start: bool = True) -> None:
self._timing = start<EOL>self._overallstart = get_now_utc_pendulum()<EOL>self._starttimes = OrderedDict() <EOL>self._totaldurations = OrderedDict() <EOL>self._count = OrderedDict() <EOL>self._stack = []<EOL>
Args: start: start the timer immediately?
f14663:c0:m0
def reset(self) -> None:
self._overallstart = get_now_utc_pendulum()<EOL>self._starttimes.clear()<EOL>self._totaldurations.clear()<EOL>self._count.clear()<EOL>self._stack.clear()<EOL>
Reset the timers.
f14663:c0:m1
def set_timing(self, timing: bool, reset: bool = False) -> None:
self._timing = timing<EOL>if reset:<EOL><INDENT>self.reset()<EOL><DEDENT>
Manually set the ``timing`` parameter, and optionally reset the timers. Args: timing: should we be timing? reset: reset the timers?
f14663:c0:m2
def start(self, name: str, increment_count: bool = True) -> None:
if not self._timing:<EOL><INDENT>return<EOL><DEDENT>now = get_now_utc_pendulum()<EOL>if self._stack:<EOL><INDENT>last = self._stack[-<NUM_LIT:1>]<EOL>self._totaldurations[last] += now - self._starttimes[last]<EOL><DEDENT>if name not in self._starttimes:<EOL><INDENT>self._totaldurations[name] = datetime.timedelta()<EOL>self._count[name] = <NUM_LIT:0><EOL><DEDENT>self._starttimes[name] = now<EOL>if increment_count:<EOL><INDENT>self._count[name] += <NUM_LIT:1><EOL><DEDENT>self._stack.append(name)<EOL>
Start a named timer. Args: name: name of the timer increment_count: increment the start count for this timer
f14663:c0:m3
def stop(self, name: str) -> None:
if not self._timing:<EOL><INDENT>return<EOL><DEDENT>now = get_now_utc_pendulum()<EOL>if not self._stack:<EOL><INDENT>raise AssertionError("<STR_LIT>")<EOL><DEDENT>if self._stack[-<NUM_LIT:1>] != name:<EOL><INDENT>raise AssertionError(<EOL>"<STR_LIT>".format(<EOL>repr(name), repr(self._stack[-<NUM_LIT:1>])))<EOL><DEDENT>self._totaldurations[name] += now - self._starttimes[name]<EOL>self._stack.pop()<EOL>if self._stack:<EOL><INDENT>last = self._stack[-<NUM_LIT:1>]<EOL>self._starttimes[last] = now<EOL><DEDENT>
Stop a named timer. Args: name: timer to stop
f14663:c0:m4
def report(self) -> None:
while self._stack:<EOL><INDENT>self.stop(self._stack[-<NUM_LIT:1>])<EOL><DEDENT>now = get_now_utc_pendulum()<EOL>grand_total = datetime.timedelta()<EOL>overall_duration = now - self._overallstart<EOL>for name, duration in self._totaldurations.items():<EOL><INDENT>grand_total += duration<EOL><DEDENT>log.info("<STR_LIT>")<EOL>summaries = []<EOL>for name, duration in self._totaldurations.items():<EOL><INDENT>n = self._count[name]<EOL>total_sec = duration.total_seconds()<EOL>mean = total_sec / n if n > <NUM_LIT:0> else None<EOL>summaries.append({<EOL>'<STR_LIT>': total_sec,<EOL>'<STR_LIT:description>': (<EOL>"<STR_LIT>".format(<EOL>name,<EOL>total_sec,<EOL>(<NUM_LIT:100> * total_sec / grand_total.total_seconds()),<EOL>n,<EOL>mean)),<EOL>})<EOL><DEDENT>summaries.sort(key=lambda x: x['<STR_LIT>'], reverse=True)<EOL>for s in summaries:<EOL><INDENT>log.info(s["<STR_LIT:description>"])<EOL><DEDENT>if not self._totaldurations:<EOL><INDENT>log.info("<STR_LIT>")<EOL><DEDENT>unmetered = overall_duration - grand_total<EOL>log.info(<EOL>"<STR_LIT>",<EOL>unmetered.total_seconds(),<EOL><NUM_LIT:100> * unmetered.total_seconds() / overall_duration.total_seconds()<EOL>)<EOL>log.info("<STR_LIT>", grand_total.total_seconds())<EOL>
Finish and report to the log.
f14663:c0:m5
def __init__(self, multitimer: MultiTimer, name: str) -> None:
self.timer = multitimer<EOL>self.name = name<EOL>
Args: multitimer: :class:`MultiTimer` to use name: name of timer to start as we enter, and stop as we exit
f14663:c1:m0
def ask_user(prompt: str, default: str = None) -> Optional[str]:
if default is None:<EOL><INDENT>prompt += "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>prompt += "<STR_LIT>" + default + "<STR_LIT>"<EOL><DEDENT>result = input(prompt)<EOL>return result if len(result) > <NUM_LIT:0> else default<EOL>
Prompts the user, with a default. Returns user input from ``stdin``.
f14665:m0
def ask_user_password(prompt: str) -> str:
return getpass.getpass(prompt + "<STR_LIT>")<EOL>
Read a password from the console.
f14665:m1
def get_save_as_filename(defaultfilename: str,<EOL>defaultextension: str,<EOL>title: str = "<STR_LIT>") -> str:
root = tkinter.Tk() <EOL>root.withdraw() <EOL>root.attributes('<STR_LIT>', True) <EOL>filename = filedialog.asksaveasfilename(<EOL>initialfile=defaultfilename,<EOL>defaultextension=defaultextension,<EOL>parent=root,<EOL>title=title<EOL>)<EOL>root.attributes('<STR_LIT>', False) <EOL>return filename<EOL>
Provides a GUI "Save As" dialogue (via ``tkinter``) and returns the filename.
f14665:m2
def get_open_filename(defaultfilename: str,<EOL>defaultextension: str,<EOL>title: str = "<STR_LIT>") -> str:
root = tkinter.Tk() <EOL>root.withdraw() <EOL>root.attributes('<STR_LIT>', True) <EOL>filename = filedialog.askopenfilename(<EOL>initialfile=defaultfilename,<EOL>defaultextension=defaultextension,<EOL>parent=root,<EOL>title=title<EOL>)<EOL>root.attributes('<STR_LIT>', False) <EOL>return filename<EOL>
Provides a GUI "Open" dialogue (via ``tkinter``) and returns the filename.
f14665:m3
def softmax(x: np.ndarray,<EOL>b: float = <NUM_LIT:1.0>) -> np.ndarray:
constant = np.mean(x)<EOL>products = x * b - constant<EOL>if products.max() > sys.float_info.max_exp:<EOL><INDENT>log.warning("<STR_LIT>"<EOL>"<STR_LIT>".format(x, b, constant, products))<EOL>n = len(x)<EOL>index_of_max = np.argmax(products)<EOL>answer = np.zeros(n)<EOL>answer[index_of_max] = <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>exponented = np.exp(products)<EOL>answer = exponented / np.sum(exponented)<EOL><DEDENT>return answer<EOL>
r""" Standard softmax function: .. math:: P_i = \frac {e ^ {\beta \cdot x_i}} { \sum_{i}{\beta \cdot x_i} } Args: x: vector (``numpy.array``) of values b: exploration parameter :math:`\beta`, or inverse temperature [Daw2009], or :math:`1/t`; see below Returns: vector of probabilities corresponding to the input values where: - :math:`t` is temperature (towards infinity: all actions equally likely; towards zero: probability of action with highest value tends to 1) - Temperature is not used directly as optimizers may take it to zero, giving an infinity; use inverse temperature instead. - [Daw2009] Daw ND, "Trial-by-trial data analysis using computational methods", 2009/2011; in "Decision Making, Affect, and Learning: Attention and Performance XXIII"; Delgado MR, Phelps EA, Robbins TW (eds), Oxford University Press.
f14666:m0
def logistic(x: Union[float, np.ndarray],<EOL>k: float,<EOL>theta: float) -> Optional[float]:
<EOL>if x is None or k is None or theta is None:<EOL><INDENT>return None<EOL><DEDENT>return <NUM_LIT:1> / (<NUM_LIT:1> + np.exp(-k * (x - theta)))<EOL>
r""" Standard logistic function. .. math:: y = \frac {1} {1 + e^{-k (x - \theta)}} Args: x: :math:`x` k: :math:`k` theta: :math:`\theta` Returns: :math:`y`
f14666:m1
def inv_logistic(y: Union[float, np.ndarray],<EOL>k: float,<EOL>theta: float) -> Optional[float]:
if y is None or k is None or theta is None:<EOL><INDENT>return None<EOL><DEDENT>return (np.log((<NUM_LIT:1> / y) - <NUM_LIT:1>) / -k) + theta<EOL>
r""" Inverse standard logistic function: .. math:: x = ( log( \frac {1} {y} - 1) / -k ) + \theta Args: y: :math:`y` k: :math:`k` theta: :math:`\theta` Returns: :math:`x`
f14666:m2
def get_monochrome_handler(<EOL>extranames: List[str] = None,<EOL>with_process_id: bool = False,<EOL>with_thread_id: bool = False,<EOL>stream: TextIO = None) -> logging.StreamHandler:
fmt = "<STR_LIT>"<EOL>if with_process_id or with_thread_id:<EOL><INDENT>procinfo = [] <EOL>if with_process_id:<EOL><INDENT>procinfo.append("<STR_LIT>")<EOL><DEDENT>if with_thread_id:<EOL><INDENT>procinfo.append("<STR_LIT>")<EOL><DEDENT>fmt += "<STR_LIT>".format("<STR_LIT:.>".join(procinfo))<EOL><DEDENT>extras = "<STR_LIT::>" + "<STR_LIT::>".join(extranames) if extranames else "<STR_LIT>"<EOL>fmt += "<STR_LIT>".format(extras=extras)<EOL>fmt += "<STR_LIT>"<EOL>f = logging.Formatter(fmt, datefmt=LOG_DATEFMT, style='<STR_LIT:%>')<EOL>h = logging.StreamHandler(stream)<EOL>h.setFormatter(f)<EOL>return h<EOL>
Gets a monochrome log handler using a standard format. Args: extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name? stream: ``TextIO`` stream to send log output to Returns: the :class:`logging.StreamHandler`
f14667:m0
def get_colour_handler(extranames: List[str] = None,<EOL>with_process_id: bool = False,<EOL>with_thread_id: bool = False,<EOL>stream: TextIO = None) -> logging.StreamHandler:
fmt = "<STR_LIT>" <EOL>if with_process_id or with_thread_id:<EOL><INDENT>procinfo = [] <EOL>if with_process_id:<EOL><INDENT>procinfo.append("<STR_LIT>")<EOL><DEDENT>if with_thread_id:<EOL><INDENT>procinfo.append("<STR_LIT>")<EOL><DEDENT>fmt += "<STR_LIT>".format("<STR_LIT:.>".join(procinfo))<EOL><DEDENT>extras = "<STR_LIT::>" + "<STR_LIT::>".join(extranames) if extranames else "<STR_LIT>"<EOL>fmt += "<STR_LIT>".format(extras=extras)<EOL>fmt += "<STR_LIT>"<EOL>cf = ColoredFormatter(fmt,<EOL>datefmt=LOG_DATEFMT,<EOL>reset=True,<EOL>log_colors=LOG_COLORS,<EOL>secondary_log_colors={},<EOL>style='<STR_LIT:%>')<EOL>ch = logging.StreamHandler(stream)<EOL>ch.setFormatter(cf)<EOL>return ch<EOL>
Gets a colour log handler using a standard format. Args: extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name? stream: ``TextIO`` stream to send log output to Returns: the :class:`logging.StreamHandler`
f14667:m1
def configure_logger_for_colour(logger: logging.Logger,<EOL>level: int = logging.INFO,<EOL>remove_existing: bool = False,<EOL>extranames: List[str] = None,<EOL>with_process_id: bool = False,<EOL>with_thread_id: bool = False) -> None:
if remove_existing:<EOL><INDENT>logger.handlers = [] <EOL><DEDENT>handler = get_colour_handler(extranames,<EOL>with_process_id=with_process_id,<EOL>with_thread_id=with_thread_id)<EOL>handler.setLevel(level)<EOL>logger.addHandler(handler)<EOL>logger.setLevel(level)<EOL>
Applies a preconfigured datetime/colour scheme to a logger. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Args: logger: logger to modify level: log level to set remove_existing: remove existing handlers from logger first? extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name?
f14667:m2
def main_only_quicksetup_rootlogger(level: int = logging.DEBUG,<EOL>with_process_id: bool = False,<EOL>with_thread_id: bool = False) -> None:
<EOL>rootlogger = logging.getLogger()<EOL>configure_logger_for_colour(rootlogger, level, remove_existing=True,<EOL>with_process_id=with_process_id,<EOL>with_thread_id=with_thread_id)<EOL>
Quick function to set up the root logger for colour. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Args: level: log level to set with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name?
f14667:m3
def remove_all_logger_handlers(logger: logging.Logger) -> None:
while logger.handlers:<EOL><INDENT>h = logger.handlers[<NUM_LIT:0>]<EOL>logger.removeHandler(h)<EOL><DEDENT>
Remove all handlers from a logger. Args: logger: logger to modify
f14667:m4
def reset_logformat(logger: logging.Logger,<EOL>fmt: str,<EOL>datefmt: str = '<STR_LIT>') -> None:
handler = logging.StreamHandler()<EOL>formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)<EOL>handler.setFormatter(formatter)<EOL>remove_all_logger_handlers(logger)<EOL>logger.addHandler(handler)<EOL>logger.propagate = False<EOL>
Create a new formatter and apply it to the logger. :func:`logging.basicConfig` won't reset the formatter if another module has called it, so always set the formatter like this. Args: logger: logger to modify fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter` datefmt: passed to the ``datefmt=`` argument of :class:`logging.Formatter`
f14667:m5
def reset_logformat_timestamped(logger: logging.Logger,<EOL>extraname: str = "<STR_LIT>",<EOL>level: int = logging.INFO) -> None:
namebit = extraname + "<STR_LIT::>" if extraname else "<STR_LIT>"<EOL>fmt = ("<STR_LIT>" + namebit +<EOL>"<STR_LIT>")<EOL>reset_logformat(logger, fmt=fmt)<EOL>logger.setLevel(level)<EOL>
Apply a simple time-stamped log format to an existing logger, and set its loglevel to either ``logging.DEBUG`` or ``logging.INFO``. Args: logger: logger to modify extraname: additional name to append to the logger's name level: log level to set
f14667:m6
def configure_all_loggers_for_colour(remove_existing: bool = True) -> None:
handler = get_colour_handler()<EOL>apply_handler_to_all_logs(handler, remove_existing=remove_existing)<EOL>
Applies a preconfigured datetime/colour scheme to ALL logger. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Generally MORE SENSIBLE just to apply a handler to the root logger. Args: remove_existing: remove existing handlers from logger first?
f14667:m7
def apply_handler_to_root_log(handler: logging.Handler,<EOL>remove_existing: bool = False) -> None:
rootlog = logging.getLogger()<EOL>if remove_existing:<EOL><INDENT>rootlog.handlers = []<EOL><DEDENT>rootlog.addHandler(handler)<EOL>
Applies a handler to all logs, optionally removing existing handlers. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Generally MORE SENSIBLE just to apply a handler to the root logger. Args: handler: the handler to apply remove_existing: remove existing handlers from logger first?
f14667:m8
def apply_handler_to_all_logs(handler: logging.Handler,<EOL>remove_existing: bool = False) -> None:
<EOL>for name, obj in logging.Logger.manager.loggerDict.items():<EOL><INDENT>if remove_existing:<EOL><INDENT>obj.handlers = [] <EOL><DEDENT>obj.addHandler(handler)<EOL><DEDENT>
Applies a handler to all logs, optionally removing existing handlers. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Generally MORE SENSIBLE just to apply a handler to the root logger. Args: handler: the handler to apply remove_existing: remove existing handlers from logger first?
f14667:m9
def copy_root_log_to_file(filename: str,<EOL>fmt: str = LOG_FORMAT,<EOL>datefmt: str = LOG_DATEFMT) -> None:
fh = logging.FileHandler(filename)<EOL>formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)<EOL>fh.setFormatter(formatter)<EOL>apply_handler_to_root_log(fh)<EOL>
Copy all currently configured logs to the specified file. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config.
f14667:m10
def copy_all_logs_to_file(filename: str,<EOL>fmt: str = LOG_FORMAT,<EOL>datefmt: str = LOG_DATEFMT) -> None:
fh = logging.FileHandler(filename)<EOL>formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)<EOL>fh.setFormatter(formatter)<EOL>apply_handler_to_all_logs(fh)<EOL>
Copy all currently configured logs to the specified file. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Args: filename: file to send log output to fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter` datefmt: passed to the ``datefmt=`` argument of :class:`logging.Formatter`
f14667:m11
def get_formatter_report(f: logging.Formatter) -> Optional[Dict[str, str]]:
if f is None:<EOL><INDENT>return None<EOL><DEDENT>return {<EOL>'<STR_LIT>': f._fmt,<EOL>'<STR_LIT>': f.datefmt,<EOL>'<STR_LIT>': str(f._style),<EOL>}<EOL>
Returns information on a log formatter, as a dictionary. For debugging.
f14667:m12
def get_handler_report(h: logging.Handler) -> Dict[str, Any]:
return {<EOL>'<STR_LIT>': h.get_name(),<EOL>'<STR_LIT>': h.level,<EOL>'<STR_LIT>': get_formatter_report(h.formatter),<EOL>'<STR_LIT>': h.filters,<EOL>}<EOL>
Returns information on a log handler, as a dictionary. For debugging.
f14667:m13