#
# core.py
#
import os
import typing
from typing import (
NamedTuple,
Union,
Callable,
Any,
Generator,
Tuple,
List,
TextIO,
Set,
Sequence,
)
from abc import ABC, abstractmethod
from enum import Enum
import string
import copy
import warnings
import re
import sys
from collections.abc import Iterable
import traceback
import types
from operator import itemgetter
from functools import wraps
from threading import RLock
from pathlib import Path
from .util import (
_FifoCache,
_UnboundedCache,
__config_flags,
_collapse_string_to_ranges,
_escape_regex_range_chars,
_bslash,
_flatten,
LRUMemo as _LRUMemo,
UnboundedMemo as _UnboundedMemo,
)
from .exceptions import *
from .actions import *
from .results import ParseResults, _ParseResultsWithOffset
from .unicode import pyparsing_unicode
_MAX_INT = sys.maxsize
str_type: Tuple[type, ...] = (str, bytes)
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
if sys.version_info >= (3, 8):
from functools import cached_property
else:
class cached_property:
def __init__(self, func):
self._func = func
def __get__(self, instance, owner=None):
ret = instance.__dict__[self._func.__name__] = self._func(instance)
return ret
class __compat__(__config_flags):
"""
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- ``collect_all_And_tokens`` - flag to enable the fix for Issue #63, which corrects erroneous grouping
of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
maintained for compatibility, but setting it to ``False`` no longer restores pre-2.3.1
behavior
"""
_type_desc = "compatibility"
collect_all_And_tokens = True
_all_names = [__ for __ in locals() if not __.startswith("_")]
_fixed_names = """
collect_all_And_tokens
""".split()
class __diag__(__config_flags):
_type_desc = "diagnostic"
warn_multiple_tokens_in_named_alternation = False
warn_ungrouped_named_tokens_in_collection = False
warn_name_set_on_empty_Forward = False
warn_on_parse_using_empty_Forward = False
warn_on_assignment_to_Forward = False
warn_on_multiple_string_args_to_oneof = False
warn_on_match_first_with_lshift_operator = False
enable_debug_on_named_expressions = False
_all_names = [__ for __ in locals() if not __.startswith("_")]
_warning_names = [name for name in _all_names if name.startswith("warn")]
_debug_names = [name for name in _all_names if name.startswith("enable_debug")]
@classmethod
def enable_all_warnings(cls) -> None:
for name in cls._warning_names:
cls.enable(name)
class Diagnostics(Enum):
"""
Diagnostic configuration (all default to disabled)
- ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
with a results name, but has no contents defined
- ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
defined in a grammar but has never had an expression attached to it
- ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
incorrectly called with multiple str arguments
- ``warn_on_match_first_with_lshift_operator`` - flag to enable warnings when the ``'<<'`` operator is
used with a ``'|'`` :class:`MatchFirst` alternative, where ``'<<='`` was probably intended
- ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
calls to :class:`ParserElement.set_name`
Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
All warnings can be enabled by calling :class:`enable_all_warnings`.
"""
warn_multiple_tokens_in_named_alternation = 0
warn_ungrouped_named_tokens_in_collection = 1
warn_name_set_on_empty_Forward = 2
warn_on_parse_using_empty_Forward = 3
warn_on_assignment_to_Forward = 4
warn_on_multiple_string_args_to_oneof = 5
warn_on_match_first_with_lshift_operator = 6
enable_debug_on_named_expressions = 7
def enable_diag(diag_enum: Diagnostics) -> None:
"""
Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.enable(diag_enum.name)
def disable_diag(diag_enum: Diagnostics) -> None:
"""
Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.disable(diag_enum.name)
def enable_all_warnings() -> None:
"""
Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
"""
__diag__.enable_all_warnings()
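# Usage sketch (illustrative only; assumes the package-level re-exports in pyparsing's
# __init__, i.e. pyparsing.enable_diag, pyparsing.Diagnostics, pyparsing.enable_all_warnings):
#
#   import pyparsing as pp
#   pp.enable_diag(pp.Diagnostics.warn_name_set_on_empty_Forward)   # enable one diagnostic
#   pp.enable_all_warnings()                                        # or enable every warning flag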
# hide abstract class
del __config_flags
def _should_enable_warnings(
cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
":"
)[:5]
if not w_action.lower().startswith("i") and (
not (w_message or w_category or w_module) or w_module == "pyparsing"
):
enable = True
elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
enable = False
return enable
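# Behavior sketch for _should_enable_warnings() (illustrative): the command-line options follow
# Python's -W format "action:message:category:module:lineno".
#   _should_enable_warnings(["error"], None)               # -> True  (warnings escalated globally)
#   _should_enable_warnings(["ignore:::pyparsing"], None)  # -> False (pyparsing warnings ignored)
#   _should_enable_warnings([], "1")                       # -> True  (env var set)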
if _should_enable_warnings(
sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
):
enable_all_warnings()
# build list of single-arg builtins that can be used as parse actions
_single_arg_builtins = {
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
}
_generatorType = types.GeneratorType
ParseAction = Union[
Callable[[], Any],
Callable[[ParseResults], Any],
Callable[[int, ParseResults], Any],
Callable[[str, int, ParseResults], Any],
]
ParseCondition = Union[
Callable[[], bool],
Callable[[ParseResults], bool],
Callable[[int, ParseResults], bool],
Callable[[str, int, ParseResults], bool],
]
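# Signature sketch (illustrative): ParseAction and ParseCondition accept any of the four
# arities declared in the unions above; pyparsing trims the call to match (see _trim_arity below).
#   def no_args(): ...
#   def toks_only(toks): ...
#   def loc_and_toks(loc, toks): ...
#   def full_signature(s, loc, toks): ...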
ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
DebugSuccessAction = Callable[
[str, int, int, "ParserElement", ParseResults, bool], None
]
DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
alphas = string.ascii_uppercase + string.ascii_lowercase
identchars = pyparsing_unicode.Latin1.identchars
identbodychars = pyparsing_unicode.Latin1.identbodychars
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
printables = "".join([c for c in string.printable if c not in string.whitespace])
_trim_arity_call_line: traceback.StackSummary = None
def _trim_arity(func, max_limit=3):
"""decorator to trim function calls to match the arity of the target"""
global _trim_arity_call_line
if func in _single_arg_builtins:
return lambda s, l, t: func(t)
limit = 0
found_arity = False
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
# fmt: off
LINE_DIFF = 7
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
_trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
def wrapper(*args):
nonlocal found_arity, limit
while 1:
try:
ret = func(*args[limit:])
found_arity = True
return ret
except TypeError as te:
# re-raise TypeErrors if they did not come from our arity testing
if found_arity:
raise
else:
tb = te.__traceback__
trim_arity_type_error = (
extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
)
del tb
if trim_arity_type_error:
if limit < max_limit:
limit += 1
continue
raise
# fmt: on
# copy func name to wrapper for sensible debug output
# (can't use functools.wraps, since that messes with function signature)
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
wrapper.__name__ = func_name
wrapper.__doc__ = func.__doc__
return wrapper
def condition_as_parse_action(
fn: ParseCondition, message: str = None, fatal: bool = False
) -> ParseAction:
"""
Function to convert a simple predicate function that returns ``True`` or ``False``
into a parse action. Can be used in places where a parse action is required
and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
to an operator level in :class:`infix_notation`).
Optional keyword arguments:
- ``message`` - define a custom message to be used in the raised exception
- ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
otherwise will raise :class:`ParseException`
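Example (a minimal usage sketch, using only names defined in this module)::
integer = Word(nums).set_parse_action(lambda t: int(t[0]))
small_int = integer.copy()
# same effect as small_int.add_condition(lambda t: t[0] < 100, message="expected value < 100")
small_int.add_parse_action(condition_as_parse_action(lambda t: t[0] < 100, message="expected value < 100"))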
"""
msg = message if message is not None else "failed user-defined condition"
exc_type = ParseFatalException if fatal else ParseException
fn = _trim_arity(fn)
@wraps(fn)
def pa(s, l, t):
if not bool(fn(s, l, t)):
raise exc_type(s, l, msg)
return pa
def _default_start_debug_action(
instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
):
cache_hit_str = "*" if cache_hit else ""
print(
(
"{}Match {} at loc {}({},{})\n {}\n {}^".format(
cache_hit_str,
expr,
loc,
lineno(loc, instring),
col(loc, instring),
line(loc, instring),
" " * (col(loc, instring) - 1),
)
)
)
def _default_success_debug_action(
instring: str,
startloc: int,
endloc: int,
expr: "ParserElement",
toks: ParseResults,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
def _default_exception_debug_action(
instring: str,
loc: int,
expr: "ParserElement",
exc: Exception,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print(
"{}Match {} failed, {} raised: {}".format(
cache_hit_str, expr, type(exc).__name__, exc
)
)
def null_debug_action(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
class ParserElement(ABC):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS: str = " \n\t\r"
verbose_stacktrace: bool = False
_literalStringClass: typing.Optional[type] = None
@staticmethod
def set_default_whitespace_chars(chars: str) -> None:
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.set_default_whitespace_chars(" \t")
Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
# update whitespace all parse expressions defined in this module
for expr in _builtin_exprs:
if expr.copyDefaultWhiteChars:
expr.whiteChars = set(chars)
@staticmethod
def inline_literals_using(cls: type) -> None:
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inline_literals_using(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
class DebugActions(NamedTuple):
debug_try: typing.Optional[DebugStartAction]
debug_match: typing.Optional[DebugSuccessAction]
debug_fail: typing.Optional[DebugExceptionAction]
def __init__(self, savelist: bool = False):
self.parseAction: List[ParseAction] = list()
self.failAction: typing.Optional[ParseFailAction] = None
self.customName = None
self._defaultName = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
# used when checking for left-recursion
self.mayReturnEmpty = False
self.keepTabs = False
self.ignoreExprs: List["ParserElement"] = list()
self.debug = False
self.streamlined = False
# optimize exception handling for subclasses that don't advance parse index
self.mayIndexError = True
self.errmsg = ""
# mark results names as modal (report only last) or cumulative (list all)
self.modalResults = True
# custom debug actions
self.debugActions = self.DebugActions(None, None, None)
# avoid redundant calls to preParse
self.callPreparse = True
self.callDuringTry = False
self.suppress_warnings_: List[Diagnostics] = []
def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
"""
Suppress warnings emitted for a particular diagnostic on this expression.
Example::
base = pp.Forward()
base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
# statement would normally raise a warning, but is now suppressed
print(base.parseString("x"))
"""
self.suppress_warnings_.append(warning_type)
return self
def copy(self) -> "ParserElement":
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
return cpy
def set_results_name(
self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
) -> "ParserElement":
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
Normally, results names are assigned as you would assign keys in a dict:
any existing value is overwritten by later values. If it is necessary to
keep all values captured for a particular results name, call ``set_results_name``
with ``list_all_matches`` = True.
NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.set_results_name("name")``
- see :class:`__call__`. If ``list_all_matches`` is required, use
``expr("name*")``.
Example::
date_str = (integer.set_results_name("year") + '/'
+ integer.set_results_name("month") + '/'
+ integer.set_results_name("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
listAllMatches = listAllMatches or list_all_matches
return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
if name is None:
return self
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def set_break(self, break_flag: bool = True) -> "ParserElement":
"""
Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
disable.
"""
if break_flag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
# this call to pdb.set_trace() is intentional, not a checkin error
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse actions can be called to perform data conversions, do extra validation,
update external data structures, or enhance or replace the parsed tokens.
Each parse action ``fn`` is a callable method with 0-3 arguments, called as
``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
The parsed tokens are passed to the parse action as ParseResults. They can be
modified in place using list-style append, extend, and pop operations to update
the parsed list elements; and with dictionary-style item set and del operations
to add, update, or remove any named results. If the tokens are modified in place,
it is not necessary to return them with a return statement.
Parse actions can also completely replace the given tokens, with another ``ParseResults``
object, or with some entirely different object (common for parse actions that perform data
conversions). A convenient way to build a new parse result is to define the values
using a dict, and then create the return value using :class:`ParseResults.from_dict`.
If None is passed as the ``fn`` parse action, all previously added parse actions for this
expression are cleared.
Optional keyword arguments:
- call_during_try = (default= ``False``) indicate if parse action should be run during
lookaheads and alternate testing. For parse actions that have side effects, it is
important to only call the parse action once it is determined that it is being
called as part of a successful parse. For parse actions that perform additional
validation, ``call_during_try`` should be passed as ``True``, so that the validation
code is included in the preliminary "try" parses.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`parse_string` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
# parse dates in the form YYYY/MM/DD
# use parse action to convert toks from str to int at parse time
def convert_to_int(toks):
return int(toks[0])
# use a parse action to verify that the date is a valid date
def is_valid_date(instring, loc, toks):
from datetime import date
year, month, day = toks[::2]
try:
date(year, month, day)
except ValueError:
raise ParseException(instring, loc, "invalid date given")
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
# add parse actions
integer.set_parse_action(convert_to_int)
date_str.set_parse_action(is_valid_date)
# note that integer fields are now ints, not strings
date_str.run_tests('''
# successful parse - note that integer fields were converted to ints
1999/12/31
# fail - invalid date
1999/13/31
''')
"""
if list(fns) == [None]:
self.parseAction = []
else:
if not all(callable(fn) for fn in fns):
raise TypeError("parse actions must be callable")
self.parseAction = [_trim_arity(fn) for fn in fns]
self.callDuringTry = kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
"""
Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
See examples in :class:`copy`.
"""
self.parseAction += [_trim_arity(fn) for fn in fns]
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
functions passed to ``add_condition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
ParseException
- call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
default=False
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
(line:1, col:1)
"""
for fn in fns:
self.parseAction.append(
condition_as_parse_action(
fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
)
)
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
"""
Define action to perform if parsing fails at this expression.
Fail action ``fn`` is a callable function that takes the arguments
``fn(s, loc, expr, err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
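# Usage sketch for set_fail_action (illustrative; report_failure is a hypothetical user callback):
#   def report_failure(s, loc, expr, err):
#       print("match of {} failed at loc {}: {}".format(expr, loc, err))
#   date = Word(nums) + "/" + Word(nums) + "/" + Word(nums)
#   date.set_fail_action(report_failure)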
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
instrlen = len(instring)
white_chars = self.whiteChars
while loc < instrlen and instring[loc] in white_chars:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# @profile
def _parseNoCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
TRY, MATCH, FAIL = 0, 1, 2
debugging = self.debug # and doActions)
len_instring = len(instring)
if debugging or self.failAction:
# print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
try:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.debugActions.debug_try:
self.debugActions.debug_try(instring, tokens_start, self, False)
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except Exception as err:
# print("Exception raised:", err)
if self.debugActions.debug_fail:
self.debugActions.debug_fail(
instring, tokens_start, self, err, False
)
if self.failAction:
self.failAction(instring, tokens_start, self, err)
raise
else:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
tokens = self.postParse(instring, loc, tokens)
ret_tokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except Exception as err:
# print "Exception raised in user parse action:", err
if self.debugActions.debug_fail:
self.debugActions.debug_fail(
instring, tokens_start, self, err, False
)
raise
else:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# print("Matched", self, "->", ret_tokens.as_list())
if self.debugActions.debug_match:
self.debugActions.debug_match(
instring, tokens_start, loc, self, ret_tokens, False
)
return loc, ret_tokens
def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
if raise_fatal:
raise
raise ParseException(instring, loc, self.errmsg, self)
def can_parse_next(self, instring: str, loc: int) -> bool:
try:
self.try_parse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
# cache for left-recursion in Forward references
recursion_lock = RLock()
recursion_memos: typing.Dict[
Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
] = {}
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = (
{}
) # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
HIT, MISS = 0, 1
TRY, MATCH, FAIL = 0, 1, 2
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy(), loc))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if self.debug and self.debugActions.debug_try:
try:
self.debugActions.debug_try(instring, loc, self, cache_hit=True)
except TypeError:
pass
if isinstance(value, Exception):
if self.debug and self.debugActions.debug_fail:
try:
self.debugActions.debug_fail(
instring, loc, self, value, cache_hit=True
)
except TypeError:
pass
raise value
loc_, result, endloc = value[0], value[1].copy(), value[2]
if self.debug and self.debugActions.debug_match:
try:
self.debugActions.debug_match(
instring, loc_, endloc, self, result, cache_hit=True
)
except TypeError:
pass
return loc_, result
_parse = _parseNoCache
@staticmethod
def reset_cache() -> None:
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
ParserElement.recursion_memos.clear()
_packratEnabled = False
_left_recursion_enabled = False
@staticmethod
def disable_memoization() -> None:
"""
Disables active Packrat or Left Recursion parsing and their memoization
This method also works if neither Packrat nor Left Recursion is enabled.
This makes it safe to call before activating Packrat or Left Recursion
to clear any previous settings.
"""
ParserElement.reset_cache()
ParserElement._left_recursion_enabled = False
ParserElement._packratEnabled = False
ParserElement._parse = ParserElement._parseNoCache
@staticmethod
def enable_left_recursion(
cache_size_limit: typing.Optional[int] = None, *, force=False
) -> None:
"""
Enables "bounded recursion" parsing, which allows for both direct and indirect
left-recursion. During parsing, left-recursive :class:`Forward` elements are
repeatedly matched with a fixed recursion depth that is gradually increased
until finding the longest match.
Example::
import pyparsing as pp
pp.ParserElement.enable_left_recursion()
E = pp.Forward("E")
num = pp.Word(pp.nums)
# match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
E <<= E + '+' - num | num
print(E.parse_string("1+2+3"))
Recursion search naturally memoizes matches of ``Forward`` elements and may
thus skip reevaluation of parse actions during backtracking. This may break
programs with parse actions which rely on strict ordering of side-effects.
Parameters:
- cache_size_limit - (default=``None``) - memoize at most this many
``Forward`` elements during matching; if ``None`` (the default),
memoize all ``Forward`` elements.
Bounded Recursion parsing works similarly, but not identically, to Packrat parsing;
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._packratEnabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if cache_size_limit is None:
ParserElement.recursion_memos = _UnboundedMemo()
elif cache_size_limit > 0:
ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
else:
raise NotImplementedError("Memo size of %s" % cache_size_limit)
ParserElement._left_recursion_enabled = True
@staticmethod
def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
"""
Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enable_packrat`.
For best results, call ``enable_packrat()`` immediately after
importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enable_packrat()
Packrat parsing works similarly, but not identically, to Bounded Recursion parsing;
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._left_recursion_enabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = _UnboundedCache()
else:
ParserElement.packrat_cache = _FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parse_string(
self, instring: str, parse_all: bool = False, *, parseAll: bool = False
) -> ParseResults:
"""
Parse a string with respect to the parser definition. This function is intended as the primary interface to the
client code.
:param instring: The input string to be parsed.
:param parse_all: If set, the entire input string must match the grammar.
:param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
:raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
:returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
an object with attributes if the given parser includes results names.
If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This
is also equivalent to ending the grammar with :class:`StringEnd`().
To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
converted to spaces (8 spaces per tab, as per the default in ``str.expandtabs``). If the input string
contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
being parsed, one can ensure a consistent view of the input string by doing one of the following:
- calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
- define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the
parse action's ``s`` argument, or
- explicitly expand the tabs in your input string before calling ``parse_string``.
Examples:
By default, partial matches are OK.
>>> res = Word('a').parse_string('aaaaabaaa')
>>> print(res)
['aaaaa']
The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
directly to see more examples.
It raises an exception if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
>>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
Traceback (most recent call last):
...
pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
"""
parseAll = parse_all or parseAll
ParserElement.reset_cache()
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
raise exc.with_traceback(None)
else:
return tokens
def scan_string(
self,
instring: str,
max_matches: int = _MAX_INT,
overlap: bool = False,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> Generator[Tuple[ParseResults, int, int], None, None]:
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``max_matches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parse_string` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens, start, end in Word(alphas).scan_string(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
maxMatches = min(maxMatches, max_matches)
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = str(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.reset_cache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
if debug:
print(
{
"tokens": tokens.asList(),
"start": preloc,
"end": nextLoc,
}
)
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def transform_string(self, instring: str, *, debug: bool = False) -> str:
"""
Extension to :class:`scan_string`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transform_string``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transform_string()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transform_string()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.set_parse_action(lambda toks: toks[0].title())
print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out: List[str] = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transform_string and scan_string
self.keepTabs = True
try:
for t, s, e in self.scan_string(instring, debug=debug):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.as_list()
elif isinstance(t, Iterable) and not isinstance(t, str_type):
out.extend(t)
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join([str(s) for s in _flatten(out)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def search_string(
self,
instring: str,
max_matches: int = _MAX_INT,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> ParseResults:
"""
Another extension to :class:`scan_string`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``max_matches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
maxMatches = min(maxMatches, max_matches)
try:
return ParseResults(
[t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def split(
self,
instring: str,
maxsplit: int = _MAX_INT,
include_separators: bool = False,
*,
includeSeparators=False,
) -> Generator[str, None, None]:
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
and the optional ``include_separators`` argument (default= ``False``), if the separating
matching text should be included in the split results.
Example::
punc = one_of(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
includeSeparators = includeSeparators or include_separators
last = 0
for t, s, e in self.scan_string(instring, max_matches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other) -> "ParserElement":
"""
Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return And([self, other])
def __radd__(self, other) -> "ParserElement":
"""
Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other + self
def __sub__(self, other) -> "ParserElement":
"""
Implementation of ``-`` operator, returns :class:`And` with error stop
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return self + And._ErrorStop() + other
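# Usage sketch for the '-' error-stop operator (illustrative):
#   port_setting = Literal("port") - Word(nums)
#   port_setting.parse_string("port abc")
#   # once "port" has matched, the failure of Word(nums) raises ParseSyntaxException
#   # (a ParseFatalException subclass) instead of permitting backtracking to other alternatives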
def __rsub__(self, other) -> "ParserElement":
"""
Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other - self
def __mul__(self, other) -> "ParserElement":
"""
Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None, n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None, n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None, n) + ~expr``
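Example (a minimal usage sketch)::
ab = Word(alphas)
ab * 3          # same as ab + ab + ab
ab * (2, 4)     # 2 to 4 occurrences
ab * (3, None)  # 3 or more occurrences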
"""
if other is Ellipsis:
other = (0, None)
elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
other = ((0,) + other[1:] + (None,))[:2]
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = tuple(o if o is not Ellipsis else None for o in other)
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError(
"cannot multiply ParserElement and ({}) objects".format(
",".join(type(item).__name__ for item in other)
)
)
else:
raise TypeError(
"cannot multiply ParserElement and {} objects".format(
type(other).__name__
)
)
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
return And([])
if optElements:
def makeOptionalList(n):
if n > 1:
return Opt(self + makeOptionalList(n - 1))
else:
return Opt(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
def __rmul__(self, other) -> "ParserElement":
return self.__mul__(other)
def __or__(self, other) -> "ParserElement":
"""
Implementation of ``|`` operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return MatchFirst([self, other])
def __ror__(self, other) -> "ParserElement":
"""
Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other | self
def __xor__(self, other) -> "ParserElement":
"""
Implementation of ``^`` operator - returns :class:`Or`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Or([self, other])
def __rxor__(self, other) -> "ParserElement":
"""
Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other ^ self
def __and__(self, other) -> "ParserElement":
"""
Implementation of ``&`` operator - returns :class:`Each`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Each([self, other])
def __rand__(self, other) -> "ParserElement":
"""
Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other & self
def __invert__(self) -> "ParserElement":
"""
Implementation of ``~`` operator - returns :class:`NotAny`
"""
return NotAny(self)
# disable __iter__ to override legacy use of sequential access to __getitem__ to
# iterate over a sequence
__iter__ = None
def __getitem__(self, key):
"""
use ``[]`` indexing notation as a short form for expression repetition:
- ``expr[n]`` is equivalent to ``expr*n``
- ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- ``expr[n, ...]`` or ``expr[n,]`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr[..., n]`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
``None`` may be used in place of ``...``.
Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
if more than ``n`` ``expr``s exist in the input stream. If this behavior is
desired, then write ``expr[..., n] + ~expr``.
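Example (a minimal usage sketch)::
word = Word(alphas)
word[3]        # exactly 3 occurrences
word[2, 5]     # 2 to 5 occurrences
word[1, ...]   # one or more occurrences
word[...]      # zero or more occurrences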
"""
# convert single arg keys to tuples
try:
if isinstance(key, str_type):
key = (key,)
iter(key)
except TypeError:
key = (key, key)
if len(key) > 2:
raise TypeError(
"only 1 or 2 index arguments supported ({}{})".format(
key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
)
)
# clip to 2 elements
ret = self * tuple(key[:2])
return ret
def __call__(self, name: str = None) -> "ParserElement":
"""
Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
passed as ``True``.
If ``name`` is omitted, this is the same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
def suppress(self) -> "ParserElement":
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress(self)
def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Enables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern.
:param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
"""
self.skipWhitespace = True
return self
def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
:param recursive: If true (the default), also disable whitespace skipping in child elements (if any)
"""
self.skipWhitespace = False
return self
def set_whitespace_chars(
self, chars: Union[Set[str], str], copy_defaults: bool = False
) -> "ParserElement":
"""
Overrides the default whitespace chars
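Example (a minimal usage sketch)::
# treat only space and tab as ignorable whitespace for this expression,
# making newlines significant
value = Word(alphas).set_whitespace_chars(" \t")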
"""
self.skipWhitespace = True
self.whiteChars = set(chars)
self.copyDefaultWhiteChars = copy_defaults
return self
def parse_with_tabs(self) -> "ParserElement":
"""
Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
Must be called before ``parse_string`` when the input grammar contains elements that
match ``<TAB>`` characters.
"""
self.keepTabs = True
return self
def ignore(self, other: "ParserElement") -> "ParserElement":
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = Word(alphas)[1, ...]
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj']
patt.ignore(c_style_comment)
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj', 'lskjd']
"""
if isinstance(other, str_type):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def set_debug_actions(
self,
start_action: DebugStartAction,
success_action: DebugSuccessAction,
exception_action: DebugExceptionAction,
) -> "ParserElement":
"""
Customize display of debugging messages while doing pattern matching:
- ``start_action`` - method to be called when an expression is about to be parsed;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
- ``success_action`` - method to be called when an expression has successfully parsed;
should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
- ``exception_action`` - method to be called when expression fails to parse;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
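Example (an illustrative sketch; passing ``None`` for an action falls back to the
corresponding default debug action)::
def show_try(instring, loc, expr, cache_hit=False):
    print("trying {} at loc {}".format(expr, loc))
wd = Word(alphas).set_debug_actions(show_try, None, None)
wd.parse_string("sldkj")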
"""
self.debugActions = self.DebugActions(
start_action or _default_start_debug_action,
success_action or _default_success_debug_action,
exception_action or _default_exception_debug_action,
)
self.debug = True
return self
def set_debug(self, flag: bool = True) -> "ParserElement":
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to ``True`` to enable, ``False`` to disable.
Example::
wd = Word(alphas).set_name("alphaword")
integer = Word(nums).set_name("numword")
term = wd | integer
# turn on debugging for wd
wd.set_debug()
term[1, ...].parse_string("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`set_debug_actions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
"""
if flag:
self.set_debug_actions(
_default_start_debug_action,
_default_success_debug_action,
_default_exception_debug_action,
)
else:
self.debug = False
return self
@property
def default_name(self) -> str:
if self._defaultName is None:
self._defaultName = self._generateDefaultName()
return self._defaultName
@abstractmethod
def _generateDefaultName(self):
"""
Child classes must define this method, which defines how the ``default_name`` is set.
"""
def set_name(self, name: str) -> "ParserElement":
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.customName = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.set_debug()
return self
@property
def name(self) -> str:
# This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
return self.customName if self.customName is not None else self.default_name
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return str(self)
def streamline(self) -> "ParserElement":
self.streamlined = True
self._defaultName = None
return self
def recurse(self) -> Sequence["ParserElement"]:
return []
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.recurse():
e._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None) -> None:
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self._checkRecursion([])
def parse_file(
self,
file_or_filename: Union[str, Path, TextIO],
encoding: str = "utf-8",
parse_all: bool = False,
*,
parseAll: bool = False,
) -> ParseResults:
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
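        Example (illustrative; the filename is hypothetical)::
            integer = Word(nums)
            results = integer[1, ...].parse_file("numbers.txt", parse_all=True)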
"""
parseAll = parseAll or parse_all
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r", encoding=encoding) as f:
file_contents = f.read()
try:
return self.parse_string(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, str_type):
return self.matches(other, parse_all=True)
elif isinstance(other, ParserElement):
return vars(self) == vars(other)
return False
def __hash__(self):
return id(self)
def matches(
self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
) -> bool:
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub expressions while building up a larger parser.
Parameters:
- ``test_string`` - to test against this expression for a match
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
parseAll = parseAll and parse_all
try:
self.parse_string(str(test_string), parse_all=parseAll)
return True
except ParseBaseException:
return False
def run_tests(
self,
tests: Union[str, List[str]],
parse_all: bool = True,
comment: typing.Optional[Union["ParserElement", str]] = "#",
full_dump: bool = True,
print_results: bool = True,
failure_tests: bool = False,
post_parse: Callable[[str, ParseResults], str] = None,
file: typing.Optional[TextIO] = None,
with_line_numbers: bool = False,
*,
parseAll: bool = True,
fullDump: bool = True,
printResults: bool = True,
failureTests: bool = False,
postParse: Callable[[str, ParseResults], str] = None,
) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- ``tests`` - a list of separate test strings, or a multiline string of test strings
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
- ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- ``print_results`` - (default= ``True``) prints test output to stdout
- ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
- ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
- ``file`` - (default= ``None``) optional file-like object to which test output will be written;
if None, will default to ``sys.stdout``
        - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failure_tests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.run_tests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.run_tests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failure_tests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading ``'r'``.)
"""
from .testing import pyparsing_test
parseAll = parseAll and parse_all
fullDump = fullDump and full_dump
printResults = printResults and print_results
failureTests = failureTests or failure_tests
postParse = postParse or post_parse
if isinstance(tests, str_type):
line_strip = type(tests).strip
tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
if isinstance(comment, str_type):
comment = Literal(comment)
if file is None:
file = sys.stdout
print_ = file.write
result: Union[ParseResults, Exception]
allResults = []
comments = []
success = True
NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
BOM = "\ufeff"
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(
pyparsing_test.with_line_numbers(t) if with_line_numbers else t
)
continue
if not t:
continue
out = [
"\n" + "\n".join(comments) if comments else "",
pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
t = NL.transform_string(t.lstrip(BOM))
result = self.parse_string(t, parse_all=parseAll)
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
out.append(pe.explain())
out.append("FAIL: " + str(pe))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(pe.__traceback__))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(exc.__traceback__))
success = success and failureTests
result = exc
else:
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
if isinstance(pp_value, ParseResults):
out.append(pp_value.dump())
else:
out.append(str(pp_value))
else:
out.append(result.dump())
except Exception as e:
out.append(result.dump(full=fullDump))
out.append(
"{} failed: {}: {}".format(
postParse.__name__, type(e).__name__, e
)
)
else:
out.append(result.dump(full=fullDump))
out.append("")
if printResults:
print_("\n".join(out))
allResults.append((t, result))
return success, allResults
def create_diagram(
self,
output_html: Union[TextIO, Path, str],
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
**kwargs,
) -> None:
"""
Create a railroad diagram for the parser.
Parameters:
- output_html (str or file-like object) - output target for generated
diagram HTML
- vertical (int) - threshold for formatting multiple alternatives vertically
instead of horizontally (default=3)
- show_results_names - bool flag whether diagram should show annotations for
defined results names
- show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
Additional diagram-formatting keyword arguments can also be included;
see railroad.Diagram class.
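        Example (illustrative sketch; requires the ``pyparsing[diagrams]`` extra,
        and the output filename is arbitrary)::
            assignment = Word(alphas)("lhs") + "=" + Word(nums)("rhs")
            assignment.create_diagram("assignment_diagram.html", show_results_names=True)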
"""
try:
from .diagram import to_railroad, railroad_to_html
except ImportError as ie:
raise Exception(
"must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
) from ie
self.streamline()
railroad = to_railroad(
self,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
diagram_kwargs=kwargs,
)
if isinstance(output_html, (str, Path)):
with open(output_html, "w", encoding="utf-8") as diag_file:
diag_file.write(railroad_to_html(railroad))
else:
# we were passed a file-like object, just write to it
output_html.write(railroad_to_html(railroad))
setDefaultWhitespaceChars = set_default_whitespace_chars
inlineLiteralsUsing = inline_literals_using
setResultsName = set_results_name
setBreak = set_break
setParseAction = set_parse_action
addParseAction = add_parse_action
addCondition = add_condition
setFailAction = set_fail_action
tryParse = try_parse
canParseNext = can_parse_next
resetCache = reset_cache
enableLeftRecursion = enable_left_recursion
enablePackrat = enable_packrat
parseString = parse_string
scanString = scan_string
searchString = search_string
transformString = transform_string
setWhitespaceChars = set_whitespace_chars
parseWithTabs = parse_with_tabs
setDebugActions = set_debug_actions
setDebug = set_debug
defaultName = default_name
setName = set_name
parseFile = parse_file
runTests = run_tests
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class _PendingSkip(ParserElement):
    # internal placeholder class to hold a place where '...' is added to a parser element,
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr: ParserElement, must_skip: bool = False):
super().__init__()
self.anchor = expr
self.must_skip = must_skip
def _generateDefaultName(self):
return str(self.anchor + Empty()).replace("Empty", "...")
def __add__(self, other) -> "ParserElement":
skipper = SkipTo(other).set_name("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.as_list() == [""]:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.as_list()[-1:] == [""]:
t.pop("_skipped")
t["_skipped"] = "missing <" + repr(self.anchor) + ">"
return (
self.anchor + skipper().add_parse_action(must_skip)
| skipper().add_parse_action(show_skip)
) + other
return self.anchor + skipper + other
def __repr__(self):
return self.defaultName
def parseImpl(self, *args):
raise Exception(
"use of `...` expression without following SkipTo target expression"
)
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self):
super().__init__(savelist=False)
def _generateDefaultName(self):
return type(self).__name__
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parse_string('blah') # -> ['blah']
Literal('blah').parse_string('blahfooblah') # -> ['blah']
Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
super().__init__()
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Literal; use Empty() instead")
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: modify __class__ to select
# a parseImpl optimized for single-character check
if self.matchLen == 1 and type(self) is Literal:
self.__class__ = _SingleCharLiteral
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and instring.startswith(
self.match, loc
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parse_string("start") # -> ['start']
Keyword("start").parse_string("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(
self,
match_string: str = "",
ident_chars: typing.Optional[str] = None,
caseless: bool = False,
*,
matchString: str = "",
identChars: typing.Optional[str] = None,
):
super().__init__()
identChars = identChars or ident_chars
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Keyword; use Empty() instead")
self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = match_string.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
errmsg = self.errmsg
errloc = loc
if self.caseless:
if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
if loc == 0 or instring[loc - 1].upper() not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += ", was immediately followed by keyword character"
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
else:
if (
instring[loc] == self.firstMatchChar
and self.matchLen == 1
or instring.startswith(self.match, loc)
):
if loc == 0 or instring[loc - 1] not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += (
", keyword was immediately followed by keyword character"
)
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
raise ParseException(instring, errloc, errmsg, self)
@staticmethod
def set_default_keyword_chars(chars) -> None:
"""
Overrides the default characters used by :class:`Keyword` expressions.
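        Example (illustrative; affects only Keywords created afterward)::
            Keyword.set_default_keyword_chars(alphas)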
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = set_default_keyword_chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
match_string = matchString or match_string
super().__init__(match_string.upper())
# Preserve the defining literal.
self.returnString = match_string
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__(
self,
match_string: str = "",
ident_chars: typing.Optional[str] = None,
*,
matchString: str = "",
identChars: typing.Optional[str] = None,
):
identChars = identChars or ident_chars
match_string = matchString or match_string
super().__init__(match_string, identChars, caseless=True)
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
- ``max_mismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(
self,
match_string: str,
max_mismatches: int = None,
*,
maxMismatches: int = 1,
caseless=False,
):
maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
super().__init__()
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
self.match_string, self.maxMismatches
)
self.caseless = caseless
self.mayIndexError = False
self.mayReturnEmpty = False
def _generateDefaultName(self):
return "{}:{!r}".format(type(self).__name__, self.match_string)
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], match_string)
):
src, mat = s_m
if self.caseless:
src, mat = src.lower(), mat.lower()
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = start + match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Parameters:
- ``init_chars`` - string of all characters that should be used to
match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
if ``body_chars`` is also specified, then this is the string of
initial characters
- ``body_chars`` - string of characters that
can be used for matching after a matched initial character as
given in ``init_chars``; if omitted, same as the initial characters
(default=``None``)
- ``min`` - minimum number of characters to match (default=1)
- ``max`` - maximum number of characters to match (default=0)
- ``exact`` - exact number of characters to match (default=0)
- ``as_keyword`` - match as a keyword (default=``False``)
- ``exclude_chars`` - characters that might be
found in the input ``body_chars`` string but which should not be
      accepted for matching; useful to define a word of all
printables except for one or two characters, for instance
(default=``None``)
:class:`srange` is useful for defining custom character set strings
for defining :class:`Word` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
``alphas``, ``nums``, and ``printables`` are also defined in several
    Unicode sets - see :class:`pyparsing_unicode`.
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, exclude_chars=",")
"""
def __init__(
self,
init_chars: str = "",
body_chars: typing.Optional[str] = None,
min: int = 1,
max: int = 0,
exact: int = 0,
as_keyword: bool = False,
exclude_chars: typing.Optional[str] = None,
*,
initChars: typing.Optional[str] = None,
bodyChars: typing.Optional[str] = None,
asKeyword: bool = False,
excludeChars: typing.Optional[str] = None,
):
initChars = initChars or init_chars
bodyChars = bodyChars or body_chars
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__()
if not initChars:
raise ValueError(
"invalid {}, initChars cannot be empty string".format(
type(self).__name__
)
)
initChars = set(initChars)
self.initChars = initChars
if excludeChars:
excludeChars = set(excludeChars)
initChars -= excludeChars
if bodyChars:
bodyChars = set(bodyChars) - excludeChars
self.initCharsOrig = "".join(sorted(initChars))
if bodyChars:
self.bodyCharsOrig = "".join(sorted(bodyChars))
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = "".join(sorted(initChars))
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
# see if we can make a regex for this Word
if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
if self.bodyChars == self.initChars:
if max == 0:
repeat = "+"
elif max == 1:
repeat = ""
else:
repeat = "{{{},{}}}".format(
self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
)
self.reString = "[{}]{}".format(
_collapse_string_to_ranges(self.initChars),
repeat,
)
elif len(self.initChars) == 1:
if max == 0:
repeat = "*"
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "{}[{}]{}".format(
re.escape(self.initCharsOrig),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
else:
if max == 0:
repeat = "*"
elif max == 2:
repeat = ""
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "[{}][{}]{}".format(
_collapse_string_to_ranges(self.initChars),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except re.error:
self.re = None
else:
self.re_match = self.re.match
self.__class__ = _WordRegex
def _generateDefaultName(self):
def charsAsStr(s):
max_repr_len = 16
s = _collapse_string_to_ranges(s, re_escape=False)
if len(s) > max_repr_len:
return s[: max_repr_len - 3] + "..."
else:
return s
if self.initChars != self.bodyChars:
base = "W:({}, {})".format(
charsAsStr(self.initChars), charsAsStr(self.bodyChars)
)
else:
base = "W:({})".format(charsAsStr(self.initChars))
# add length specification
if self.minLen > 1 or self.maxLen != _MAX_INT:
if self.minLen == self.maxLen:
if self.minLen == 1:
return base[2:]
else:
return base + "{{{}}}".format(self.minLen)
elif self.maxLen == _MAX_INT:
return base + "{{{},...}}".format(self.minLen)
else:
return base + "{{{},{}}}".format(self.minLen, self.maxLen)
return base
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (
start > 0
and instring[start - 1] in bodychars
or loc < instrlen
and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _WordRegex(Word):
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
class Char(_WordRegex):
"""A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
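    Example (illustrative)::
        single_digit = Char(nums)
        print(single_digit[1, ...].parse_string("123"))
        # -> ['1', '2', '3']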
"""
def __init__(
self,
charset: str,
as_keyword: bool = False,
exclude_chars: typing.Optional[str] = None,
*,
asKeyword: bool = False,
excludeChars: typing.Optional[str] = None,
):
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__(
charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
)
self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
if asKeyword:
self.reString = r"\b{}\b".format(self.reString)
self.re = re.compile(self.reString)
self.re_match = self.re.match
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named :class:`ParseResults`.
If instead of the Python stdlib ``re`` module you wish to use a different RE module
(such as the ``regex`` module), you can do so by building your ``Regex`` object with
a compiled RE that was compiled using ``regex``.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
# named fields in a regex will be returned as named results
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# the Regex class will accept re's compiled using the regex module
import regex
parser = pp.Regex(regex.compile(r'[0-9]'))
"""
def __init__(
self,
pattern: Any,
flags: Union[re.RegexFlag, int] = 0,
as_group_list: bool = False,
as_match: bool = False,
*,
asGroupList: bool = False,
asMatch: bool = False,
):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ module for an
explanation of the acceptable patterns and flags.
"""
super().__init__()
asGroupList = asGroupList or as_group_list
asMatch = asMatch or as_match
if isinstance(pattern, str_type):
if not pattern:
raise ValueError("null string passed to Regex; use Empty() instead")
self._re = None
self.reString = self.pattern = pattern
self.flags = flags
elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
self._re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
@cached_property
def re(self):
if self._re:
return self._re
else:
try:
return re.compile(self.pattern, self.flags)
except re.error:
raise ValueError(
"invalid pattern ({!r}) passed to Regex".format(self.pattern)
)
@cached_property
def re_match(self):
return self.re.match
@cached_property
def mayReturnEmpty(self):
return self.re_match("") is not None
def _generateDefaultName(self):
return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def sub(self, repl: str) -> ParserElement:
r"""
Return :class:`Regex` with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transform_string("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
raise TypeError("cannot use sub() with Regex(asGroupList=True)")
if self.asMatch and callable(repl):
raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.add_parse_action(pa)
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- ``quote_char`` - string of one or more characters defining the
quote delimiting string
    - ``esc_char`` - character to escape quotes, typically backslash
      (default= ``None``)
    - ``esc_quote`` - special quote sequence to escape an embedded quote
      string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None``)
- ``multiline`` - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- ``unquote_results`` - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- ``end_quote_char`` - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quote_char)
- ``convert_whitespace_escapes`` - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
Example::
qs = QuotedString('"')
print(qs.search_string('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', end_quote_char='}}')
print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', esc_quote='""')
print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
def __init__(
self,
quote_char: str = "",
esc_char: typing.Optional[str] = None,
esc_quote: typing.Optional[str] = None,
multiline: bool = False,
unquote_results: bool = True,
end_quote_char: typing.Optional[str] = None,
convert_whitespace_escapes: bool = True,
*,
quoteChar: str = "",
escChar: typing.Optional[str] = None,
escQuote: typing.Optional[str] = None,
unquoteResults: bool = True,
endQuoteChar: typing.Optional[str] = None,
convertWhitespaceEscapes: bool = True,
):
super().__init__()
escChar = escChar or esc_char
escQuote = escQuote or esc_quote
unquoteResults = unquoteResults and unquote_results
endQuoteChar = endQuoteChar or end_quote_char
convertWhitespaceEscapes = (
convertWhitespaceEscapes and convert_whitespace_escapes
)
quote_char = quoteChar or quote_char
        # remove white space from quote chars - won't work anyway
quote_char = quote_char.strip()
if not quote_char:
raise ValueError("quote_char cannot be the empty string")
if endQuoteChar is None:
endQuoteChar = quote_char
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
raise ValueError("endQuoteChar cannot be the empty string")
self.quoteChar = quote_char
self.quoteCharLen = len(quote_char)
self.firstQuoteChar = quote_char[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
sep = ""
inner_pattern = ""
if escQuote:
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
sep = "|"
if escChar:
inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
sep = "|"
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
if len(self.endQuoteChar) > 1:
inner_pattern += (
"{}(?:".format(sep)
+ "|".join(
"(?:{}(?!{}))".format(
re.escape(self.endQuoteChar[:i]),
re.escape(self.endQuoteChar[i:]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
sep = "|"
if multiline:
self.flags = re.MULTILINE | re.DOTALL
inner_pattern += r"{}(?:[^{}{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
else:
self.flags = 0
inner_pattern += r"{}(?:[^{}\n\r{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
self.pattern = "".join(
[
re.escape(self.quoteChar),
"(?:",
inner_pattern,
")*",
re.escape(self.endQuoteChar),
]
)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
self.re_match = self.re.match
except re.error:
raise ValueError(
"invalid pattern {!r} passed to Regex".format(self.pattern)
)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def _generateDefaultName(self):
if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
return "string enclosed in {!r}".format(self.quoteChar)
return "quoted string, starting with {} ending with {}".format(
self.quoteChar, self.endQuoteChar
)
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re_match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, str_type):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
for wslit, wschar in self.ws_map:
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(
self,
not_chars: str = "",
min: int = 1,
max: int = 0,
exact: int = 0,
*,
notChars: str = "",
):
super().__init__()
self.skipWhitespace = False
self.notChars = not_chars or notChars
self.notCharsSet = set(self.notChars)
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use "
"Opt(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def _generateDefaultName(self):
not_chars_str = _collapse_string_to_ranges(self.notChars)
if len(not_chars_str) > 16:
return "!W:({}...)".format(self.notChars[: 16 - 3])
else:
return "!W:({})".format(self.notChars)
def parseImpl(self, instring, loc, doActions=True):
notchars = self.notCharsSet
if instring[loc] in notchars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
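    Example (illustrative; the sample text is made up): capture leading
    indentation as an explicit token::
        indent = White(" ")
        indented_line = indent + Word(alphas)
        print(indented_line.parse_string("    hello"))
        # -> ['    ', 'hello']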
"""
whiteStrs = {
" ": "<SP>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
"\u00A0": "<NBSP>",
"\u1680": "<OGHAM_SPACE_MARK>",
"\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
"\u2000": "<EN_QUAD>",
"\u2001": "<EM_QUAD>",
"\u2002": "<EN_SPACE>",
"\u2003": "<EM_SPACE>",
"\u2004": "<THREE-PER-EM_SPACE>",
"\u2005": "<FOUR-PER-EM_SPACE>",
"\u2006": "<SIX-PER-EM_SPACE>",
"\u2007": "<FIGURE_SPACE>",
"\u2008": "<PUNCTUATION_SPACE>",
"\u2009": "<THIN_SPACE>",
"\u200A": "<HAIR_SPACE>",
"\u200B": "<ZERO_WIDTH_SPACE>",
"\u202F": "<NNBSP>",
"\u205F": "<MMSP>",
"\u3000": "<IDEOGRAPHIC_SPACE>",
}
def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
super().__init__()
self.matchWhite = ws
self.set_whitespace_chars(
"".join(c for c in self.whiteStrs if c not in self.matchWhite),
copy_defaults=True,
)
# self.leave_whitespace()
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def _generateDefaultName(self):
return "".join(White.whiteStrs[c] for c in self.matchWhite)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class PositionToken(Token):
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
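    Example (illustrative sketch; the column number and sample report line
    are made up)::
        # scrape a value that is expected to start in column 11
        report_line = Word(alphas)("name") + Suppress(GoToColumn(11)) + Word(nums)("amount")
        print(report_line.parse_string("widgets   1024"))
        # -> ['widgets', '1024']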
"""
def __init__(self, colno: int):
super().__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
class LineStart(PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super().__init__()
self.leave_whitespace()
self.orig_whiteChars = set() | self.whiteChars
self.whiteChars.discard("\n")
self.skipper = Empty().set_whitespace_chars(self.whiteChars)
self.errmsg = "Expected start of line"
def preParse(self, instring, loc):
if loc == 0:
return loc
else:
ret = self.skipper.preParse(instring, loc)
if "\n" in self.orig_whiteChars:
while instring[ret : ret + 1] == "\n":
ret = self.skipper.preParse(instring, ret + 1)
return ret
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(PositionToken):
"""Matches if current position is at the end of a line within the
parse string
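    Example (illustrative; the sample input is made up)::
        line_item = Word(alphas)("name") + Word(nums)("qty") + Suppress(LineEnd())
        print(line_item.parse_string("bolts 25\\n"))
        # -> ['bolts', '25']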
"""
def __init__(self):
super().__init__()
self.whiteChars.discard("\n")
self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
            # see if entire string up to here is just whitespace and ignorables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(PositionToken):
"""
Matches if current position is at the end of the parse string
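    Example (illustrative)::
        # require that the number is the last thing in the input
        only_number = Word(nums) + StringEnd()
        only_number.parse_string("123")      # -> ['123']
        only_number.parse_string("123 abc")  # -> raises ParseException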
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(PositionToken):
"""Matches if the current position is at the beginning of a
:class:`Word`, and is not preceded by any character in a given
set of ``word_chars`` (default= ``printables``). To emulate the
    ``\\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
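    Example (illustrative; the sample text is made up)::
        # match 'ing' only at the start of a word
        ing_start = WordStart(alphas) + Literal("ing")
        print(ing_start.search_string("ingest singing ingot"))
        # -> [['ing'], ['ing']]  (from 'ingest' and 'ingot', but not inside 'singing')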
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(PositionToken):
"""Matches if the current position is at the end of a :class:`Word`,
and is not followed by any character in a given set of ``word_chars``
    (default= ``printables``). To emulate the ``\\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
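    Example (illustrative; the sample text is made up)::
        # match 'ing' only at the end of a word
        ing_end = Literal("ing") + WordEnd(alphas)
        print(ing_end.search_string("ingest singing swing"))
        # -> [['ing'], ['ing']]  (from 'singing' and 'swing', but not 'ingest')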
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(savelist)
self.exprs: List[ParserElement]
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, str_type):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, str_type) for expr in exprs):
exprs = (
self._literalStringClass(e) if isinstance(e, str_type) else e
for e in exprs
)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def recurse(self) -> Sequence[ParserElement]:
return self.exprs[:]
def append(self, other) -> ParserElement:
self.exprs.append(other)
self._defaultName = None
return self
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
all contained expressions.
"""
super().leave_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
all contained expressions.
"""
super().ignore_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.exprs))
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
for e in self.exprs:
e.streamline()
# collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + str(self)
return self
def validate(self, validateTrace=None) -> None:
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self._checkRecursion([])
def copy(self) -> ParserElement:
ret = super().copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in self.exprs:
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = Word(alphas)[1, ...]
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.leave_whitespace()
def _generateDefaultName(self):
return "-"
def __init__(
self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
):
exprs: List[ParserElement] = list(exprs_arg)
if exprs and Ellipsis in exprs:
tmp = []
for i, expr in enumerate(exprs):
if expr is Ellipsis:
if i < len(exprs) - 1:
skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
tmp.append(SkipTo(skipto_arg)("_skipped*"))
else:
raise Exception(
"cannot construct And with sequence ending in ..."
)
else:
tmp.append(expr)
exprs[:] = tmp
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
if not isinstance(self.exprs[0], White):
self.set_whitespace_chars(
self.exprs[0].whiteChars,
copy_defaults=self.exprs[0].copyDefaultWhiteChars,
)
self.skipWhitespace = self.exprs[0].skipWhitespace
else:
self.skipWhitespace = False
else:
self.mayReturnEmpty = True
self.callPreparse = True
def streamline(self) -> ParserElement:
# collapse any _PendingSkip's
if self.exprs:
if any(
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
for e in self.exprs[:-1]
):
for i, e in enumerate(self.exprs[:-1]):
if e is None:
continue
if (
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
):
e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
self.exprs[i + 1] = None
self.exprs = [e for e in self.exprs if e is not None]
super().streamline()
# link any IndentedBlocks to the prior expression
for prev, cur in zip(self.exprs, self.exprs[1:]):
# traverse cur or any first embedded expr of cur looking for an IndentedBlock
# (but watch out for recursive grammar)
seen = set()
while cur:
if id(cur) in seen:
break
seen.add(id(cur))
if isinstance(cur, IndentedBlock):
prev.add_parse_action(
lambda s, l, t, cur_=cur: setattr(
cur_, "parent_anchor", col(l, s)
)
)
break
subs = cur.recurse()
cur = next(iter(subs), None)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
# pass False as callPreParse arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
# if isinstance(e, And._ErrorStop):
if type(e) is And._ErrorStop:
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # And([self, other])
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e._checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def _generateDefaultName(self):
inner = " ".join(str(e) for e in self.exprs)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "{" + inner + "}"
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.skipWhitespace = all(
e.skipWhitespace and not isinstance(e, White) for e in self.exprs
)
else:
self.saveAsList = False
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
fatals = []
if all(e.callPreparse for e in self.exprs):
loc = self.preParse(instring, loc)
for e in self.exprs:
try:
loc2 = e.try_parse(instring, loc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
maxException = None
maxExcLoc = -1
except ParseException as err:
if not fatals:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
# re-evaluate all matches in descending order of length of match, in case attached actions
# might change whether or how much they match of the input.
matches.sort(key=itemgetter(0), reverse=True)
if not doActions:
# no further conditions or parse actions to change the selection of
# alternative, so the first match will be the best match
best_expr = matches[0][1]
return best_expr._parse(instring, loc, doActions)
longest = -1, None
for loc1, expr1 in matches:
if loc1 <= longest[0]:
# already have a longer match than this one will deliver, we are done
return longest
try:
loc2, toks = expr1._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
else:
if loc2 >= loc1:
return loc2, toks
# didn't match as much as before
elif loc2 > longest[0]:
longest = loc2, toks
if longest != (-1, None):
return longest
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # Or([self, other])
def _generateDefaultName(self):
return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose "
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
more than one expression matches, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
if self.exprs:
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(
e.skipWhitespace and not isinstance(e, White) for e in self.exprs
)
else:
self.saveAsList = False
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
return e._parse(
instring,
loc,
doActions,
)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
raise
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # MatchFirst([self, other])
def _generateDefaultName(self):
return "{" + " | ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose "
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
shape_spec.run_tests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, _MultipleMatch)
]
self.multirequired = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, OneOrMore)
]
self.required = [
e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
multis = self.multioptionals[:]
matchOrder = []
keepMatching = True
failed = []
fatals = []
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + multis
failed.clear()
fatals.clear()
for e in tmpExprs:
try:
tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
failed.append(e)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
# look for any ParseFatalExceptions
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if tmpReqd:
missing = ", ".join([str(e) for e in tmpReqd])
raise ParseException(
instring,
loc,
"Missing one or more required elements ({})".format(missing),
)
# add any unmatched Opts, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
total_results = ParseResults([])
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
total_results += results
return loc, total_results
def _generateDefaultName(self):
return "{" + " & ".join(str(e) for e in self.exprs) + "}"
class ParseElementEnhance(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
super().__init__(savelist)
if isinstance(expr, str_type):
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr)
elif issubclass(type(self), self._literalStringClass):
expr = Literal(expr)
else:
expr = self._literalStringClass(Literal(expr))
self.expr = expr
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.set_whitespace_chars(
expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def recurse(self) -> Sequence[ParserElement]:
return [self.expr] if self.expr is not None else []
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException(instring, loc, "No expression defined", self)
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
super().leave_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
super().ignore_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self) -> ParserElement:
super().streamline()
if self.expr is not None:
self.expr.streamline()
return self
def _checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None) -> None:
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.expr))
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class IndentedBlock(ParseElementEnhance):
"""
Expression to match one or more expressions at a given indentation level.
Useful for parsing text where structure is implied by indentation (like Python source code).
"""
class _Indent(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) == ref_col)
class _IndentGreater(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column greater than {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) > ref_col)
def __init__(
self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
):
super().__init__(expr, savelist=True)
# if recursive:
# raise NotImplementedError("IndentedBlock with recursive is not implemented")
self._recursive = recursive
self._grouped = grouped
self.parent_anchor = 1
def parseImpl(self, instring, loc, doActions=True):
# advance parse position to non-whitespace by using an Empty()
# this should be the column to be used for all subsequent indented lines
anchor_loc = Empty().preParse(instring, loc)
# see if self.expr matches at the current location - if not it will raise an exception
# and no further work is necessary
self.expr.try_parse(instring, anchor_loc, doActions)
indent_col = col(anchor_loc, instring)
peer_detect_expr = self._Indent(indent_col)
inner_expr = Empty() + peer_detect_expr + self.expr
if self._recursive:
sub_indent = self._IndentGreater(indent_col)
nested_block = IndentedBlock(
self.expr, recursive=self._recursive, grouped=self._grouped
)
nested_block.set_debug(self.debug)
nested_block.parent_anchor = indent_col
inner_expr += Opt(sub_indent + nested_block)
inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
block = OneOrMore(inner_expr)
trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
if self._grouped:
wrapper = Group
else:
wrapper = lambda expr: expr
return (wrapper(block) + Optional(trailing_undent)).parseImpl(
instring, anchor_loc, doActions
)
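# A minimal IndentedBlock usage sketch (illustrative only; the names `key_expr` and
# `integer`, and the exact shape of the output, are assumptions rather than examples
# taken from the library's documentation):
#
#   data = "A\n    100\n    101\nB\n    200\n"
#   integer = Word(nums)
#   key_expr = Word(alphas)
#   parser = OneOrMore(Group(key_expr + IndentedBlock(integer)))
#   print(parser.parse_string(data).as_list())
#   # expected to yield nested groups, roughly [['A', ['100', '101']], ['B', ['200']]]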
class AtStringStart(ParseElementEnhance):
"""Matches if expression matches at the beginning of the parse
string::
AtStringStart(Word(nums)).parse_string("123")
# prints ["123"]
AtStringStart(Word(nums)).parse_string(" 123")
# raises ParseException
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
raise ParseException(instring, loc, "not found at string start")
return super().parseImpl(instring, loc, doActions)
class AtLineStart(ParseElementEnhance):
r"""Matches if an expression matches at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (AtLineStart('AAA') + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) != 1:
raise ParseException(instring, loc, "not found at line start")
return super().parseImpl(instring, loc, doActions)
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
# by using self._expr.parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
- retreat - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, :class:`Literal`,
:class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
with a specified exact or maximum length, then the retreat
parameter is not required. Otherwise, retreat must be specified to
give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(
self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
):
super().__init__(expr)
self.expr = self.expr().leave_whitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str_type):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat) : loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat + 1) + 1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(
instring_slice, len(instring_slice) - offset
)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
class Located(ParseElementEnhance):
"""
Decorates a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- ``locn_start`` - location where matched expression begins
- ``locn_end`` - location where matched expression ends
- ``value`` - the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parse_with_tabs`
Example::
wd = Word(alphas)
for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[0, ['ljsdf'], 5]
[8, ['lksdjjf'], 15]
[18, ['lkkjj'], 23]
"""
def parseImpl(self, instring, loc, doActions=True):
start = loc
loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
ret_tokens = ParseResults([start, tokens, loc])
ret_tokens["locn_start"] = start
ret_tokens["value"] = tokens
ret_tokens["locn_end"] = loc
if self.resultsName:
# must return as a list, so that the name will be attached to the complete group
return loc, [ret_tokens]
else:
return loc, ret_tokens
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the ``'~'`` operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Opt(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infix_notation
boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
# do NOT use self.leave_whitespace(), don't want to propagate to exprs
# self.leave_whitespace()
self.skipWhitespace = False
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.can_parse_next(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def _generateDefaultName(self):
return "~{" + str(self.expr) + "}"
class _MultipleMatch(ParseElementEnhance):
def __init__(
self,
expr: ParserElement,
stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr)
stopOn = stopOn or stop_on
self.saveAsList = True
ender = stopOn
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.stopOn(ender)
def stopOn(self, ender) -> ParserElement:
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
return self
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions)
try:
hasIgnoreExprs = not not self.ignoreExprs
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in [self.expr] + self.expr.recurse():
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stop_on - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stop_on attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parse_string(text).pprint()
"""
def _generateDefaultName(self):
return "{" + str(self.expr) + "}..."
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``stop_on`` - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression) - (default= ``None``)
Example: similar to :class:`OneOrMore`
"""
def __init__(
self,
expr: ParserElement,
stop_on: typing.Optional[Union[ParserElement, str]] = None,
*,
stopOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(expr, stopOn=stopOn or stop_on)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super().parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, ParseResults([], name=self.resultsName)
def _generateDefaultName(self):
return "[" + str(self.expr) + "]..."
class _NullToken:
def __bool__(self):
return False
def __str__(self):
return ""
class Opt(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``default`` (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
zip.run_tests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
__optionalNotMatched = _NullToken()
def __init__(
self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
):
super().__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
self_expr = self.expr
try:
loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
default_value = self.defaultValue
if default_value is not self.__optionalNotMatched:
if self_expr.resultsName:
tokens = ParseResults([default_value])
tokens[self_expr.resultsName] = default_value
else:
tokens = [default_value]
else:
tokens = []
return loc, tokens
def _generateDefaultName(self):
inner = str(self.expr)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "[" + inner + "]"
Optional = Opt
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- ``expr`` - target expression marking the end of the data to be skipped
- ``include`` - if ``True``, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element
list) (default= ``False``).
- ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
included in the skipped test; if found before the target expression is found,
the :class:`SkipTo` is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quoted_string)
string_data.set_parse_action(token_map(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.search_string(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: '6'
- desc: 'Intermittent system crash'
- issue_num: '101'
- sev: 'Critical'
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: '14'
- desc: "Spelling error on Login ('log|n')"
- issue_num: '94'
- sev: 'Cosmetic'
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: '47'
- desc: 'System slow when running too many reports'
- issue_num: '79'
- sev: 'Minor'
"""
def __init__(
self,
other: Union[ParserElement, str],
include: bool = False,
        ignore: typing.Optional[ParserElement] = None,
fail_on: typing.Optional[Union[ParserElement, str]] = None,
*,
        failOn: typing.Optional[Union[ParserElement, str]] = None,
):
super().__init__(other)
failOn = failOn or fail_on
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, str_type):
self.failOn = self._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
self_expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the ``'<<'`` operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
fwd_expr << a | b | c
will actually be evaluated as::
(fwd_expr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwd_expr << (a | b | c)
Converting to use the ``'<<='`` operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
self.caller_frame = traceback.extract_stack(limit=2)[0]
super().__init__(other, savelist=False)
self.lshift_line = None
def __lshift__(self, other):
if hasattr(self, "caller_frame"):
del self.caller_frame
if isinstance(other, str_type):
other = self._literalStringClass(other)
self.expr = other
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.set_whitespace_chars(
self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
self.lshift_line = traceback.extract_stack(limit=2)[-2]
return self
def __ilshift__(self, other):
return self << other
def __or__(self, other):
caller_line = traceback.extract_stack(limit=2)[-2]
if (
__diag__.warn_on_match_first_with_lshift_operator
and caller_line == self.lshift_line
and Diagnostics.warn_on_match_first_with_lshift_operator
not in self.suppress_warnings_
):
warnings.warn(
"using '<<' operator with '|' is probably an error, use '<<='",
stacklevel=2,
)
ret = super().__or__(other)
return ret
def __del__(self):
# see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
if (
self.expr is None
and __diag__.warn_on_assignment_to_Forward
and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
):
warnings.warn_explicit(
"Forward defined here but no expression attached later using '<<=' or '<<'",
UserWarning,
filename=self.caller_frame.filename,
lineno=self.caller_frame.lineno,
)
def parseImpl(self, instring, loc, doActions=True):
if (
self.expr is None
and __diag__.warn_on_parse_using_empty_Forward
and Diagnostics.warn_on_parse_using_empty_Forward
not in self.suppress_warnings_
):
# walk stack until parse_string, scan_string, search_string, or transform_string is found
parse_fns = [
"parse_string",
"scan_string",
"search_string",
"transform_string",
]
tb = traceback.extract_stack(limit=200)
for i, frm in enumerate(reversed(tb), start=1):
if frm.name in parse_fns:
stacklevel = i + 1
break
else:
stacklevel = 2
warnings.warn(
"Forward expression was never assigned a value, will not parse any input",
stacklevel=stacklevel,
)
if not ParserElement._left_recursion_enabled:
return super().parseImpl(instring, loc, doActions)
# ## Bounded Recursion algorithm ##
# Recursion only needs to be processed at ``Forward`` elements, since they are
# the only ones that can actually refer to themselves. The general idea is
# to handle recursion stepwise: We start at no recursion, then recurse once,
# recurse twice, ..., until more recursion offers no benefit (we hit the bound).
#
# The "trick" here is that each ``Forward`` gets evaluated in two contexts
# - to *match* a specific recursion level, and
# - to *search* the bounded recursion level
# and the two run concurrently. The *search* must *match* each recursion level
# to find the best possible match. This is handled by a memo table, which
# provides the previous match to the next level match attempt.
#
# See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
#
# There is a complication since we not only *parse* but also *transform* via
# actions: We do not want to run the actions too often while expanding. Thus,
# we expand using `doActions=False` and only run `doActions=True` if the next
# recursion level is acceptable.
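        # Illustrative sketch of the user-facing behavior this algorithm enables
        # (assumption: bounded recursion is opted into via
        # ParserElement.enable_left_recursion(); the grammar below is hypothetical):
        #
        #   ParserElement.enable_left_recursion()
        #   expr = Forward()
        #   num = Word(nums)
        #   expr <<= expr + '+' + num | num        # a left-recursive rule
        #   expr.parse_string("1+2+3")             # -> ['1', '+', '2', '+', '3']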
with ParserElement.recursion_lock:
memo = ParserElement.recursion_memos
try:
# we are parsing at a specific recursion expansion - use it as-is
prev_loc, prev_result = memo[loc, self, doActions]
if isinstance(prev_result, Exception):
raise prev_result
return prev_loc, prev_result.copy()
except KeyError:
act_key = (loc, self, True)
peek_key = (loc, self, False)
# we are searching for the best recursion expansion - keep on improving
# both `doActions` cases must be tracked separately here!
prev_loc, prev_peek = memo[peek_key] = (
loc - 1,
ParseException(
instring, loc, "Forward recursion without base case", self
),
)
if doActions:
memo[act_key] = memo[peek_key]
while True:
try:
new_loc, new_peek = super().parseImpl(instring, loc, False)
except ParseException:
# we failed before getting any match – do not hide the error
if isinstance(prev_peek, Exception):
raise
new_loc, new_peek = prev_loc, prev_peek
# the match did not get better: we are done
if new_loc <= prev_loc:
if doActions:
# replace the match for doActions=False as well,
# in case the action did backtrack
prev_loc, prev_result = memo[peek_key] = memo[act_key]
del memo[peek_key], memo[act_key]
return prev_loc, prev_result.copy()
del memo[peek_key]
return prev_loc, prev_peek.copy()
# the match did get better: see if we can improve further
else:
if doActions:
try:
memo[act_key] = super().parseImpl(instring, loc, True)
except ParseException as e:
memo[peek_key] = memo[act_key] = (new_loc, e)
raise
prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = False
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = True
return self
def streamline(self) -> ParserElement:
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None) -> None:
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
# Avoid infinite recursion by setting a temporary _defaultName
self._defaultName = ": ..."
# Use the string representation of main expression.
retString = "..."
try:
if self.expr is not None:
retString = str(self.expr)[:1000]
else:
retString = "None"
finally:
return self.__class__.__name__ + ": " + retString
def copy(self) -> ParserElement:
if self.expr is not None:
return super().copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, list_all_matches=False):
if (
__diag__.warn_name_set_on_empty_Forward
and Diagnostics.warn_name_set_on_empty_Forward
not in self.suppress_warnings_
):
if self.expr is None:
warnings.warn(
"{}: setting results name {!r} on {} expression "
"that has no contained expression".format(
"warn_name_set_on_empty_Forward", name, type(self).__name__
),
stacklevel=3,
)
return super()._setResultsName(name, list_all_matches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
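# A minimal Forward usage sketch (illustrative only): a recursive grammar for nested,
# parenthesized lists of integers, assigned with '<<=' as recommended in the docstring
# above.
#
#   LPAR, RPAR = map(Suppress, "()")
#   expr = Forward()
#   expr <<= Word(nums) | Group(LPAR + expr[...] + RPAR)
#   print(expr.parse_string("(1 (2 3) 4)").as_list())
#   # -> [['1', ['2', '3'], '4']]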
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
"""
def __init__(self, expr: Union[ParserElement, str], savelist=False):
super().__init__(expr) # , savelist)
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parse_string('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(
self,
expr: ParserElement,
join_string: str = "",
adjacent: bool = True,
*,
joinString: typing.Optional[str] = None,
):
super().__init__(expr)
joinString = joinString if joinString is not None else join_string
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leave_whitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other) -> ParserElement:
if self.adjacent:
ParserElement.ignore(self, other)
else:
super().ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
The optional ``aslist`` argument when set to True will return the
parsed tokens as a Python list instead of a pyparsing ParseResults.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Opt(delimited_list(term))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', 'a', 'b', '100']
func = ident + Group(Opt(delimited_list(term)))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr: ParserElement, aslist: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonList = aslist
def postParse(self, instring, loc, tokenlist):
if self._asPythonList:
return ParseResults.List(
tokenlist.asList()
if isinstance(tokenlist, ParseResults)
else list(tokenlist)
)
else:
return [tokenlist]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as an item key.
The optional ``asdict`` argument when set to True will return the
parsed tokens as a Python dict instead of a pyparsing ParseResults.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
# print attributes as plain groups
print(attr_expr[1, ...].parse_string(text).dump())
# instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.as_dict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: 'light blue'
- posn: 'upper left'
- shape: 'SQUARE'
- texture: 'burlap'
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__(self, expr: ParserElement, asdict: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonDict = asdict
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = str(ikey).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
try:
dictvalue = tok.copy() # ParseResults(i)
except Exception:
exc = TypeError(
"could not extract dict values from parsed results"
" - Dict expression must contain Grouped expressions"
)
raise exc from None
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self._asPythonDict:
return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
else:
return [tokenlist] if self.resultsName else tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + (',' + wd)[...]
print(wd_list1.parse_string(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + (Suppress(',') + wd)[...]
print(wd_list2.parse_string(source))
# Skipped text (using '...') can be suppressed as well
source = "lead in START relevant text END trailing text"
start_marker = Keyword("START")
end_marker = Keyword("END")
find_body = Suppress(...) + start_marker + ... + end_marker
        print(find_body.parse_string(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
['START', 'relevant text ', 'END']
(See also :class:`delimited_list`.)
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
if expr is ...:
expr = _PendingSkip(NoMatch())
super().__init__(expr)
def __add__(self, other) -> "ParserElement":
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) + other
else:
return super().__add__(other)
def __sub__(self, other) -> "ParserElement":
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) - other
else:
return super().__sub__(other)
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self) -> ParserElement:
return self
def trace_parse_action(f: ParseAction) -> ParseAction:
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@trace_parse_action
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
raise
sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
return ret
z.__name__ = f.__name__
return z
# convenience constants for positional expressions
empty = Empty().set_name("empty")
line_start = LineStart().set_name("line_start")
line_end = LineEnd().set_name("line_end")
string_start = StringStart().set_name("string_start")
string_end = StringEnd().set_name("string_end")
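# helper expressions used by srange() (defined below) to parse regex-style "[...]"
# character-set bodies into individual characters and character ranges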
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
lambda s, l, t: chr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Opt("^").set_results_name("negate")
+ Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+ "]"
)
def srange(s: str) -> str:
r"""Helper to easily define string ranges for use in :class:`Word`
construction. Borrows syntax from regexp ``'[]'`` string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
except Exception:
return ""
def token_map(func, *args) -> ParseAction:
"""Helper to define a parse action by mapping a function to all
elements of a :class:`ParseResults` list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
which will convert the parsed data to an integer using base 16.
    Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
hex_ints.run_tests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).set_parse_action(token_map(str.upper))
upperword[1, ...].run_tests('''
my kingdom for a horse
''')
wd = Word(alphas).set_parse_action(token_map(str.title))
wd[1, ...].set_parse_action(' '.join).run_tests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
pa.__name__ = func_name
return pa
def autoname_elements() -> None:
"""
Utility to simplify mass-naming of parser elements, for
generating railroad diagram with named subdiagrams.
"""
for name, var in sys._getframe().f_back.f_locals.items():
if isinstance(var, ParserElement) and not var.customName:
var.set_name(name)
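# Illustrative usage sketch for autoname_elements() (the variable names below are
# hypothetical):
#
#   integer = Word(nums)
#   identifier = Word(alphas, alphanums + "_")
#   autoname_elements()   # each element takes its local variable name,
#                         # e.g. str(integer) -> "integer"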
dbl_quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).set_name("string enclosed in double quotes")
sgl_quoted_string = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("string enclosed in single quotes")
quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("quotedString using single or double quotes")
unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
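# Illustrative usage of the quoted-string helpers above (output shown is approximate):
#
#   print(quoted_string.parse_string('"hello world"'))   # -> ['"hello world"']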
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: List[ParserElement] = [
v for v in vars().values() if isinstance(v, ParserElement)
]
# backward compatibility names
tokenMap = token_map
conditionAsParseAction = condition_as_parse_action
nullDebugAction = null_debug_action
sglQuotedString = sgl_quoted_string
dblQuotedString = dbl_quoted_string
quotedString = quoted_string
unicodeString = unicode_string
lineStart = line_start
lineEnd = line_end
stringStart = string_start
stringEnd = string_end
traceParseAction = trace_parse_action
avg_line_length: 36.682373 | max_line_length: 174 | alphanum_fraction: 0.57822
hexsha: e0bdfff5218bdfdc4516efbb92e45ba51f8b08b9 | size: 3,284 | ext: py | lang: Python
path: kubernetes_asyncio/client/models/v1_limit_range_spec.py | repo: PidgeyBE/kubernetes_asyncio | head: 14d15dc309890253c26b6274a022e84441e05217 | licenses: ["Apache-2.0"] | stars/issues/forks: null
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1LimitRangeSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'limits': 'list[V1LimitRangeItem]'
}
attribute_map = {
'limits': 'limits'
}
def __init__(self, limits=None): # noqa: E501
"""V1LimitRangeSpec - a model defined in OpenAPI""" # noqa: E501
self._limits = None
self.discriminator = None
self.limits = limits
@property
def limits(self):
"""Gets the limits of this V1LimitRangeSpec. # noqa: E501
Limits is the list of LimitRangeItem objects that are enforced. # noqa: E501
:return: The limits of this V1LimitRangeSpec. # noqa: E501
:rtype: list[V1LimitRangeItem]
"""
return self._limits
@limits.setter
def limits(self, limits):
"""Sets the limits of this V1LimitRangeSpec.
Limits is the list of LimitRangeItem objects that are enforced. # noqa: E501
:param limits: The limits of this V1LimitRangeSpec. # noqa: E501
:type: list[V1LimitRangeItem]
"""
if limits is None:
raise ValueError("Invalid value for `limits`, must not be `None`") # noqa: E501
self._limits = limits
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LimitRangeSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
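# Illustrative usage sketch (assumption: V1LimitRangeItem is the generated model in
# the same client package, and the 'type' value shown is just an example):
#
#   from kubernetes_asyncio.client.models.v1_limit_range_item import V1LimitRangeItem
#   spec = V1LimitRangeSpec(limits=[V1LimitRangeItem(type="Container")])
#   print(spec.to_dict())   # -> {'limits': [{..., 'type': 'Container'}]}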
avg_line_length: 28.310345 | max_line_length: 124 | alphanum_fraction: 0.573386
hexsha: a33dc0d41d298998ece9029ad250f45a6040eed2 | size: 3,163 | ext: py | lang: Python
path: torch_sparse/__init__.py | repo: KonstantinKlepikov/pytorch_sparse | head: 468aea5b8a94a037659adb632900ad90f680be34 | licenses: ["MIT"] | stars: 1 (2020-06-29T19:15:59.000Z) | issues/forks: null
import importlib
import os.path as osp
import torch
__version__ = '0.6.5'
for library in [
'_version', '_convert', '_diag', '_spmm', '_spspmm', '_metis', '_rw',
'_saint', '_sample'
]:
torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
library, [osp.dirname(__file__)]).origin)
if torch.version.cuda is not None: # pragma: no cover
cuda_version = torch.ops.torch_sparse.cuda_version()
if cuda_version == -1:
major = minor = 0
elif cuda_version < 10000:
major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
else:
major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
if t_major != major or t_minor != minor:
raise RuntimeError(
f'Detected that PyTorch and torch_sparse were compiled with '
f'different CUDA versions. PyTorch has CUDA version '
f'{t_major}.{t_minor} and torch_sparse has CUDA version '
f'{major}.{minor}. Please reinstall the torch_sparse that '
f'matches your PyTorch install.')
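# Illustrative note (added, not part of the original file): the integer returned by
# torch.ops.torch_sparse.cuda_version() is assumed to encode major*1000 + minor*10,
# e.g. 9020 -> CUDA 9.2 and 10010 -> CUDA 10.1, which is what the digit slicing above
# extracts; -1 presumably means torch_sparse was built without CUDA support.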
from .storage import SparseStorage # noqa
from .tensor import SparseTensor # noqa
from .transpose import t # noqa
from .narrow import narrow, __narrow_diag__ # noqa
from .select import select # noqa
from .index_select import index_select, index_select_nnz # noqa
from .masked_select import masked_select, masked_select_nnz # noqa
from .permute import permute # noqa
from .diag import remove_diag, set_diag, fill_diag # noqa
from .add import add, add_, add_nnz, add_nnz_ # noqa
from .mul import mul, mul_, mul_nnz, mul_nnz_ # noqa
from .reduce import sum, mean, min, max # noqa
from .matmul import matmul # noqa
from .cat import cat, cat_diag # noqa
from .rw import random_walk # noqa
from .metis import partition # noqa
from .bandwidth import reverse_cuthill_mckee # noqa
from .saint import saint_subgraph # noqa
from .padding import padded_index, padded_index_select # noqa
from .sample import sample, sample_adj # noqa
from .convert import to_torch_sparse, from_torch_sparse # noqa
from .convert import to_scipy, from_scipy # noqa
from .coalesce import coalesce # noqa
from .transpose import transpose # noqa
from .eye import eye # noqa
from .spmm import spmm # noqa
from .spspmm import spspmm # noqa
__all__ = [
'SparseStorage',
'SparseTensor',
't',
'narrow',
'__narrow_diag__',
'select',
'index_select',
'index_select_nnz',
'masked_select',
'masked_select_nnz',
'permute',
'remove_diag',
'set_diag',
'fill_diag',
'add',
'add_',
'add_nnz',
'add_nnz_',
'mul',
'mul_',
'mul_nnz',
'mul_nnz_',
'sum',
'mean',
'min',
'max',
'matmul',
'cat',
'cat_diag',
'random_walk',
'partition',
'reverse_cuthill_mckee',
'saint_subgraph',
'padded_index',
'padded_index_select',
'to_torch_sparse',
'from_torch_sparse',
'to_scipy',
'from_scipy',
'coalesce',
'transpose',
'eye',
'spmm',
'spspmm',
'__version__',
]
| 28.754545
| 77
| 0.662662
|
a32716ef7c3226e6e3a4b90e6da889524ff3add3
| 2,175
|
py
|
Python
|
magnetics/processBp123T_GhkFix.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
magnetics/processBp123T_GhkFix.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
magnetics/processBp123T_GhkFix.py
|
golfit/work-archive
|
bdd37d46fda3fde15ec2164d3335d6b4ed576bd7
|
[
"MIT"
] | null | null | null |
'''
Created on August 12, 2014
based on processAllFix.py, which, itself, was used to process timebase fix.
Corrects the mapping of Mirnov coils, BP123T_GHK, to the correct digitizer inputs. The correct mapping at the patch panel was lost after the coil leads were ripped during repairs in the up-to-air period prior to the 2014 campaign. See ChangeBP123CoilInputs12Aug2014.py
@author: golfit
'''
from myTools import getShotRange
import datetime
import sys
sList=getShotRange()
print("Total shot range={0:d}-{1:d}".format(sList[0],sList[-1]))
currentDate=datetime.datetime.now()
startDate=datetime.datetime(2014,8,30,0,0,0) #Start on Saturday, 30 August 2014.
maxShotsPerDay=150
runDays=[5,6] #Days of the week for which code runs, 0=Monday, 6=Sunday
numDaysToFinish=int(len(sList)/maxShotsPerDay) #Cast as int for python3
numDaysElapsed=0
#Count number of run days elapsed
for dayNum in range(0,(currentDate-startDate).days):
numDaysElapsed+= any([(startDate+datetime.timedelta(dayNum)).weekday()==day for day in runDays])
#Where to start in the list of shots.
startIndex=numDaysElapsed*maxShotsPerDay
endIndex=min(startIndex+maxShotsPerDay-1,len(sList)-1)
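#Worked example (added, illustrative): with maxShotsPerDay=150 and two run days already
#elapsed, startIndex=300 and endIndex=min(449,len(sList)-1), i.e. shots
#sList[300]..sList[endIndex] are fixed on this run.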
if( startIndex>=0 and startIndex<(len(sList)) ) :
#Run fix - modify sys.argv arguments to make tests work, since these parse sys.argv for shot range
sys.argv[1]=sList[startIndex]
sys.argv[2]=sList[endIndex]
subList=getShotRange()
print("Fixing shots {0:d}-{1:d}".format(subList[0], subList[-1]))
execfile("home/golfit/python/versionControlled/trunk/magnetics/ChangeBP123CoilInputs12Aug2014.py")
#Print fixed shot range to log.
myLogFile=open('Bp123t_ghk2014ChangeLog.txt','a')
    myLogFile.write('{0:s} -> Fixed {1:d}-{2:d}\n'.format(str(currentDate), subList[0], subList[-1]))
myLogFile.close()
else :
print("Can't index into shot list - we may be done with this job already")
print("Start date was {0:s}".format(str(startDate)))
print("Current date is {0:s}".format(str(currentDate)))
print("Difference is {0:s}".format(str(currentDate-startDate)))
print("Shots processed per day={0:d}".format(maxShotsPerDay))
print("Exiting program.")
| 36.25
| 251
| 0.730115
|
b319e7bb6e077cd8799b82949f240e37870cdba9
| 5,218
|
py
|
Python
|
colossalai/nn/lr_scheduler/onecycle.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 1,630
|
2021-10-30T01:00:27.000Z
|
2022-03-31T23:02:41.000Z
|
colossalai/nn/lr_scheduler/onecycle.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 166
|
2021-10-30T01:03:01.000Z
|
2022-03-31T14:19:07.000Z
|
colossalai/nn/lr_scheduler/onecycle.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 253
|
2021-10-30T06:10:29.000Z
|
2022-03-31T13:30:06.000Z
|
from torch.optim.lr_scheduler import OneCycleLR as _OneCycleLR
from colossalai.registry import LR_SCHEDULERS
@LR_SCHEDULERS.register_module
class OneCycleLR(_OneCycleLR):
r"""Sets the learning rate of each parameter group according to the
1cycle learning rate policy. The 1cycle policy anneals the learning
rate from an initial learning rate to some maximum learning rate and then
from that maximum learning rate to some minimum learning rate much lower
than the initial learning rate.
This policy was initially described in the paper `Super-Convergence:
Very Fast Training of Neural Networks Using Large Learning Rates`_.
The 1cycle learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This scheduler is not chainable.
Note also that the total number of steps in the cycle can be determined in one
of two ways (listed in order of precedence):
* A value for total_steps is explicitly provided.
* A number of epochs (epochs) and a number of steps per epoch (steps_per_epoch) are provided.
In this case, the number of total steps is inferred by total_steps = epochs * steps_per_epoch
You must either provide a value for total_steps or provide a value for both
epochs and steps_per_epoch.
The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
claims that "unpublished work has shown even better results by using only two phases". To
mimic the behaviour of the original paper instead, set ``three_phase=True``.
Args:
optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
total_steps (int): Number of total training steps.
pct_start (float, optional):
The percentage of the cycle (in number of steps) spent increasing the learning rate, defaults to 0.3.
anneal_strategy (str, optional): {'cos', 'linear'}, Specifies the annealing strategy:
"cos" for cosine annealing, "linear" for linear annealing, defaults to 'cos'.
cycle_momentum (bool, optional): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum', defaults to True.
base_momentum (float, optional): Lower momentum boundaries in the cycle for each parameter group.
Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr', defaults to 0.85.
max_momentum (float, optional): Upper momentum boundaries in the cycle for each parameter group.
Functionally, it defines the cycle amplitude (max_momentum - base_momentum).
Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr', defaults to 0.95.
div_factor (float, optional): Determines the initial learning rate via
initial_lr = max_lr/div_factor, defaults to 25.0.
final_div_factor (float, optional): Determines the minimum learning rate via
min_lr = initial_lr/final_div_factor, defaults to 10000.0.
last_epoch (int, optional): The index of the last batch. This parameter is used when resuming a training job.
Since `step()` should be invoked after each batch instead of after each epoch, this number represents
the total number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning, defaults to -1
The ``kwargs`` for initializing torch.optim.lr_scheduler.OneCycleLR should include parameters below:
::
epochs (int, optional, default=None)
steps_per_epoch (int, optional, default=None)
three_phase (bool, optional, default=False)
verbose (bool, optional, default=False)
More details about kwargs could be found in
`OneCycleLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.OneCycleLR.html#torch.optim.lr_scheduler.OneCycleLR>`_.
.. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
https://arxiv.org/abs/1708.07120
"""
def __init__(self, optimizer, total_steps: int,
pct_start=0.3,
anneal_strategy='cos',
cycle_momentum=True,
base_momentum=0.85,
max_momentum=0.95,
div_factor=25.0,
final_div_factor=10000.0,
last_epoch=-1, **kwargs):
max_lrs = list(map(lambda group: group['lr'], optimizer.param_groups))
super().__init__(optimizer, max_lrs, total_steps=total_steps,
pct_start=pct_start,
anneal_strategy=anneal_strategy,
cycle_momentum=cycle_momentum,
base_momentum=base_momentum,
max_momentum=max_momentum,
div_factor=div_factor,
final_div_factor=final_div_factor,
last_epoch=last_epoch)
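# Hedged usage sketch (added example, not part of the original file); the model,
# optimizer and step counts below are illustrative.
def _one_cycle_lr_example():
    import torch
    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # 0.1 becomes max_lr
    scheduler = OneCycleLR(optimizer, total_steps=100, pct_start=0.3)
    for _ in range(100):
        optimizer.step()
        scheduler.step()  # step once per batch, as the class docstring requires
    return scheduler.get_last_lr()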
| 57.977778
| 139
| 0.68647
|
31df6692b9c7d7aac22142f21469bcdc6cf30cce
| 1,939
|
py
|
Python
|
tests/utils/stacks/__init__.py
|
lavr/apm-agent-python
|
a67d57b991931d66b871668156e917bd3be466c9
|
[
"BSD-3-Clause"
] | 350
|
2017-08-17T12:27:08.000Z
|
2022-03-30T10:01:33.000Z
|
tests/utils/stacks/__init__.py
|
lavr/apm-agent-python
|
a67d57b991931d66b871668156e917bd3be466c9
|
[
"BSD-3-Clause"
] | 1,115
|
2017-08-17T15:30:35.000Z
|
2022-03-31T16:02:52.000Z
|
tests/utils/stacks/__init__.py
|
lavr/apm-agent-python
|
a67d57b991931d66b871668156e917bd3be466c9
|
[
"BSD-3-Clause"
] | 180
|
2017-08-17T12:26:53.000Z
|
2022-03-25T09:25:37.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
def get_me_a_test_frame():
a_local_var = 42
return inspect.currentframe()
def get_me_more_test_frames(count, func=None):
if count <= 1:
if func:
func()
return [inspect.currentframe()]
else:
return get_me_more_test_frames(count=count - 1, func=func) + [inspect.currentframe()]
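# Added note (illustrative, not part of the original file): get_me_more_test_frames(3)
# returns a list of three frame objects ordered innermost call first, and calls `func`
# (if provided) at the deepest level of the recursion.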
| 42.152174
| 93
| 0.750903
|
1cdb1e3c28c9f76716b00a73a76a763621098945
| 1,370
|
py
|
Python
|
elastic_agent_setup/platform/platform.py
|
MSAdministrator/elastic-agent-setup
|
5cb8202aaed281f73706556a86657f5525495b56
|
[
"MIT"
] | 1
|
2022-02-04T16:59:53.000Z
|
2022-02-04T16:59:53.000Z
|
elastic_agent_setup/platform/platform.py
|
MSAdministrator/elastic-agent-setup
|
5cb8202aaed281f73706556a86657f5525495b56
|
[
"MIT"
] | 1
|
2021-07-21T11:30:13.000Z
|
2021-11-09T14:30:10.000Z
|
elastic_agent_setup/platform/platform.py
|
MSAdministrator/elastic-agent-setup
|
5cb8202aaed281f73706556a86657f5525495b56
|
[
"MIT"
] | null | null | null |
import abc
import os
import subprocess
from ..settings import Settings
class Platform:
def __init__(self):
if not os.path.exists(Settings.download_path):
from ..download import Download
Download().run()
@abc.abstractmethod
def extract(self):
        raise NotImplementedError
@abc.abstractmethod
def install_certificate(self):
        raise NotImplementedError
def run(self, subcommand='install'):
self.extract()
if Settings.certificate_authority:
self.install_certificate()
if Settings.platform == 'Linux' or Settings.platform == 'Darwin':
split_on = '.tar.gz'
elif Settings.platform == 'Windows':
split_on = '.zip'
command = Settings.agent_command_string.format(
dir=os.path.join('/', Settings.download_endpoint.rsplit(split_on,1)[0]),
subcommand=subcommand,
force=Settings.force_enroll if Settings.force_enroll else '',
kibana=Settings.kibana,
token=Settings.enrollment_token,
certificate_authorities=f'--certificate-authorities="{Settings.certificate_authority}"' if Settings.certificate_authority else '',
            insecure='--insecure' if not Settings.verify_ssl else ''
)
return subprocess.run(command, shell=True, check=True, capture_output=True)
| 34.25
| 142
| 0.650365
|
ff14d062964ca96cc48126d1f00c472815e3b32b
| 18,876
|
py
|
Python
|
safe_il/agents/pets/pets_utils.py
|
Justin-Yuan/safe_imitation
|
7528e9649f9921ee70a1386bd0c00b1e462717fd
|
[
"MIT"
] | null | null | null |
safe_il/agents/pets/pets_utils.py
|
Justin-Yuan/safe_imitation
|
7528e9649f9921ee70a1386bd0c00b1e462717fd
|
[
"MIT"
] | null | null | null |
safe_il/agents/pets/pets_utils.py
|
Justin-Yuan/safe_imitation
|
7528e9649f9921ee70a1386bd0c00b1e462717fd
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from functools import partial
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
import scipy.stats as stats
from safe_il.utils import random_sample
from safe_il.agents.mpc.mpc_utils import CEMOptimizer
# -----------------------------------------------------------------------------------
# Agent
# -----------------------------------------------------------------------------------
class PETSAgent:
"""Encapsulates ensemble model and sampling-based MPC."""
def __init__(self,
obs_space,
act_space,
env_cost_func,
hidden_dim=500,
ensemble_size=5,
weight_decays=[],
lr=0.001,
epochs=5,
batch_size=256,
act_opt_freq=1,
horizon=25,
num_particles=20,
cem_args={},
**kwargs):
# params
self.obs_space = obs_space
self.act_space = act_space
# NOTE: should match the reward/cost func from `env.step(...)`
self.env_cost_func = env_cost_func
self.ensemble_size = ensemble_size
self.epochs = epochs
self.batch_size = batch_size
# NOTE: determines how often the action sequence will be optimized
# NOTE: reoptimizes at every call to `act(...)`
self.act_opt_freq = act_opt_freq
self.horizon = horizon
self.num_particles = num_particles
assert num_particles % ensemble_size == 0, "Number of particles must be a multiple of the ensemble size."
self.particles_per_ensem = num_particles // ensemble_size
# model
self.model = EnsembleModel(ensemble_size,
in_features=obs_space.shape[0] + act_space.shape[0],
out_features=obs_space.shape[0] * 2,
hidden_size=hidden_dim,
num_layers=len(weight_decays),
weight_decays=weight_decays)
self.device = "cpu"
# optimizer (model)
self.model_opt = torch.optim.Adam(self.model.parameters(), lr)
# planner
self.dO = obs_space.shape[0]
self.dU = act_space.shape[0]
self.ac_ub = act_space.high
self.ac_lb = act_space.low
# optimizer (planner)
self.planner_opt = CEMOptimizer(self.horizon * self.dU,
lower_bound=np.tile(self.ac_lb, [self.horizon]),
upper_bound=np.tile(self.ac_ub, [self.horizon]),
**cem_args)
def to(self, device):
"""Puts agent to device."""
self.model.to(device)
self.device = device
def train(self):
"""Sets training mode."""
self.model.train()
def eval(self):
"""Sets evaluation mode."""
self.model.eval()
def state_dict(self):
"""Snapshots agent state."""
return {"model": self.model.state_dict(), "model_opt": self.model_opt.state_dict()}
def load_state_dict(self, state_dict):
"""Restores agent state."""
self.model.load_state_dict(state_dict["model"])
self.model_opt.load_state_dict(state_dict["model_opt"])
def update(self, rollouts, device="cpu"):
"""Performs a training step on ensemble model."""
        results = defaultdict(list)
num_batch = rollouts.num_steps // self.batch_size
# initial buffer size can be smaller than batch_size
num_batch = max(num_batch, 1)
# get normalization heuristics
train_inputs, _ = rollouts.get(to_torch=False)
self.model.fit_input_stats(train_inputs)
# inner training loop
for epoch in range(self.epochs):
reg_loss_epoch, nll_loss_epoch, mse_loss_epoch = 0, 0, 0
sampler = rollouts.sampler(self.batch_size, num_nets=self.ensemble_size, device=device, drop_last=False)
for train_in, train_targ in sampler:
# each has shape (N, B, *)
# regularization loss
loss = 0.01 * (self.model.max_logvar.sum() - self.model.min_logvar.sum())
reg_loss = self.model.compute_decays()
loss += reg_loss
reg_loss_epoch += reg_loss.item()
# dynamics (nll) loss
mean, logvar = self.model(train_in.float(), ret_logvar=True)
inv_var = torch.exp(-logvar)
state_loss = ((mean - train_targ)**2) * inv_var + logvar
state_loss = state_loss.mean(-1).mean(-1).sum()
loss += state_loss
nll_loss_epoch += state_loss.item()
# mse loss (on predicted mean)
# not used for learning, only to monitor model accuracy
mse_loss = (mean - train_targ)**2
mse_loss = mse_loss.detach().mean(-1).mean(-1).sum()
mse_loss_epoch += mse_loss.item()
# perform update
self.model_opt.zero_grad()
loss.backward()
self.model_opt.step()
# `num_batch` is off by a little with sampler `drop_last=False`
resutls["reg_loss"].append(reg_loss_epoch / num_batch)
resutls["nll_loss"].append(nll_loss_epoch / num_batch)
resutls["mse_loss"].append(mse_loss_epoch / num_batch)
resutls = {k: sum(v) / len(v) for k, v in resutls.items()}
return resutls
def reset(self):
"""Resets this controller (at trajecotry start)."""
self.ac_buf = np.array([]).reshape(0, self.dU)
self.prev_sol = np.tile((self.ac_lb + self.ac_ub) / 2, [self.horizon])
self.init_var = np.tile(np.square(self.ac_ub - self.ac_lb) / 16, [self.horizon])
self.planner_opt.reset()
def act(self, obs, t, info):
"""Selects action based on learned dynamics and mpc planning.
Constructs the cost function for the current step, which is
different between steps due to different current obs, also
passes other necessary arguments `info` for `env_cost_func`.
"""
cost_func = partial(self.cost_func, obs=obs, info=info)
action = self._solve_mpc(cost_func)
return action
def _solve_mpc(self, cost_func):
"""Solves the MPC optimization problem for action sequence."""
if self.ac_buf.shape[0] > 0:
action, self.ac_buf = self.ac_buf[0], self.ac_buf[1:]
return action
soln = self.planner_opt.obtain_solution(self.prev_sol, self.init_var, cost_func)
# for next call of `act(...)`
# previous soln is everything after currently taken action
self.prev_sol = np.concatenate([np.copy(soln)[self.act_opt_freq * self.dU:], np.zeros(self.act_opt_freq * self.dU)])
# current soln, can take 1st step as action
# saves `act_opt_freq` steps to reduce solving mpc every step
self.ac_buf = soln[:self.act_opt_freq * self.dU].reshape(-1, self.dU)
return self._solve_mpc(cost_func)
@torch.no_grad()
def cost_func(self, ac_seqs, obs=None, info=None):
"""MPC rollout cost.
Args:
ac_seqs (np.array): decision vars, (pop_size, horizon * act_dim) actions.
obs (np.array): conditional vars, (O,) current observation.
info (dict): conditional vars, current info from env.
Returns:
np.array: (pop_size,) costs
"""
pop_size = ac_seqs.shape[0]
# For parallel compute, (Pop_size, H*A) -> (H, Pop_size * Num_par, A)
ac_seqs = torch.from_numpy(ac_seqs).float()
# (H, Pop_size, A)
ac_seqs = ac_seqs.view(-1, self.horizon, self.dU).transpose(0, 1)
# (H, Pop_size, Num_par, A)
ac_seqs = ac_seqs.unsqueeze(2).expand(-1, -1, self.num_particles, -1)
# (H, Pop_size * Num_par, A)
ac_seqs = ac_seqs.contiguous().view(self.horizon, -1, self.dU)
# current observation, (O,) -> (Pop_size * Num_par, O)
cur_obs = torch.from_numpy(obs).float()
cur_obs = cur_obs.unsqueeze(0).repeat((pop_size * self.num_particles, 1))
costs = torch.zeros(pop_size, self.num_particles)
for t in range(self.horizon):
cur_acs = ac_seqs[t]
# maybe model forward in GPU but mpc planning in CPU
# (Pop_size * Num_par, O) + (Pop_size * Num_par, A) -> (Pop_size * Num_par, O)
next_obs = self.predict_next_obs(cur_obs, cur_acs)
next_obs = next_obs.cpu()
cur_obs = next_obs
# shape (*,)
cost = self.env_cost_func(next_obs, cur_acs, info)
# (Pop_size * Num_par,) -> (Pop_size, Num_par)
cost = cost.view(-1, self.num_particles)
costs += cost
# replace nan with high cost
costs[costs != costs] = 1e6
mean_cost = costs.mean(dim=1)
# (Pop_size,)
return mean_cost.detach().cpu().numpy()
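    # Added note (illustrative, not part of the original file): with e.g. pop_size=400,
    # horizon=25, num_particles=20 and dU=2, `ac_seqs` arrives as (400, 50), is reshaped
    # to (25, 8000, 2), and the mean over the 20 particles yields the (400,) cost vector
    # returned to the CEM optimizer.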
def predict_next_obs(self, obs, acs):
"""Get next state from current dynamics model.
Args:
obs (torch.FloatTensor): (*, O)
acs (torch.FloatTensor): (*, A)
Returns:
torch.FloatTensor: (*, O) next state
"""
proc_obs = self._reshape_model_input(obs)
acs = self._reshape_model_input(acs)
inputs = torch.cat((proc_obs, acs), dim=-1).to(self.device)
with torch.no_grad():
mean, var = self.model(inputs)
# sample next obs
predictions = mean + torch.randn_like(mean).to(self.device) * var.sqrt()
# TS Optimization: Remove additional dimension
predictions = self._reshape_model_output(predictions)
next_obs = obs.to(self.device) + predictions
return next_obs
def _reshape_model_input(self, x):
"""Converts (Pop_size*Num_par, O) -> (N, *, O)."""
dim = x.shape[-1]
new_x = x.reshape(-1, self.ensemble_size, self.particles_per_ensem, dim)
new_x = new_x.transpose(0, 1).reshape(self.ensemble_size, -1, dim)
return new_x
def _reshape_model_output(self, x):
"""Converts (N, *, O) -> (Pop_size*Num_par, O)."""
dim = x.shape[-1]
new_x = x.reshape(self.ensemble_size, -1, self.particles_per_ensem, dim)
        new_x = new_x.transpose(0, 1).reshape(-1, dim)
return new_x
# -----------------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------------
class EnsembleModel(nn.Module):
"""Model for a PETS agent."""
def __init__(self, ensemble_size, in_features, out_features, hidden_size, num_layers, weight_decays):
super().__init__()
self.ensemble_size = ensemble_size
self.in_features = in_features
self.out_features = out_features
self.num_layers = num_layers
self.weight_decays = weight_decays
self.linear_layers = nn.ParameterList()
# input layer
self.linear_layers.extend(get_affine_params(ensemble_size, in_features, hidden_size))
# hidden layers
for i in range(num_layers - 2):
self.linear_layers.extend(get_affine_params(ensemble_size, hidden_size, hidden_size))
# output layer
self.linear_layers.extend(get_affine_params(ensemble_size, hidden_size, out_features))
# input normalization
self.inputs_mu = nn.Parameter(torch.zeros(1, in_features), requires_grad=False)
self.inputs_sigma = nn.Parameter(torch.zeros(1, in_features), requires_grad=False)
# output variance bound
self.max_logvar = nn.Parameter(torch.ones(1, out_features // 2, dtype=torch.float32) / 2.0)
self.min_logvar = nn.Parameter(-torch.ones(1, out_features // 2, dtype=torch.float32) * 10.0)
def compute_decays(self):
"""Gets L2 regularization loss, only for weights (W) not bias (b)."""
decay = 0
for layer, weight_decay in zip(self.linear_layers[::2], self.weight_decays):
decay += weight_decay * (layer**2).sum() / 2.0
return decay
def fit_input_stats(self, data):
"""Gets 1st, 2nd moments from data."""
mu = np.mean(data, axis=0, keepdims=True)
sigma = np.std(data, axis=0, keepdims=True)
sigma[sigma < 1e-12] = 1.0
self.inputs_mu.data = torch.from_numpy(mu).to(self.inputs_mu.device).float()
self.inputs_sigma.data = torch.from_numpy(sigma).to(self.inputs_sigma.device).float()
def forward(self, inputs, ret_logvar=False):
"""Gets ensemble predictions
Args:
inputs (torch.FloatTensor): shape (N,B,I).
ret_logvar (bool): if to return log-variance or variance.
Returns:
            2 torch.FloatTensor: (N, B, O/2) predicted mean and (log-)variance.
"""
# Transform inputs
# NUM_NETS x BATCH_SIZE X INPUT_LENGTH
# (N,B,I)
inputs = (inputs - self.inputs_mu) / self.inputs_sigma
# (N,B,I) x (N,I,O) -> (N,B,O), (N,B,O1) x (N,O1,O2) -> (N,B,O2)
for i, layer in enumerate(zip(self.linear_layers[::2], self.linear_layers[1::2])):
weight, bias = layer
inputs = inputs.matmul(weight) + bias
if i < self.num_layers - 1:
inputs = swish(inputs)
mean = inputs[:, :, :self.out_features // 2]
logvar = inputs[:, :, self.out_features // 2:]
# bound variance output
logvar = self.max_logvar - F.softplus(self.max_logvar - logvar)
logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
if ret_logvar:
return mean, logvar
return mean, torch.exp(logvar)
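# Hedged shape-check sketch (added example, not part of the original file); all sizes
# below are illustrative.
def _ensemble_model_shape_example():
    net = EnsembleModel(ensemble_size=5, in_features=4, out_features=6,
                        hidden_size=32, num_layers=3, weight_decays=[1e-4] * 3)
    net.fit_input_stats(np.random.rand(100, 4))  # sets inputs_mu/sigma (sigma is zero-initialised)
    x = torch.rand(5, 8, 4)                      # (N, B, I)
    mean, var = net(x)                           # each (N, B, out_features // 2) == (5, 8, 3)
    return mean.shape, var.shape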
# -----------------------------------------------------------------------------------
# Storage
# -----------------------------------------------------------------------------------
class PETSBuffer(object):
"""Storage for rollouts during training (for dynamics model).
Attributes:
train_inputs (list): rollouts of training inputs, [(T,O+A)].
train_targets (list): rollouts of training targets, [(T,O)].
num_rollouts (int): total number of rollouts.
num_steps (int): total number of steps.
"""
def __init__(self, obs_space, act_space, batch_size=None):
self.batch_size = batch_size
self.reset()
def reset(self):
"""Allocate space for containers."""
self.train_inputs = []
self.train_targets = []
self.num_rollouts = 0
self.num_steps = 0
def __len__(self):
"""Returns current size of the buffer."""
return self.num_steps
def state_dict(self):
"""Packages buffer data."""
return {key: getattr(self, key) for key in ["train_inputs", "train_targets", "num_rollouts", "num_steps"]}
def load_state_dict(self, data):
"""Restores past buffer data."""
for key, val in data.items():
setattr(self, key, val)
def push(self, input_batch, target_batch):
"""Inserts transition step data (as dict) to storage.
Args:
input_batch (list): rollouts of inputs, [(T,O+A)]
target_batch (list): rollouts of targets, [(T,O)]
"""
self.train_inputs.extend(input_batch)
self.train_targets.extend(target_batch)
self.num_rollouts += len(input_batch)
self.num_steps += sum([int(traj.shape[0]) for traj in input_batch])
def get(self, to_torch=False, device="cpu"):
"""Returns all current data."""
train_inputs = np.concatenate(self.train_inputs, 0)
train_targets = np.concatenate(self.train_targets, 0)
# convert to torch tensors if needed
if to_torch:
train_inputs = torch.as_tensor(train_inputs, device=device)
train_targets = torch.as_tensor(train_targets, device=device)
return train_inputs, train_targets
def sampler(self, batch_size, num_nets=1, device="cpu", drop_last=False):
"""Makes sampler to loop through all data for ensemble model.
Assumes batch_size B, num_nets N, feature size *,
Each output is (N, B, *) for ensemble models.
"""
total_steps = len(self)
samplers = [random_sample(np.arange(total_steps), batch_size, drop_last) for _ in range(num_nets)]
train_inputs = np.concatenate(self.train_inputs, 0)
train_targets = np.concatenate(self.train_targets, 0)
for indices_list in zip(*samplers):
inputs = torch.as_tensor([train_inputs[indices] for indices in indices_list], device=device)
targets = torch.as_tensor([train_targets[indices] for indices in indices_list], device=device)
yield inputs, targets
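# Added note (illustrative, not part of the original file): each iteration of
# `sampler(batch_size=B, num_nets=N)` yields input/target tensors of shape (N, B, *),
# one independently sampled index batch per ensemble member, matching the (N, B, *)
# batches consumed in PETSAgent.update().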
# -----------------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------------
def swish(x):
return x * torch.sigmoid(x)
def truncated_normal(size, mean=0, std=1):
"""Truncated normal for pytorch.
Reference https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/19
"""
tensor = torch.zeros(size)
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
return tensor
def get_affine_params(ensemble_size, in_features, out_features):
"""Gets weight and bias parameters for ensemble linear layer."""
w = truncated_normal(size=(ensemble_size, in_features, out_features), std=1.0 / (2.0 * np.sqrt(in_features)))
w = nn.Parameter(w)
b = nn.Parameter(torch.zeros(ensemble_size, 1, out_features, dtype=torch.float32))
return w, b
def truncated_normal2(size, std):
"""reference: https://github.com/abalakrishna123/recovery-rl/blob/master/config/utils.py
"""
val = stats.truncnorm.rvs(-2, 2, size=size) * std
return torch.tensor(val, dtype=torch.float32)
def get_affine_params2(ensemble_size, in_features, out_features):
"""reference: https://github.com/abalakrishna123/recovery-rl/blob/master/config/utils.py
"""
w = truncated_normal2(size=(ensemble_size, in_features, out_features), std=1.0 / (2.0 * np.sqrt(in_features)))
w = nn.Parameter(w)
b = nn.Parameter(torch.zeros(ensemble_size, 1, out_features, dtype=torch.float32))
return w, b
| 39.655462
| 124
| 0.581532
|
4f0db40b934bb080f6e0cc5bff83aa49d6f28cb0
| 1,022
|
py
|
Python
|
tests/data23/recipe-579126.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-06-05T08:53:26.000Z
|
2020-06-05T08:53:26.000Z
|
tests/data23/recipe-579126.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-06-04T13:47:19.000Z
|
2020-06-04T13:47:57.000Z
|
tests/data23/recipe-579126.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-11-07T17:02:46.000Z
|
2020-11-07T17:02:46.000Z
|
# This code converts an url to pdf in Python using SelectPdf REST API through a POST request.
# The parameters are JSON encoded.
# The content is saved into a file on the disk.
import json
import urllib.request, urllib.error, urllib.parse
api_endpoint = 'http://selectpdf.com/api2/convert/'
key = 'your license key here'
test_url = 'http://selectpdf.com'
local_file = 'test.pdf'
# parameters - add here any needed API parameter
parameters = {
'key': key,
'url': test_url
}
requesturl = api_endpoint
print("Calling {0}\n".format(requesturl))
try:
request = urllib.request.Request(requesturl)
request.add_header('Content-Type', 'application/json')
    result = urllib.request.urlopen(request, json.dumps(parameters).encode('utf-8'))
localFile = open(local_file, 'wb')
localFile.write(result.read())
localFile.close()
print("Test pdf document generated successfully!")
except urllib.error.HTTPError as e:
print("HTTP Response Code: {0}\nHTTP Response Message: {1}".format(e.code, e.reason))
except:
print("An error occurred!")
| 30.058824
| 94
| 0.74364
|
8f2084e410016f7fd05709ddecc090993fe94ba2
| 9,129
|
py
|
Python
|
prostate_cancer_segmentation/datasets/lapa.py
|
vpeopleonatank/pytorch_lightning_segmentation_template
|
4637062d9de840714b2914583c51c4694646a6ab
|
[
"Apache-2.0"
] | 1
|
2021-02-23T15:37:22.000Z
|
2021-02-23T15:37:22.000Z
|
prostate_cancer_segmentation/datasets/lapa.py
|
vpeopleonatank/pytorch_lightning_segmentation_template
|
4637062d9de840714b2914583c51c4694646a6ab
|
[
"Apache-2.0"
] | null | null | null |
prostate_cancer_segmentation/datasets/lapa.py
|
vpeopleonatank/pytorch_lightning_segmentation_template
|
4637062d9de840714b2914583c51c4694646a6ab
|
[
"Apache-2.0"
] | null | null | null |
import enum
import random
from pathlib import Path
from typing import List, Optional, Tuple, Union
import albumentations as A
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset
from prostate_cancer_segmentation.utils.path_check import get_path, PathType
class DatasetSplit(enum.Enum):
TRAIN = 0
VAL = 1
TEST = 2
class LapaDataset(Dataset):
"""The Landmark guided face Parsing dataset (LaPa)
Contains pixel-level annotations for face parsing.
References:
https://github.com/JDAI-CV/lapa-dataset
"""
@enum.unique
class LapaClassId(enum.IntEnum):
# Mapping of the classes within the lapa dataset
BACKGROUND = 0
SKIN = 1
EYEBROW_LEFT = 2
EYEBROW_RIGHT = 3
EYE_LEFT = 4
EYE_RIGHT = 5
NOSE = 6
LIP_UPPER = 7
INNER_MOUTH = 8
LIP_LOWER = 9
HAIR = 10
SUBDIR_IMAGES = "images"
SUBDIR_LABELS = "labels"
SUBDIR_SPLIT = {DatasetSplit.TRAIN: "train", DatasetSplit.VAL: "val", DatasetSplit.TEST: "test"}
def __init__(
self,
root_dir: Union[str, Path],
data_split: DatasetSplit,
image_ext: Tuple[str] = ("*.jpg",),
label_ext: Tuple[str] = ("*.png",),
augmentations: Optional[A.Compose] = None,
):
super().__init__()
self.augmentations = augmentations
self.image_ext = image_ext # The file extensions of input images to search for in input dir
self.label_ext = label_ext # The file extensions of labels to search for in label dir
self.root_dir = self._check_dir(root_dir)
# Get subdirs for images and labels
self.images_dir = self._check_dir(self.root_dir / self.SUBDIR_SPLIT[data_split] / self.SUBDIR_IMAGES)
self.labels_dir = self._check_dir(self.root_dir / self.SUBDIR_SPLIT[data_split] / self.SUBDIR_LABELS)
# Create list of filenames
self._datalist_input = [] # Variable containing list of all input images filenames in dataset
self._datalist_label = [] # Variable containing list of all ground truth filenames in dataset
self._create_lists_filenames()
def __len__(self):
return len(self._datalist_input)
def __getitem__(self, index):
# Read input rgb imgs
image_path = self._datalist_input[index]
img = self._read_image(image_path)
# Read ground truth labels
label_path = self._datalist_label[index]
label = self._read_label(label_path)
# Apply image augmentations
if self.augmentations is not None:
augmented = self.augmentations(image=img, mask=label)
img = augmented["image"]
label = augmented["mask"]
# Convert to Tensor. RGB images are normally numpy uint8 array with shape (H, W, 3).
# RGB tensors should be (3, H, W) with dtype float32 in range [0, 1] (may change with normalization applied)
img_tensor = torchvision.transforms.ToTensor()(img)
label_tensor = torch.from_numpy(label)
# TODO: Return dict
# data = {
# 'image': img_tensor,
# 'label': label_tensor
# }
# return data
return img_tensor, label_tensor.long()
@staticmethod
def _check_dir(dir_path: Union[str, Path]) -> Path:
return get_path(dir_path, must_exist=True, path_type=PathType.DIR)
@staticmethod
def _read_label(label_path: Path) -> np.ndarray:
mask = cv2.imread(str(label_path), cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
if len(mask.shape) != 2:
raise RuntimeError(f"The shape of label must be (H, W). Got: {mask.shape}")
return mask.astype(np.int32)
@staticmethod
def _read_image(image_path: Path) -> np.ndarray:
mask = cv2.imread(str(image_path), cv2.IMREAD_COLOR)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
if len(mask.shape) != 3:
raise RuntimeError(f"The shape of image must be (H, W, C). Got: {mask.shape}")
return mask
def _create_lists_filenames(self):
"""Creates a list of filenames of images and labels in dataset"""
self._datalist_input = self._get_matching_files_in_dir(self.images_dir, self.image_ext)
self._datalist_label = self._get_matching_files_in_dir(self.labels_dir, self.label_ext)
num_images = len(self._datalist_input)
num_labels = len(self._datalist_label)
if num_images != num_labels:
raise ValueError(
f"The number of images ({num_images}) and labels ({num_labels}) do not match."
f"\n Images dir: {self.images_dir}\n Labels dir:{self.labels_dir}"
)
def _get_matching_files_in_dir(self, data_dir: Union[str, Path], wildcard_patterns: Tuple[str]) -> List[Path]:
"""Get filenames within a dir that match a set of wildcard patterns
Will not search within subdirectories.
Args:
data_dir: Directory to search within
wildcard_patterns: Tuple of wildcard patterns matching required filenames. Eg: ('*.rgb.png', '*.rgb.jpg')
Returns:
list[Path]: List of paths to files found
"""
data_dir = self._check_dir(data_dir)
list_matching_files = []
for ext in wildcard_patterns:
list_matching_files += sorted(data_dir.glob(ext))
if len(list_matching_files) == 0:
raise ValueError(
"No matching files found in given directory."
f"\n Directory: {data_dir}\n Search patterns: {wildcard_patterns}"
)
return list_matching_files
class LaPaDataModule(pl.LightningDataModule):
def __init__(self, data_dir: str, batch_size: int, num_workers: int, resize_h: int, resize_w: int):
super().__init__()
self.batch_size = batch_size
self.num_workers = num_workers
self.data_dir = get_path(data_dir, must_exist=True, path_type=PathType.DIR)
self.resize_h = resize_h
self.resize_w = resize_w
self.lapa_train = None
self.lapa_val = None
self.lapa_test = None
def prepare_data(self):
"""download dataset, tokenize, etc"""
"""
Downloading original data from author's google drive link:
>>> import gdown
>>> url = "https://drive.google.com/uc?export=download&id=1EtyCtiQZt2Y5qrb-0YxRxaVLpVcgCOQV"
>>> output = "lapa-downloaded.tar.gz"
>>> gdown.download(url, output, quiet=False, proxy=False)
"""
pass
def setup(self, stage=None):
# count number of classes, perform train/val/test splits, apply transforms, etc
augs_train = self.get_augs_train()
augs_test = self.get_augs_test()
self.lapa_train = LapaDataset(root_dir=self.data_dir, data_split=DatasetSplit.TRAIN, augmentations=augs_train)
        self.lapa_val = LapaDataset(root_dir=self.data_dir, data_split=DatasetSplit.VAL, augmentations=augs_test)
        self.lapa_test = LapaDataset(root_dir=self.data_dir, data_split=DatasetSplit.TEST, augmentations=augs_test)
def train_dataloader(self):
train_loader = DataLoader(
self.lapa_train,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
worker_init_fn=self._dataloader_worker_init,
)
return train_loader
def val_dataloader(self):
val_loader = DataLoader(
self.lapa_val,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
drop_last=False,
worker_init_fn=self._dataloader_worker_init,
)
return val_loader
def test_dataloader(self):
test_loader = DataLoader(
self.lapa_test,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
drop_last=False,
worker_init_fn=self._dataloader_worker_init,
)
return test_loader
def get_augs_test(self):
augs_test = A.Compose(
[
# Geometric Augs
A.SmallestMaxSize(max_size=self.resize_h, interpolation=0, p=1.0),
A.CenterCrop(height=self.resize_h, width=self.resize_w, p=1.0),
]
)
return augs_test
def get_augs_train(self):
augs_train = A.Compose(
[
# Geometric Augs
A.SmallestMaxSize(max_size=self.resize_h, interpolation=0, p=1.0),
A.CenterCrop(height=self.resize_h, width=self.resize_w, p=1.0),
]
)
return augs_train
@staticmethod
def _dataloader_worker_init(*args):
"""Seeds the workers within the Dataloader"""
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
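# Hedged usage sketch (added example, not part of the original file); the directory
# path, batch size and crop sizes are illustrative assumptions.
def _lapa_datamodule_example(data_dir="/path/to/LaPa"):
    dm = LaPaDataModule(data_dir=data_dir, batch_size=8, num_workers=0,
                        resize_h=256, resize_w=256)
    dm.setup()
    images, labels = next(iter(dm.train_dataloader()))
    return images.shape, labels.shape  # (8, 3, 256, 256) and (8, 256, 256)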
| 35.111538
| 118
| 0.634133
|
bdeddde8a030d2714ec3601222a43d925f496c00
| 4,679
|
py
|
Python
|
training/sklearn/structured/custom_routines/trainer/utils.py
|
renovate-bot/ai-platform-samples
|
f4ab48a8ff461f6de8f9e7fdc2a389757a1c05ce
|
[
"Apache-2.0"
] | null | null | null |
training/sklearn/structured/custom_routines/trainer/utils.py
|
renovate-bot/ai-platform-samples
|
f4ab48a8ff461f6de8f9e7fdc2a389757a1c05ce
|
[
"Apache-2.0"
] | 13
|
2022-01-04T22:18:52.000Z
|
2022-03-15T01:36:15.000Z
|
training/sklearn/structured/custom_routines/trainer/utils.py
|
renovate-bot/ai-platform-samples
|
f4ab48a8ff461f6de8f9e7fdc2a389757a1c05ce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hold utility functions."""
import os
import tensorflow as tf
import pandas as pd
from sklearn import model_selection as ms
from sklearn.externals import joblib
from trainer import metadata
def data_train_test_split(data_df):
"""Split the DataFrame two subsets for training and testing.
Args:
data_df: (pandas.DataFrame) DataFrame the splitting to be performed on
Returns:
      A Tuple of (numpy.ndarray, pandas.Series,
        numpy.ndarray, pandas.Series)
"""
if metadata.FEATURE_NAMES is None:
# Use all the columns as features, except for the target column
feature_names = list(data_df.columns)
feature_names.remove(metadata.TARGET_NAME)
features = data_df[feature_names]
else:
# Only use metadata.FEATURE_NAMES
features = data_df[metadata.FEATURE_NAMES]
target = data_df[metadata.TARGET_NAME]
x_train, x_val, y_train, y_val = ms.train_test_split(features,
target,
test_size=0.2)
return x_train.values, y_train, x_val.values, y_val
def read_df_from_bigquery(full_table_path, project_id=None, num_samples=None):
"""Read data from BigQuery and split into train and validation sets.
Args:
full_table_path: (string) full path of the table containing training data
in the format of [project_id.dataset_name.table_name].
project_id: (string, Optional) Google BigQuery Account project ID.
num_samples: (int, Optional) Number of data samples to read.
Returns:
pandas.DataFrame
"""
query = metadata.BASE_QUERY.format(table=full_table_path)
limit = ' LIMIT {}'.format(num_samples) if num_samples else ''
query += limit
# Use "application default credentials"
# Use SQL syntax dialect
data_df = pd.read_gbq(query, project_id=project_id, dialect='standard')
return data_df
def read_df_from_gcs(file_pattern):
"""Read data from Google Cloud Storage, split into train and validation sets.
    If metadata.CSV_COLUMNS is set, the CSV files are assumed to have no header row
    and those names are used as column names; otherwise the header row is read from the files.
Args:
file_pattern: (string) pattern of the files containing training data.
For example: [gs://bucket/folder_name/prefix]
Returns:
pandas.DataFrame
"""
    # Read the matching files directly (works for both GCS and local paths)
df_list = []
for filepath in tf.io.gfile.glob(file_pattern):
with tf.io.gfile.GFile(filepath, 'r') as f:
if metadata.CSV_COLUMNS is None:
df_list.append(pd.read_csv(f))
else:
df_list.append(pd.read_csv(f, names=metadata.CSV_COLUMNS,
header=None))
data_df = pd.concat(df_list)
return data_df
def upload_to_gcs(local_path, gcs_path):
"""Upload local file to Google Cloud Storage.
Args:
local_path: (string) Local file
gcs_path: (string) Google Cloud Storage destination
Returns:
None
"""
tf.io.gfile.copy(local_path, gcs_path)
def dump_object(object_to_dump, output_path):
"""Pickle the object and save to the output_path.
Args:
object_to_dump: Python object to be pickled
output_path: (string) output path which can be Google Cloud Storage
Returns:
None
"""
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(os.path.dirname(output_path))
with tf.io.gfile.GFile(output_path, 'w') as wf:
joblib.dump(object_to_dump, wf)
def boolean_mask(columns, target_columns):
"""Create a boolean mask indicating location of target_columns in columns.
Args:
columns: (List[string]), list of all columns considered.
target_columns: (List[string]), columns whose position
should be masked as 1.
Returns:
List[bool]
"""
target_set = set(target_columns)
return [x in target_set for x in columns]
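# Added example (illustrative, not part of the original file):
# boolean_mask(['age', 'income', 'zip'], ['income']) -> [False, True, False]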
| 30.986755
| 81
| 0.663176
|
cb03832a62e65e794ac0b155609ffef30a3cf785
| 24,939
|
py
|
Python
|
tests/data_context/test_data_context_utils.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | null | null | null |
tests/data_context/test_data_context_utils.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | null | null | null |
tests/data_context/test_data_context_utils.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | null | null | null |
import os
from contextlib import contextmanager
from unittest import mock
import pytest
import great_expectations.exceptions as gee
from great_expectations.data_context.util import (
PasswordMasker,
parse_substitution_variable,
secretmanager,
substitute_value_from_aws_secrets_manager,
substitute_value_from_azure_keyvault,
substitute_value_from_gcp_secret_manager,
substitute_value_from_secret_store,
)
from great_expectations.types import safe_deep_copy
from great_expectations.util import load_class
def test_load_class_raises_error_when_module_not_found():
with pytest.raises(gee.PluginModuleNotFoundError):
load_class("foo", "bar")
def test_load_class_raises_error_when_class_not_found():
with pytest.raises(gee.PluginClassNotFoundError):
load_class("TotallyNotARealClass", "great_expectations.datasource")
def test_load_class_raises_error_when_class_name_is_None():
with pytest.raises(TypeError):
load_class(None, "great_expectations.datasource")
def test_load_class_raises_error_when_class_name_is_not_string():
for bad_input in [1, 1.3, ["a"], {"foo": "bar"}]:
with pytest.raises(TypeError):
load_class(bad_input, "great_expectations.datasource")
def test_load_class_raises_error_when_module_name_is_None():
with pytest.raises(TypeError):
load_class("foo", None)
def test_load_class_raises_error_when_module_name_is_not_string():
for bad_input in [1, 1.3, ["a"], {"foo": "bar"}]:
with pytest.raises(TypeError):
load_class(bad_input, "great_expectations.datasource")
@pytest.mark.filterwarnings(
"ignore:SQLAlchemy is not installed*:UserWarning:great_expectations.data_context.util"
)
def test_password_masker_mask_db_url(monkeypatch, tmp_path):
"""
What does this test and why?
    The PasswordMasker.mask_db_url() should mask passwords consistently in database urls. The output of mask_db_url should be the same whether use_urlparse is set to True or False.
This test uses database url examples from
https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
"""
# PostgreSQL (if installed in test environment)
# default
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
# missing port number, using urlparse
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}/mydatabase", use_urlparse=True
)
== f"postgresql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}/mydatabase"
)
# psycopg2 (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+psycopg2://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+psycopg2://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
# pg8000 (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+pg8000://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+pg8000://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/mydatabase"
)
# MySQL
# default (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(f"mysql://scott:tiger@{db_hostname}:65432/foo")
== f"mysql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
# mysqlclient (a maintained fork of MySQL-Python) (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+mysqldb://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+mysqldb://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
# PyMySQL (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+pymysql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+pymysql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:65432/foo"
)
# Oracle (if installed in test environment)
url_host = os.getenv("GE_TEST_LOCALHOST_URL", "127.0.0.1")
try:
assert (
PasswordMasker.mask_db_url(f"oracle://scott:tiger@{url_host}:1521/sidname")
== f"oracle://scott:***@{url_host}:1521/sidname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"oracle://scott:tiger@{url_host}:1521/sidname", use_urlparse=True
)
== f"oracle://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{url_host}:1521/sidname"
)
try:
assert (
PasswordMasker.mask_db_url("oracle+cx_oracle://scott:tiger@tnsname")
== f"oracle+cx_oracle://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@tnsname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
"oracle+cx_oracle://scott:tiger@tnsname", use_urlparse=True
)
== f"oracle+cx_oracle://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@tnsname"
)
# Microsoft SQL Server
# pyodbc (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url("mssql+pyodbc://scott:tiger@mydsn")
== "mssql+pyodbc://scott:***@mydsn"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
"mssql+pyodbc://scott:tiger@mydsn", use_urlparse=True
)
== f"mssql+pyodbc://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@mydsn"
)
# pymssql (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname"
)
== f"mssql+pymssql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:12345/dbname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname", use_urlparse=True
)
== f"mssql+pymssql://scott:{PasswordMasker.MASKED_PASSWORD_STRING}@{db_hostname}:12345/dbname"
)
# SQLite
# relative path
temp_dir = tmp_path / "sqllite_tests"
temp_dir.mkdir()
monkeypatch.chdir(temp_dir)
assert (
PasswordMasker.mask_db_url(f"sqlite:///something/foo.db")
== f"sqlite:///something/foo.db"
)
assert (
PasswordMasker.mask_db_url(f"sqlite:///something/foo.db", use_urlparse=True)
== f"sqlite:///something/foo.db"
)
# absolute path
# Unix/Mac - 4 initial slashes in total
assert (
PasswordMasker.mask_db_url("sqlite:////absolute/path/to/foo.db")
== "sqlite:////absolute/path/to/foo.db"
)
assert (
PasswordMasker.mask_db_url(
"sqlite:////absolute/path/to/foo.db", use_urlparse=True
)
== "sqlite:////absolute/path/to/foo.db"
)
# Windows
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db")
== "sqlite:///C:\\path\\to\\foo.db"
)
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db", use_urlparse=True)
== "sqlite:///C:\\path\\to\\foo.db"
)
# Windows alternative using raw string
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db")
== r"sqlite:///C:\path\to\foo.db"
)
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db", use_urlparse=True)
== r"sqlite:///C:\path\to\foo.db"
)
# in-memory
assert PasswordMasker.mask_db_url("sqlite://") == "sqlite://"
assert PasswordMasker.mask_db_url("sqlite://", use_urlparse=True) == "sqlite://"
def test_sanitize_config_raises_exception_with_bad_input(
basic_data_context_config,
):
# expect that an Exception is raised if something other than a dict is passed
with pytest.raises(TypeError):
PasswordMasker.sanitize_config(basic_data_context_config)
def test_sanitize_config_doesnt_change_config_without_datasources(
basic_data_context_config_dict,
):
# expect no change without datasources
config_without_creds = PasswordMasker.sanitize_config(
basic_data_context_config_dict
)
assert config_without_creds == basic_data_context_config_dict
def test_sanitize_config_masks_cloud_store_backend_access_tokens(
data_context_config_dict_with_cloud_backed_stores, ge_cloud_access_token
):
# test that cloud store backend tokens have been properly masked
config_with_creds_in_stores = PasswordMasker.sanitize_config(
data_context_config_dict_with_cloud_backed_stores
)
for name, store_config in config_with_creds_in_stores["stores"].items():
if (
not store_config.get("store_backend")
or not store_config["store_backend"].get("ge_cloud_credentials")
or not store_config["store_backend"]["ge_cloud_credentials"].get(
"access_token"
)
):
# a field in store_config["store_backend"]["ge_cloud_credentials"]["access_token"]
# doesn't exist, so we expect this config to be unchanged
assert (
store_config
== data_context_config_dict_with_cloud_backed_stores["stores"][name]
)
else:
# check that the original token exists
assert (
data_context_config_dict_with_cloud_backed_stores["stores"][name][
"store_backend"
]["ge_cloud_credentials"]["access_token"]
== ge_cloud_access_token
)
# expect that the GE Cloud token has been obscured
assert (
store_config["store_backend"]["ge_cloud_credentials"]["access_token"]
!= ge_cloud_access_token
)
def test_sanitize_config_masks_execution_engine_connection_strings(
data_context_config_dict_with_datasources, conn_string_password
):
# test that datasource credentials have been properly masked
unaltered_datasources = data_context_config_dict_with_datasources["datasources"]
config_with_creds_masked = PasswordMasker.sanitize_config(
data_context_config_dict_with_datasources
)
masked_datasources = config_with_creds_masked["datasources"]
# iterate through the processed datasources and check for correctness
for name, processed_config in masked_datasources.items():
# check if processed_config["execution_engine"]["connection_string"] exists
if processed_config.get("execution_engine") and processed_config[
"execution_engine"
].get("connection_string"):
# check if the connection string contains a password
if (
conn_string_password
in unaltered_datasources[name]["execution_engine"]["connection_string"]
):
# it does contain a password, so make sure its masked
assert (
conn_string_password
not in processed_config["execution_engine"]["connection_string"]
)
else:
# it doesn't contain a password, so make sure it's unaltered
assert processed_config == unaltered_datasources[name]
# processed_config either doesn't have an `execution_engine` field,
# or a `connection_string` field
else:
# expect this config to be unaltered
assert processed_config == unaltered_datasources[name]
def test_sanitize_config_with_arbitrarily_nested_sensitive_keys():
    # the nested password should be found and masked even though it is not at the top level
config = {
"some_field": "and a value",
"some_other_field": {"password": "expect this to be found"},
}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert res["some_other_field"]["password"] == PasswordMasker.MASKED_PASSWORD_STRING
def test_sanitize_config_with_password_field():
# this case has a password field inside a credentials dict - expect it to be masked
config = {"credentials": {"password": "my-super-duper-secure-passphrase-123"}}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert res["credentials"]["password"] == PasswordMasker.MASKED_PASSWORD_STRING
def test_sanitize_config_with_url_field(
conn_string_with_embedded_password, conn_string_password
):
# this case has a url field inside a credentials dict - expect the password inside
# of it to be masked
config = {"credentials": {"url": conn_string_with_embedded_password}}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert conn_string_password not in res["credentials"]["url"]
assert PasswordMasker.MASKED_PASSWORD_STRING in res["credentials"]["url"]
def test_sanitize_config_with_nested_url_field(
conn_string_password, conn_string_with_embedded_password
):
# this case has a connection string in an execution_engine dict
config = {
"execution_engine": {"connection_string": conn_string_with_embedded_password}
}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert conn_string_password not in res["execution_engine"]["connection_string"]
assert (
PasswordMasker.MASKED_PASSWORD_STRING
in res["execution_engine"]["connection_string"]
)
def test_sanitize_config_regardless_of_parent_key():
    # expect this config to still be masked, regardless of the parent key
config = {
"some_field": "and a value",
"some_other_field": {"access_token": "but this won't be found"},
}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert (
res["some_other_field"]["access_token"] == PasswordMasker.MASKED_PASSWORD_STRING
)
def test_sanitize_config_masks_cloud_access_token(ge_cloud_access_token):
# expect the access token to be found and masked
config = {
"store_backend": {
"ge_cloud_credentials": {"access_token": ge_cloud_access_token}
}
}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert (
res["store_backend"]["ge_cloud_credentials"]["access_token"]
== PasswordMasker.MASKED_PASSWORD_STRING
)
def test_sanitize_config_works_with_list():
config = {"some_key": [{"access_token": "12345"}]}
config_copy = safe_deep_copy(config)
res = PasswordMasker.sanitize_config(config_copy)
assert res != config
assert res["some_key"][0]["access_token"] == PasswordMasker.MASKED_PASSWORD_STRING
def test_parse_substitution_variable():
"""
What does this test and why?
Ensure parse_substitution_variable works as expected.
Returns:
"""
assert parse_substitution_variable("${SOME_VAR}") == "SOME_VAR"
assert parse_substitution_variable("$SOME_VAR") == "SOME_VAR"
assert parse_substitution_variable("SOME_STRING") is None
assert parse_substitution_variable("SOME_$TRING") is None
assert parse_substitution_variable("${some_var}") == "some_var"
assert parse_substitution_variable("$some_var") == "some_var"
assert parse_substitution_variable("some_string") is None
assert parse_substitution_variable("some_$tring") is None
assert parse_substitution_variable("${SOME_$TRING}") is None
assert parse_substitution_variable("$SOME_$TRING") == "SOME_"
@contextmanager
def does_not_raise():
yield
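# does_not_raise() is a null context manager: paired with pytest.raises(...) in the
# parametrized cases below, it lets a single `with raises:` block cover both the
# success rows and the error rows. A minimal sketch of the pattern (hypothetical test,
# not part of this suite; assumes `import math`):
#
#     @pytest.mark.parametrize(
#         "value, raises",
#         [(4, does_not_raise()), (-1, pytest.raises(ValueError))],
#     )
#     def test_sqrt(value, raises):
#         with raises:
#             math.sqrt(value)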
@pytest.mark.parametrize(
"input_value,method_to_patch,return_value",
[
("any_value", None, "any_value"),
("secret|any_value", None, "secret|any_value"),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
"great_expectations.data_context.util.substitute_value_from_aws_secrets_manager",
"success",
),
(
"secret|projects/project_id/secrets/my_secret",
"great_expectations.data_context.util.substitute_value_from_gcp_secret_manager",
"success",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my_secret",
"great_expectations.data_context.util.substitute_value_from_azure_keyvault",
"success",
),
],
)
def test_substitute_value_from_secret_store(input_value, method_to_patch, return_value):
if method_to_patch:
with mock.patch(method_to_patch, return_value=return_value):
assert substitute_value_from_secret_store(value=input_value) == return_value
else:
assert substitute_value_from_secret_store(value=input_value) == return_value
class MockedBoto3Client:
def __init__(self, secret_response):
self.secret_response = secret_response
def get_secret_value(self, *args, **kwargs):
return self.secret_response
class MockedBoto3Session:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self):
return self
def client(self, *args, **kwargs):
return MockedBoto3Client(self.secret_response)
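# MockedBoto3Session stands in for boto3.session.Session once patched below: the
# instance is itself callable (so calling Session() returns the same stub), and
# client() hands back a MockedBoto3Client whose get_secret_value() simply returns
# the canned secret_response, mimicking the Secrets Manager API surface used here.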
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
{"SecretString": "value"},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
{"SecretBinary": b"dmFsdWU="},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret|key",
{"SecretString": '{"key": "value"}'},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret|key",
{"SecretBinary": b"eyJrZXkiOiAidmFsdWUifQ=="},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret:000000000-0000-0000-0000-00000000000|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_aws_secrets_manager(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.boto3.session.Session",
return_value=MockedBoto3Session(secret_response),
):
assert substitute_value_from_aws_secrets_manager(input_value) == expected
class MockedSecretManagerServiceClient:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self):
return self
def access_secret_version(self, *args, **kwargs):
class Response:
pass
response = Response()
response._pb = Response()
response._pb.payload = Response()
response._pb.payload.data = self.secret_response
return response
@pytest.mark.skipif(
secretmanager is None,
reason="Could not import 'secretmanager' from google.cloud in data_context.util",
)
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|projects/project_id/secrets/my_secret",
b"value",
does_not_raise(),
"value",
),
(
"secret|projects/project_id/secrets/my_secret|key",
b'{"key": "value"}',
does_not_raise(),
"value",
),
(
"secret|projects/project_id/secrets/my_se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|projects/project_id/secrets/my_secret/version/A|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_gcp_secret_manager(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.secretmanager.SecretManagerServiceClient",
return_value=MockedSecretManagerServiceClient(secret_response),
):
assert substitute_value_from_gcp_secret_manager(input_value) == expected
class MockedSecretClient:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self, *args, **kwargs):
return self
def get_secret(self, *args, **kwargs):
class Response:
pass
response = Response()
response.value = self.secret_response
return response
@mock.patch("great_expectations.data_context.util.DefaultAzureCredential", new=object)
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-secret",
"value",
does_not_raise(),
"value",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-secret|key",
'{"key": "value"}',
does_not_raise(),
"value",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|https://my_vault_name.vault.azure.net/secrets/my-secret/A0000000000000000000000000000000|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_azure_keyvault(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.SecretClient",
return_value=MockedSecretClient(secret_response),
):
assert substitute_value_from_azure_keyvault(input_value) == expected
| 34.209877
| 181
| 0.656402
|
452b6428e476369698c15cbb4cd290b477592212
| 191
|
py
|
Python
|
setup.py
|
datafields-team/gym-cryptotrading
|
96cf28b07175fb2fbf2daa7060494db81ea8d58d
|
[
"MIT"
] | 104
|
2018-04-26T06:30:45.000Z
|
2022-03-31T17:58:33.000Z
|
setup.py
|
datafields-team/gym-cryptotrading
|
96cf28b07175fb2fbf2daa7060494db81ea8d58d
|
[
"MIT"
] | 1
|
2018-06-21T06:06:17.000Z
|
2019-02-09T20:23:17.000Z
|
setup.py
|
perara/gym-cryptotrading
|
96cf28b07175fb2fbf2daa7060494db81ea8d58d
|
[
"MIT"
] | 42
|
2018-05-04T12:00:35.000Z
|
2022-03-30T18:33:08.000Z
|
from setuptools import setup
setup(name='gym_cryptotrading',
version='0.0.1',
install_requires=['gym>=0.2.3',
'numpy',
'pandas']
)
| 23.875
| 37
| 0.492147
|
7f81bf97f74c24c3ec4d9e9e573714f77e19a031
| 2,268
|
py
|
Python
|
pyclam/tests/test_functional.py
|
BurntSushi/clam
|
e374c08d016018d6be4d2fc4e8b0999b52f82929
|
[
"MIT"
] | 1
|
2020-07-31T16:55:45.000Z
|
2020-07-31T16:55:45.000Z
|
pyclam/tests/test_functional.py
|
BurntSushi/clam
|
e374c08d016018d6be4d2fc4e8b0999b52f82929
|
[
"MIT"
] | null | null | null |
pyclam/tests/test_functional.py
|
BurntSushi/clam
|
e374c08d016018d6be4d2fc4e8b0999b52f82929
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from pyclam import Manifold, criterion
from pyclam.tests.utils import linear_search
class TestManifoldFunctional(unittest.TestCase):
def test_random_no_limits(self):
# We begin by getting some data and building with no constraints.
data = np.random.randn(100, 3)
manifold = Manifold(data, 'euclidean').build()
# With no constraints, clusters should be singletons.
self.assertEqual(data.shape[0], manifold.graph.population)
self.assertEqual(1, len(manifold.find_clusters(data[0], 0., -1)))
self.assertEqual(1, len(manifold.find_points(data[0], 0.)))
self.assertEqual(data.shape[0], manifold.layers[-1].cardinality)
return
def test_random_large(self):
data = np.random.randn(1000, 3)
manifold = Manifold(data, 'euclidean').build(
criterion.MaxDepth(10),
criterion.LFDRange(60, 50),
)
for _ in range(10):
point = int(np.random.choice(3))
linear_results = linear_search(data[point], 0.5, data, manifold.metric)
self.assertEqual(len(linear_results), len(manifold.find_points(data[point], 0.5)))
return
def test_all_same(self):
# A bit simpler, every point is the same.
data = np.ones((1000, 3))
manifold = Manifold(data, 'euclidean').build()
# There should only ever be one cluster here.
self.assertEqual(1, len(manifold.layers))
manifold.build_tree()
        # Even after an explicit build_tree call.
self.assertEqual(1, len(manifold.layers))
self.assertEqual(1, len(manifold.find_clusters(np.asarray([1, 1, 1]), 0.0, -1)))
# And, we should get all 1000 points back for any of the data.
self.assertEqual(1000, len(manifold.find_points(data[0], 0.0)))
return
def test_two_points_with_dups(self):
# Here we have two distinct clusters.
data = np.concatenate([np.ones((500, 2)) * -2, np.ones((500, 2)) * 2])
manifold = Manifold(data, 'euclidean').build()
# We expect building to stop with two clusters.
self.assertEqual(2, manifold.graph.cardinality, f'Expected 2 clusters, got {manifold.graph.cardinality}')
return
| 42
| 113
| 0.645503
|
c416eb4fc4eaf6c08474414a37e2cecdff6235f7
| 1,318
|
py
|
Python
|
build/plugins/docs.py
|
wikman/catboost
|
984989d556a92f4978df193b835dfe98afa77bc2
|
[
"Apache-2.0"
] | null | null | null |
build/plugins/docs.py
|
wikman/catboost
|
984989d556a92f4978df193b835dfe98afa77bc2
|
[
"Apache-2.0"
] | null | null | null |
build/plugins/docs.py
|
wikman/catboost
|
984989d556a92f4978df193b835dfe98afa77bc2
|
[
"Apache-2.0"
] | null | null | null |
import json
def extract_macro_calls(unit, macro_value_name):
if not unit.get(macro_value_name):
return []
    return list(filter(None, unit.get(macro_value_name).replace('$' + macro_value_name, '').split()))
def macro_calls_to_dict(unit, calls):
def split_args(arg):
if arg is None:
return None
        kv = list(filter(None, arg.split('=')))  # list() so len() and dict() also work under Python 3
if len(kv) != 2:
unit.message(['error', 'Invalid variables specification "{}": value expected to be in form %name%=%value% (with no spaces)'.format(arg)])
return None
return kv
return dict(filter(None, map(split_args, calls)))
def get_variables(unit):
orig_variables = macro_calls_to_dict(unit, extract_macro_calls(unit, '_DOCS_VARS_VALUE'))
return {k: unit.get(k) or v for k, v in orig_variables.items()}
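# For illustration only (the variable names are hypothetical): with _DOCS_VARS_VALUE set
# to "lang=ru audience=internal", extract_macro_calls() yields
# ['lang=ru', 'audience=internal'], macro_calls_to_dict() turns that into
# {'lang': 'ru', 'audience': 'internal'}, and get_variables() lets a unit variable of the
# same name override each default value.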
def onprocess_docs(unit, *args):
if unit.enabled('_DOCS_USE_PLANTUML'):
unit.on_docs_yfm_use_plantuml([])
variables = get_variables(unit)
if variables:
unit.set(['_DOCS_VARS_FLAG', '--vars {}'.format(json.dumps(json.dumps(variables, sort_keys=True)))])
def onprocess_mkdocs(unit, *args):
variables = get_variables(unit)
if variables:
unit.set(['_DOCS_VARS_FLAG', ' '.join(['--var {}={}'.format(k, v) for k, v in variables.items()])])
| 29.954545
| 149
| 0.650986
|
157f1c915c5d801010a8428a23e9766fd52d7cb0
| 4,522
|
py
|
Python
|
ecs/bin/ecsctl.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | null | null | null |
ecs/bin/ecsctl.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | 1
|
2021-02-14T19:53:08.000Z
|
2021-02-14T19:53:08.000Z
|
ecs/bin/ecsctl.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import click
from ecs import project_loader, ecs
import yaml
import json
from pprint import pprint
import os
import boto3
import botocore
# Shared options
_shared_options = [
click.option('-p', '--project-path', 'project_path', show_default=True, default="ecs/"),
click.option('-v', '--val', 'values', type=click.Path(exists=True), multiple=True),
click.option('-e', 'envs', type=str, multiple=True)
]
def add_options(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
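# add_options applies each shared option with option(func), iterating in reverse so the
# stacked result is equivalent to writing the decorators top-to-bottom in the order they
# appear in _shared_options. A sketch of the manual equivalent (hypothetical command name):
#
#     @group.command()
#     @click.option('-p', '--project-path', 'project_path', show_default=True, default="ecs/")
#     @click.option('-v', '--val', 'values', type=click.Path(exists=True), multiple=True)
#     @click.option('-e', 'envs', type=str, multiple=True)
#     def some_command(**kwargs):
#         ...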
# Definition of group necessary for building arguments
@click.group()
def group(**kwargs):
pass
def _render_project(**kwargs):
""" Function used for handling project_path, values, and envs options"""
# Convert env input into dict
envs = list(kwargs.get('envs', []))
envs = dict([x.split('=') for x in envs])
# Load and interpolate project
return project_loader.load_project(
kwargs.get('project_path'),
list(kwargs.get('values', [])),
envs)
@group.command()
@add_options(_shared_options)
@click.option('-f', '--format', 'oformat', type=click.Choice(['json', 'yaml']), default='json')
def render(**kwargs):
""" Interpolate and print project configuration """
# Interpolate the project
ld = _render_project(**kwargs)
# Print
for i in zip(['Task definition', 'Service'], ld):
# Generate in the output format
if kwargs['oformat'].lower() == 'yaml':
g = yaml.dump(i[1])
if kwargs['oformat'].lower() == 'json':
g = json.dumps(i[1],indent=2)
# Print
click.secho("* {}:".format(i[0]), fg='green')
click.echo(g)
click.secho("* Output:", fg='green')
click.echo(ld[2])
@group.command()
@add_options(_shared_options)
@click.option('-f', '--format', 'oformat', type=click.Choice(['json', 'yaml']), default='json')
def output(**kwargs):
""" Interpolate output.j2 file """
# Interpolate the project
ld = _render_project(**kwargs)
# Print the output
click.echo(ld[2])
@group.command()
@add_options(_shared_options)
@click.option('-td', '--task-definition', 'task_definition', default='task-definition.json')
@click.option('-s', '--service', 'service', default='service.json')
@click.option('-o', '--output', 'output', default='output.txt')
def generate(**kwargs):
""" Generate configuration files which can be used by aws-cli """
# Interpolate the project
ld = _render_project(**kwargs)
# Portion generating output files
for i in zip([kwargs['task_definition'], kwargs['service'], kwargs['output']], ld):
# Helper print
click.secho("Generating {}".format(i[0]))
# Generate output file
with open(i[0], "w") as f:
g = json.dumps(i[1],indent=2)
f.write(g)
@group.command()
@add_options(_shared_options)
@click.option('--force-task-definition', "force_td", is_flag=True)
@click.option('--fast-redeploy', "fast_redeploy", is_flag=True)
@click.option('--wait', "wait_for_service", is_flag=True)
@click.option('-r', '--region', 'aws_region', show_default=True, default="us-east-1")
def install(**kwargs):
""" Installs task definition and service to given cluster """
# Interpolate the project
td, ts, output = _render_project(**kwargs)
service_name = ts['serviceName']
# Install task-definition
os.environ['AWS_DEFAULT_REGION'] = kwargs['aws_region']
arn_td = ecs.install_or_update_task_definition(td, kwargs['force_td'])
arn_s = ecs.install_service(ts, arn_td)
# Echo ARNS
click.secho("Task-definition ARN: {}".format(arn_td))
click.secho("Service ARN: {}".format(arn_s))
# Kill all currently running tasks
if kwargs['fast_redeploy']:
ecs.kill_tasks(ts.get('cluster'), arn_s)
# Waiter
if kwargs['wait_for_service']:
client = boto3.client('ecs')
waiter = client.get_waiter('services_stable')
click.secho('Waiting until service is stable.')
for i in range(5):
try:
waiter.wait(cluster=ts['cluster'], services=[service_name])
except botocore.exceptions.WaiterError as e:
if "Max attempts exceeded" in e.message:
click.secho("Service wasn't started in 600s")
continue
click.secho(e.message)
if __name__ == '__main__':
group()
| 30.146667
| 95
| 0.6318
|
07fcbbe17dcb85754cd142c303928cfe3acbcfb3
| 2,577
|
py
|
Python
|
src/plugins/grass/scripts/qgis.v.kernel.rast.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
src/plugins/grass/scripts/qgis.v.kernel.rast.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
src/plugins/grass/scripts/qgis.v.kernel.rast.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | 1
|
2021-12-25T08:40:30.000Z
|
2021-12-25T08:40:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
qgis.v.kernel.rast.py
---------------------
Date : February 2010
Copyright : (C) 2010 by Radim Blazek
Email : radim dot blazek at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Radim Blazek'
__date__ = 'February 2010'
__copyright__ = '(C) 2010, Radim Blazek'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
############################################################################
#
# MODULE: qgis.v.kernel.rast.py
# PURPOSE:      Generate a raster density map from vector points data using v.kernel
#
#############################################################################
#%Module
#% description: Generates a raster density map from vector points data using a moving 2D isotropic Gaussian kernel.
#% keywords: vector, export, database
#%End
#%option
#% key: input
#% type: string
#% gisprompt: old,vector,vector
#% key_desc : name
#% description: Input vector with training points
#% required : yes
#%end
#%option
#% key: stddeviation
#% type: double
#% description: Standard deviation in map units
#% required : yes
#%end
#%option
#% key: output
#% type: string
#% gisprompt: new,cell,raster
#% key_desc : name
#% description: Output raster map
#% required : yes
#%end
try:
from grass.script import core as grass
except ImportError:
import grass
except:
raise Exception("Cannot find 'grass' Python module. Python is supported by GRASS from version >= 6.4")
def main():
input = options['input']
output = options['output']
stddeviation = options['stddeviation']
if grass.run_command('v.kernel', input=input, stddeviation=stddeviation, output=output) != 0:
grass.fatal("Cannot run v.kernel.")
if __name__ == "__main__":
options, flags = grass.parser()
main()
| 30.317647
| 115
| 0.523477
|
44959ec57df995358247aceb8dcf60c36c9ee4f6
| 3,150
|
py
|
Python
|
ietf/review/admin.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 25
|
2022-03-05T08:26:52.000Z
|
2022-03-30T15:45:42.000Z
|
ietf/review/admin.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 219
|
2022-03-04T17:29:12.000Z
|
2022-03-31T21:16:14.000Z
|
ietf/review/admin.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 22
|
2022-03-04T15:34:34.000Z
|
2022-03-28T13:30:59.000Z
|
# Copyright The IETF Trust 2016-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import simple_history
from django.contrib import admin
from ietf.review.models import (ReviewerSettings, ReviewSecretarySettings, UnavailablePeriod,
ReviewWish, NextReviewerInTeam, ReviewRequest, ReviewAssignment, ReviewTeamSettings )
class ReviewerSettingsAdmin(simple_history.admin.SimpleHistoryAdmin):
def acronym(self, obj):
return obj.team.acronym
list_display = ['id', 'person', 'acronym', 'min_interval', 'filter_re', 'remind_days_before_deadline', ]
list_filter = ["team"]
search_fields = ["person__name"]
ordering = ["-id"]
raw_id_fields = ["team", "person"]
admin.site.register(ReviewerSettings, ReviewerSettingsAdmin)
class ReviewSecretarySettingsAdmin(admin.ModelAdmin):
list_display = ['id', 'team', 'person', 'remind_days_before_deadline', 'max_items_to_show_in_reviewer_list', 'days_to_show_in_reviewer_list']
raw_id_fields = ['team', 'person']
admin.site.register(ReviewSecretarySettings, ReviewSecretarySettingsAdmin)
class UnavailablePeriodAdmin(simple_history.admin.SimpleHistoryAdmin):
list_display = ["person", "team", "start_date", "end_date", "availability", "reason"]
list_display_links = ["person"]
list_filter = ["team"]
date_hierarchy = "start_date"
search_fields = ["person__name"]
ordering = ["-id"]
raw_id_fields = ["team", "person"]
admin.site.register(UnavailablePeriod, UnavailablePeriodAdmin)
class ReviewWishAdmin(admin.ModelAdmin):
list_display = ["person", "team", "doc"]
list_display_links = ["person"]
list_filter = ["team"]
search_fields = ["person__name"]
ordering = ["-id"]
raw_id_fields = ["team", "person", "doc"]
admin.site.register(ReviewWish, ReviewWishAdmin)
class NextReviewerInTeamAdmin(admin.ModelAdmin):
list_display = ["team", "next_reviewer"]
list_display_links = ["team"]
ordering = ["team"]
raw_id_fields = ["team", "next_reviewer"]
admin.site.register(NextReviewerInTeam, NextReviewerInTeamAdmin)
class ReviewRequestAdmin(simple_history.admin.SimpleHistoryAdmin):
list_display = ["doc", "time", "type", "team", "deadline"]
list_display_links = ["doc"]
list_filter = ["team", "type", "state"]
ordering = ["-id"]
raw_id_fields = ["doc", "team", "requested_by"]
date_hierarchy = "time"
search_fields = ["doc__name"]
admin.site.register(ReviewRequest, ReviewRequestAdmin)
class ReviewAssignmentAdmin(simple_history.admin.SimpleHistoryAdmin):
list_display = ["review_request", "reviewer", "assigned_on", "result"]
list_filter = ["result", "state"]
ordering = ["-id"]
raw_id_fields = ["review_request", "reviewer", "result", "review"]
search_fields = ["review_request__doc__name"]
admin.site.register(ReviewAssignment, ReviewAssignmentAdmin)
class ReviewTeamSettingsAdmin(admin.ModelAdmin):
list_display = ["group", ]
search_fields = ["group__acronym", ]
raw_id_fields = ["group", ]
filter_horizontal = ["review_types", "review_results", "notify_ad_when"]
admin.site.register(ReviewTeamSettings, ReviewTeamSettingsAdmin)
| 37.5
| 145
| 0.725714
|
2ca1cbb12e0cc81816c6eec679e6f453251f9dec
| 5,789
|
py
|
Python
|
amt_eval/upload_human_eval_results.py
|
chateval/archive
|
70896454f270d9e04fc36342f33dbc04ba563c5f
|
[
"Apache-2.0"
] | 1
|
2021-01-30T12:43:36.000Z
|
2021-01-30T12:43:36.000Z
|
amt_eval/upload_human_eval_results.py
|
chateval/archive
|
70896454f270d9e04fc36342f33dbc04ba563c5f
|
[
"Apache-2.0"
] | null | null | null |
amt_eval/upload_human_eval_results.py
|
chateval/archive
|
70896454f270d9e04fc36342f33dbc04ba563c5f
|
[
"Apache-2.0"
] | null | null | null |
import sys
import argparse
from time import gmtime, strftime
from analyze_2choice_responses import *
def arguments():
parser = argparse.ArgumentParser(description="Arguments to upload the results into the ChatEval DB")
parser.add_argument("--username", "-u", required=False, default='jsedoc',
help="This is the username for the database [default: jsedoc]")
parser.add_argument("--password", "-p", required=True,
help="This is the password for the database")
parser.add_argument("--hostname", required=False, default="35.237.91.101",
help="This is the hostname/ip of the database")
parser.add_argument("--schema", "-s", required=False, default="demo",
help="This is the database schema [default: demo]")
parser.add_argument("--evalset-id", '-e', required=True, help="Evaluation set ID")
parser.add_argument("--model-1-id", required=True, help="model 1 ID")
parser.add_argument("--model-2-id", required=True, help="model 2 ID")
parser.add_argument("--path", required=True, help="path to the evaluation folder")
parser.add_argument("--dryrun", required=False, action='store_true', help="Dryrun without DB writing")
return parser.parse_args()
def connect(hostname="35.237.91.101", user="jsedoc", passwd="", db="demo"):
try:
import mysql.connector
db = mysql.connector.connect(host=hostname, user=user, passwd=passwd, db=db)
except:
import MySQLdb
db=MySQLdb.connect(host=hostname, user=user, passwd=passwd, db=db)
c=db.cursor()
return (c, db)
def get_last_ids(db_connector):
c = db_connector
c.execute('SELECT max(mturk_run_id_id), max(id) FROM demo.HumanEvaluationsABComparison')
(last_mturk_id, last_eval_id) = c.fetchone()
return (last_mturk_id, last_eval_id)
def get_eval_min_prompt(db_connector, evalset_id):
c = db_connector
c.execute('SELECT min(prompt_id) FROM demo.EvaluationDatasetText WHERE evaluationdataset_id=' + str(evalset_id))
min_prompt_id = c.fetchone()[0]
return min_prompt_id
def check_duplicate(db_connector, evalset_id, m1id, m2id):
c = db_connector
c.execute('SELECT * FROM demo.HumanEvaluations where evaluationdataset_id=' + str(evalset_id) + ' and model_1=' + str(m1id) + ' and model_2=' + str(m2id))
if len(c.fetchall())>0:
return True
return False
def upload_evaluation(evalset_id, m1id, m2id, path, mturk_run_id, eval_id, min_prompt_id):
insert_into_humanevals_table=True
target_files = open(path + '/order.txt').readlines()
target_files[0] = target_files[0].strip()
target_files[1] = target_files[1].strip()
examples = utils.process_source_and_responses(
os.path.abspath(os.path.join('../eval_data/ncm/neural_conv_model_eval_source.txt')), target_files)
examples_dict = {}
for example in examples:
examples_dict[example.key] = example
worker_results_list = pickle.load(open(path + '/amt_hit_responses.pkl','rb'))
for i,r in enumerate(worker_results_list):
try:
subdt = r['Assignments'][0]['AcceptTime']
except:
#import pdb; pdb.set_trace()
pass
utils.process_amt_hit_responses(worker_results_list, examples_dict)
for (key, ex) in examples_dict.items():
#print(ex.hits)
#import pdb; pdb.set_trace()
#for worker, vote, hit, accept_dt in zip(ex.workers, ex.votes, ex.hits, ex.acceptdates):
for worker, vote, hit in zip(ex.workers, ex.votes, ex.hits):
#print(worker + '\t' + m1.replace(' ','_')+'-'+m2.replace(' ','_')+'-'+key + '\t' + str(vote))
# HACK ---
dt = subdt
accept_dt = subdt
dt = accept_dt.strftime("%Y-%m-%d %H:%M:%S")
if insert_into_humanevals_table == True:
print("""INSERT INTO HumanEvaluations (Mturk_run_id, Submit_datetime, Results_path, Evaluationdataset_id, Model_1, Model_2) VALUES (%s, %s, %s, %s, %s, %s)""" , (mturk_run_id,dt,path,evalset_id,m1id,m2id))
c.execute("""INSERT INTO HumanEvaluations (Mturk_run_id, Submit_datetime, Results_path, Evaluationdataset_id, Model_1, Model_2) VALUES (%s, %s, %s, %s, %s, %s)""" , (mturk_run_id,dt,path,evalset_id,m1id,m2id))
insert_into_humanevals_table = False
print(eval_id, worker, hit, accept_dt, mturk_run_id, str(int(key.strip('ex-')) + 1))
print("""INSERT INTO `demo`.`HumanEvaluationsABComparison` (`id`, `worker_id`, `hit`, `accept_datetime`, `value`, `mturk_run_id_id`, `prompt_id`) VALUES (%s, %s, %s, %s, %s, %s, %s)""", (eval_id, worker, hit, dt, vote, mturk_run_id, str(int(key.strip('ex-')) + min_prompt_id)))
c.execute("""INSERT INTO `demo`.`HumanEvaluationsABComparison` (`id`, `worker_id`, `hit`, `accept_datetime`, `value`, `mturk_run_id_id`, `prompt_id`) VALUES (%s, %s, %s, %s, %s, %s, %s)""", (eval_id, worker, hit, dt, vote, mturk_run_id, str(int(key.strip('ex-')) + min_prompt_id)))
eval_id += 1
#import pdb; pdb.set_trace()
mturk_run_id += 1
if __name__ == "__main__":
args = arguments()
(c, db) = connect(passwd=args.password)
(last_mturk_id, last_eval_id) = get_last_ids(c)
min_prompt_id = get_eval_min_prompt(c, args.evalset_id)
if not check_duplicate(c, args.evalset_id, args.model_1_id, args.model_2_id):
upload_evaluation(args.evalset_id, args.model_1_id, args.model_2_id, args.path, last_mturk_id+1, last_eval_id+1, min_prompt_id)
if not args.dryrun:
print("committing to DB")
db.commit()
else:
print("This may be duplicate ... please check")
| 48.241667
| 293
| 0.648989
|
afb298e8268114b83aaa96c02238bc5f0ffa86e7
| 2,757
|
py
|
Python
|
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
import sys
from setuptools import setup, find_packages
def _get_version():
curr_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curr_dir, 'smart_open', 'version.py')) as fin:
#
# __version__ = '1.8.4'
#
line = fin.readline().strip()
parts = line.split(' ')
assert parts[0] == '__version__'
assert parts[1] == '='
return parts[2][1:-1]
#
# We cannot do "from smart_open.version import __version__" because that will
# require the dependencies for smart_open to already be in place, and that is
# not necessarily the case when running setup.py for the first time.
#
__version__ = _get_version()
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
tests_require = [
'mock',
'moto[server]',
'pathlib2',
'responses',
'boto3',
# Not used directly but allows boto GCE plugins to load.
# https://github.com/GoogleCloudPlatform/compute-image-packages/issues/262
'google-compute-engine==2.8.12'
]
install_requires = [
'boto >= 2.32',
'requests',
'boto3',
]
if sys.version_info[0] == 2:
install_requires.append('bz2file')
setup(
name='smart_open',
version=__version__,
description='Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description=read('README.rst'),
packages=find_packages(),
package_data={
"smart_open.tests": ["test_data/*gz"],
},
author='Radim Rehurek',
author_email='me@radimrehurek.com',
maintainer='Radim Rehurek',
maintainer_email='me@radimrehurek.com',
url='https://github.com/piskvorky/smart_open',
download_url='http://pypi.python.org/pypi/smart_open',
keywords='file streaming, s3, hdfs',
license='MIT',
platforms='any',
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
},
test_suite="smart_open.tests",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
| 25.766355
| 91
| 0.636924
|
6d1baf57b3d5956c146f40d1392263d2a10a1022
| 1,738
|
py
|
Python
|
setup.py
|
open-data-toronto/ckan-customization-open-data-toronto
|
43c8ddd2161cc5416c46741385468168147bf6ea
|
[
"MIT"
] | 2
|
2019-06-21T15:27:57.000Z
|
2019-08-15T11:47:48.000Z
|
setup.py
|
open-data-toronto/ckan-customization-open-data-toronto
|
43c8ddd2161cc5416c46741385468168147bf6ea
|
[
"MIT"
] | 1
|
2019-08-12T18:37:53.000Z
|
2019-08-12T18:37:53.000Z
|
setup.py
|
open-data-toronto/ckan-customization-open-data-toronto
|
43c8ddd2161cc5416c46741385468168147bf6ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="""ckanext-opendatatoronto""",
version="2.2.0",
description="""
    This extension contains plugins that modify and extend default CKAN features for the City of Toronto Open Data Portal.
""",
long_description=long_description,
url="https://github.com/open-data-toronto/ckan-customization-open-data-toronto",
author="""Open Data Toronto""",
author_email="""opendata@toronto.ca""",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
],
keywords="",
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
namespace_packages=["ckanext"],
install_requires=[],
include_package_data=True,
package_data={},
data_files=[],
entry_points="""
[ckan.plugins]
updateschema=ckanext.opendata.plugin:UpdateSchemaPlugin
extendedurl=ckanext.opendata.plugin:ExtendedURLPlugin
extendedapi=ckanext.opendata.plugin:ExtendedAPIPlugin
[babel.extractors]
ckan = ckan.lib.extract:extract_ckan
""",
message_extractors={
"ckanext": [
("**.py", "python", None),
("**.js", "javascript", None),
("**/templates/**.html", "ckan", None),
],
},
)
| 33.423077
| 127
| 0.644419
|
36e30cd5f41273f46639024876180d00af1ebb5e
| 8,135
|
py
|
Python
|
hexrd/ui/material_site_editor.py
|
bnmajor/hexrdgui
|
d19f7cf4a4469b0d3b6978f2f65c5e8a6bd81785
|
[
"BSD-3-Clause"
] | null | null | null |
hexrd/ui/material_site_editor.py
|
bnmajor/hexrdgui
|
d19f7cf4a4469b0d3b6978f2f65c5e8a6bd81785
|
[
"BSD-3-Clause"
] | null | null | null |
hexrd/ui/material_site_editor.py
|
bnmajor/hexrdgui
|
d19f7cf4a4469b0d3b6978f2f65c5e8a6bd81785
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from PySide2.QtCore import QObject, QSignalBlocker, Signal
from PySide2.QtWidgets import QSizePolicy, QTableWidgetItem
from hexrd.material import Material
from hexrd.ui.periodic_table_dialog import PeriodicTableDialog
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
from hexrd.ui.ui_loader import UiLoader
COLUMNS = {
'symbol': 0,
'occupancy': 1,
'thermal_factor': 2
}
DEFAULT_U = Material.DFLT_U[0]
OCCUPATION_MIN = 0
OCCUPATION_MAX = 1000
THERMAL_FACTOR_MIN = -1.e7
THERMAL_FACTOR_MAX = 1.e7
U_TO_B = 8 * np.pi ** 2
B_TO_U = 1 / U_TO_B
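# Isotropic thermal (Debye-Waller) factors are related by B = 8 * pi**2 * U, so U_TO_B
# converts a U value (typically in Angstrom**2) to B and B_TO_U is its inverse; this is
# the same relation the U()/B() helpers below rely on. For example, U = 0.01 corresponds
# to B = 8 * pi**2 * 0.01 ≈ 0.79 in the same units.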
class MaterialSiteEditor(QObject):
site_modified = Signal()
def __init__(self, site, parent=None):
super().__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('material_site_editor.ui', parent)
self._site = site
self.occupancy_spinboxes = []
self.thermal_factor_spinboxes = []
self.update_gui()
self.setup_connections()
def setup_connections(self):
self.ui.select_atom_types.pressed.connect(self.select_atom_types)
self.ui.thermal_factor_type.currentIndexChanged.connect(
self.update_thermal_factor_header)
self.ui.thermal_factor_type.currentIndexChanged.connect(
self.update_gui)
for w in self.site_settings_widgets:
w.valueChanged.connect(self.update_config)
self.ui.total_occupancy.valueChanged.connect(
self.update_occupancy_validity)
def select_atom_types(self):
dialog = PeriodicTableDialog(self.atom_types, self.ui)
if not dialog.exec_():
return
self.atom_types = dialog.selected_atoms
@property
def site(self):
return self._site
@site.setter
def site(self, v):
self._site = v
self.update_gui()
@property
def atoms(self):
return self.site['atoms']
@property
def total_occupancy(self):
return self.site['total_occupancy']
@total_occupancy.setter
def total_occupancy(self, v):
self.site['total_occupancy'] = v
@property
def fractional_coords(self):
return self.site['fractional_coords']
@property
def thermal_factor_type(self):
return self.ui.thermal_factor_type.currentText()
def U(self, val):
# Take a thermal factor from a spin box and convert it to U
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = B_TO_U
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def B(self, val):
# Take a thermal factor from a spin box and convert it to B
type = self.thermal_factor_type
if type == 'U':
multiplier = U_TO_B
elif type == 'B':
multiplier = 1
else:
raise Exception(f'Unknown type: {type}')
return val * multiplier
def thermal_factor(self, atom):
# Given an atom, return the thermal factor in either B or U
type = self.thermal_factor_type
if type == 'U':
multiplier = 1
elif type == 'B':
multiplier = U_TO_B
else:
raise Exception(f'Unknown type: {type}')
return atom['U'] * multiplier
@property
def atom_types(self):
return [x['symbol'] for x in self.site['atoms']]
@atom_types.setter
def atom_types(self, v):
if v == self.atom_types:
# No changes needed...
return
# Reset all the occupancies
atoms = self.atoms
atoms.clear()
atoms += [{'symbol': x, 'U': DEFAULT_U} for x in v]
self.reset_occupancies()
self.update_table()
self.emit_site_modified_if_valid()
def create_symbol_label(self, v):
w = QTableWidgetItem(v)
return w
def create_occupancy_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(OCCUPATION_MIN)
sb.setMaximum(OCCUPATION_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.occupancy_spinboxes.append(sb)
return sb
def create_thermal_factor_spinbox(self, v):
sb = ScientificDoubleSpinBox(self.ui.table)
sb.setKeyboardTracking(False)
sb.setMinimum(THERMAL_FACTOR_MIN)
sb.setMaximum(THERMAL_FACTOR_MAX)
sb.setValue(v)
sb.valueChanged.connect(self.update_config)
size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sb.setSizePolicy(size_policy)
self.thermal_factor_spinboxes.append(sb)
return sb
def clear_table(self):
self.occupancy_spinboxes.clear()
self.thermal_factor_spinboxes.clear()
self.ui.table.clearContents()
def update_gui(self):
widgets = self.site_settings_widgets
blockers = [QSignalBlocker(w) for w in widgets] # noqa: F841
self.ui.total_occupancy.setValue(self.total_occupancy)
for i, w in enumerate(self.fractional_coords_widgets):
w.setValue(self.fractional_coords[i])
self.update_table()
def update_table(self):
blocker = QSignalBlocker(self.ui.table) # noqa: F841
atoms = self.site['atoms']
self.clear_table()
self.ui.table.setRowCount(len(atoms))
for i, atom in enumerate(atoms):
w = self.create_symbol_label(atom['symbol'])
self.ui.table.setItem(i, COLUMNS['symbol'], w)
w = self.create_occupancy_spinbox(atom['occupancy'])
self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)
w = self.create_thermal_factor_spinbox(self.thermal_factor(atom))
self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)
self.update_occupancy_validity()
def update_thermal_factor_header(self):
w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
w.setText(self.thermal_factor_type)
def update_config(self):
self.total_occupancy = self.ui.total_occupancy.value()
for i, w in enumerate(self.fractional_coords_widgets):
self.fractional_coords[i] = w.value()
for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
atom['occupancy'] = spinbox.value()
for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
atom['U'] = self.U(spinbox.value())
self.update_occupancy_validity()
self.emit_site_modified_if_valid()
def reset_occupancies(self):
total = self.total_occupancy
atoms = self.atoms
num_atoms = len(atoms)
for atom in atoms:
atom['occupancy'] = total / num_atoms
self.update_occupancy_validity()
@property
def site_valid(self):
return self.occupancies_valid
@property
def occupancies_valid(self):
total_occupancy = sum(x['occupancy'] for x in self.atoms)
tol = 1.e-6
return abs(total_occupancy - self.site['total_occupancy']) < tol
def update_occupancy_validity(self):
valid = self.occupancies_valid
color = 'white' if valid else 'red'
msg = '' if valid else 'Sum of occupancies must equal total occupancy'
for w in self.occupancy_spinboxes + [self.ui.total_occupancy]:
w.setStyleSheet(f'background-color: {color}')
w.setToolTip(msg)
def emit_site_modified_if_valid(self):
if not self.site_valid:
return
self.site_modified.emit()
@property
def fractional_coords_widgets(self):
return [
self.ui.coords_x,
self.ui.coords_y,
self.ui.coords_z
]
@property
def site_settings_widgets(self):
return [
self.ui.total_occupancy
] + self.fractional_coords_widgets
| 28.745583
| 79
| 0.638844
|
2b55fcc3fa90f37554f936ff9165628cbc778e3a
| 3,441
|
py
|
Python
|
rich/highlighter.py
|
furunkel/rich
|
5cf16ae44efdc6d0db9797f00a2ff8aa744a7550
|
[
"MIT"
] | null | null | null |
rich/highlighter.py
|
furunkel/rich
|
5cf16ae44efdc6d0db9797f00a2ff8aa744a7550
|
[
"MIT"
] | null | null | null |
rich/highlighter.py
|
furunkel/rich
|
5cf16ae44efdc6d0db9797f00a2ff8aa744a7550
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, Union
from .text import Text
class Highlighter(ABC):
"""Abstract base class for highlighters."""
def __call__(self, text: Union[str, Text]) -> Text:
"""Highlight a str or Text instance.
Args:
text (Union[str, ~Text]): Text to highlight.
Raises:
TypeError: If not called with text or str.
Returns:
            Text: A text instance with highlighting applied.
"""
if isinstance(text, str):
highlight_text = Text(text)
elif isinstance(text, Text):
highlight_text = text.copy()
else:
raise TypeError(f"str or Text instance required, not {text!r}")
self.highlight(highlight_text)
return highlight_text
@abstractmethod
def highlight(self, text: Text) -> None:
"""Apply highlighting in place to text.
Args:
            text (~Text): A text object to highlight.
"""
class NullHighlighter(Highlighter):
"""A highlighter object that doesn't highlight.
May be used to disable highlighting entirely.
"""
def highlight(self, text: Text) -> None:
"""Nothing to do"""
class RegexHighlighter(Highlighter):
"""Applies highlighting from a list of regular expressions."""
highlights: List[str] = []
base_style: str = ""
def highlight(self, text: Text) -> None:
"""Highlight :class:`rich.text.Text` using regular expressions.
Args:
            text (~Text): Text to be highlighted.
"""
highlight_regex = text.highlight_regex
for re_highlight in self.highlights:
highlight_regex(re_highlight, style_prefix=self.base_style)
class ReprHighlighter(RegexHighlighter):
"""Highlights the text typically produced from ``__repr__`` methods."""
base_style = "repr."
highlights = [
r"(?P<brace>[\{\[\(\)\]\}])",
r"(?P<tag_start>\<)(?P<tag_name>\w*)(?P<tag_contents>.*?)(?P<tag_end>\>)",
r"(?P<attrib_name>\w+?)=(?P<attrib_value>\"?\S+\"?)",
r"(?P<bool_true>True)|(?P<bool_false>False)|(?P<none>None)",
r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*\b)",
r"(?P<number>0x[0-9a-f]*)",
r"(?P<path>\B(\/[\w\.\-\_\+]+)*\/)(?P<filename>[\w\.\-\_\+]*)?",
r"(?<!\\)(?P<str>b?\'\'\'.*?(?<!\\)\'\'\'|b?\'.*?(?<!\\)\'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
r"(?P<url>https?:\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%]*)",
r"(?P<uuid>[a-fA-F0-9]{8}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{4}\-[a-fA-F0-9]{12})",
]
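# A custom highlighter follows the same pattern: subclass RegexHighlighter, set a
# base_style prefix, and list regexes whose named groups map onto theme styles. A minimal
# sketch (the class name, regex, and "email.email" style are illustrative assumptions,
# not part of this module):
#
#     from rich.console import Console
#     from rich.theme import Theme
#
#     class EmailHighlighter(RegexHighlighter):
#         base_style = "email."
#         highlights = [r"(?P<email>[\w.+-]+@[\w-]+\.[\w.-]+)"]
#
#     console = Console(highlighter=EmailHighlighter(),
#                       theme=Theme({"email.email": "bold magenta"}))
#     console.print("Send feedback to mail@example.org")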
if __name__ == "__main__": # pragma: no cover
from .console import Console
console = Console()
console.print("[bold green]hello world![/bold green]")
console.print("'[bold green]hello world![/bold green]'")
console.print(" /foo")
console.print("/foo/")
console.print("/foo/bar")
console.print("foo/bar/baz")
console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
console.print("/foo/bar/baz/")
console.print("/foo/bar/baz/egg")
console.print("/foo/bar/baz/egg.py")
console.print("/foo/bar/baz/egg.py word")
console.print(" /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/ba._++z/egg+.py word")
console.print("https://example.org?foo=bar")
| 31.861111
| 111
| 0.553618
|
1ef436ba4409779ba22f64805880a5a71796ad0f
| 3,032
|
py
|
Python
|
sheet06/exercise06_2.py
|
chlewe/spatiotemporal-modelling
|
e99450f66825eed346dd2efe6590138ac56eae59
|
[
"Unlicense"
] | null | null | null |
sheet06/exercise06_2.py
|
chlewe/spatiotemporal-modelling
|
e99450f66825eed346dd2efe6590138ac56eae59
|
[
"Unlicense"
] | null | null | null |
sheet06/exercise06_2.py
|
chlewe/spatiotemporal-modelling
|
e99450f66825eed346dd2efe6590138ac56eae59
|
[
"Unlicense"
] | null | null | null |
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.curdir)))
import sim
from evaluation import plot_nxm
from kernel import kernel_e_2d_gaussian
from lists import CellList2D, VerletList
from typing import Tuple
from numpy import ndarray
from sim_impl import simulate_2d, pse_predict_u_2d
def delta(a: float, x: float):
return 1 / (a * math.sqrt(math.pi)) * math.exp(-(x / a) ** 2)
def u0(x: float, y: float):
a = 1 / 16
x_ = x - 1 / 4
y_ = y - 1 / 2
return delta(a, x_) * delta(a, y_)
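# delta(a, x) is the normalized Gaussian (1 / (a * sqrt(pi))) * exp(-(x / a)**2), whose
# integral over the real line is 1 for any width a, so it approximates a Dirac delta as
# a -> 0. u0 is therefore (approximately) a unit point source centred at (1/4, 1/2),
# smeared over a width a = 1/16 in each direction.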
def initial_particles() -> Tuple[ndarray, VerletList]:
_particles = np.zeros((sim.particle_number_per_dim ** 2, 3))
for i in range(0, sim.particle_number_per_dim):
for j in range(0, sim.particle_number_per_dim):
x = i * sim.h
y = j * sim.h
mass = u0(x, y) * sim.volume_p
_particles[i * sim.particle_number_per_dim + j][:] = x, y, mass
_cells = CellList2D(_particles[:, 0:2])
_verlet = VerletList(_particles[:, 0:2], _cells)
return _particles, _verlet
def apply_diffusion(_particles: ndarray, _verlet: VerletList) -> ndarray:
updated_particles = np.zeros((sim.particle_number_per_dim ** 2, 3))
for i in range(0, sim.particle_number_per_dim ** 2):
p = _particles[i]
summed_mass_interaction = 0
for j in _verlet[i]:
q = _particles[j]
kernel_value = kernel_e_2d_gaussian(p, q)
mass_difference = q[2] - p[2]
summed_mass_interaction += mass_difference * kernel_value
d_mass = sim.volume_p * sim.D / (sim.epsilon ** 2) * summed_mass_interaction
updated_mass = p[2] + d_mass * sim.dt
updated_particles[i][:] = p[0], p[1], updated_mass
return updated_particles
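# apply_diffusion performs one explicit Euler step of the particle strength exchange
# (PSE) approximation of the diffusion operator: in the notation of this file,
#     d m_p / dt ≈ (volume_p * D / epsilon**2) * sum_q (m_q - m_p) * eta_eps(x_p - x_q),
# where eta_eps is the 2D Gaussian PSE kernel (kernel_e_2d_gaussian) and the sum runs
# over the Verlet-list neighbours of particle p within the cutoff.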
if __name__ == "__main__":
sim.D = 2
sim.domain_lower_bound = 0
sim.domain_upper_bound = 1
sim.particle_number_per_dim = 26
sim.h = (sim.domain_upper_bound - sim.domain_lower_bound) / (sim.particle_number_per_dim - 1)
sim.epsilon = sim.h
sim.volume_p = sim.h ** 2
sim.cutoff = 3 * sim.epsilon
sim.cell_side = sim.cutoff
sim.t_max = 0.3
sim.dt = sim.h ** 2 / (3 * sim.D)
particles, verlet = initial_particles()
particle_evolution = simulate_2d(particles, verlet, 4, apply_diffusion)
#######################################
# 4-in-1 plot
#######################################
xyu_coords = []
t_coords = []
for t in range(0, 4):
x_coords, y_coords, concentration = pse_predict_u_2d(particle_evolution[t][1], 0)
xyu_coords.append((x_coords, y_coords, concentration))
t_coords.append(round(particle_evolution[t][0], 2))
fig = plot_nxm(xyu_coords, 2, 2,
zlabels=("u", "u", "u", "u"),
titles=("t={}".format(t_coords[0]), "t={}".format(t_coords[1]),
"t={}".format(t_coords[2]), "t={}".format(t_coords[3])))
plt.show()
| 31.257732
| 97
| 0.610158
|
1eb7b7f32752257ce0976816f4a5e0ab9b290d7d
| 325
|
py
|
Python
|
networking_p4/db/migration/alembic_migrations/versions/pike/contract/dfd1a1f22c4180_initial.py
|
osinstom/networking-p4
|
3b82025090b6b2bf1e9faa58492d13ed1da6c34f
|
[
"Apache-2.0"
] | 2
|
2019-12-26T08:53:25.000Z
|
2020-05-13T11:53:51.000Z
|
networking_p4/db/migration/alembic_migrations/versions/pike/contract/dfd1a1f22c4180_initial.py
|
osinstom/networking-dppx
|
3b82025090b6b2bf1e9faa58492d13ed1da6c34f
|
[
"Apache-2.0"
] | null | null | null |
networking_p4/db/migration/alembic_migrations/versions/pike/contract/dfd1a1f22c4180_initial.py
|
osinstom/networking-dppx
|
3b82025090b6b2bf1e9faa58492d13ed1da6c34f
|
[
"Apache-2.0"
] | null | null | null |
"""start networking-p4 contract branch
Revision ID: dfd1a1f22c4180
Create Date: 2018-03-13 12:34:56.789098
"""
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = 'dfd1a1f22c4180'
down_revision = 'start_networking_p4'
branch_labels = (cli.CONTRACT_BRANCH,)
def upgrade():
pass
| 17.105263
| 40
| 0.76
|
943076f8a67e8c67acf2c71aaaa1ebe68de98b01
| 2,949
|
py
|
Python
|
src/app/api/material.py
|
SLeRest/3DSliceServer
|
3f93b240ef2b55200a7bf725bb983c8146b1cc9a
|
[
"MIT"
] | 1
|
2021-05-21T10:27:08.000Z
|
2021-05-21T10:27:08.000Z
|
src/app/api/material.py
|
SLeRest/3DSliceServer
|
3f93b240ef2b55200a7bf725bb983c8146b1cc9a
|
[
"MIT"
] | null | null | null |
src/app/api/material.py
|
SLeRest/3DSliceServer
|
3f93b240ef2b55200a7bf725bb983c8146b1cc9a
|
[
"MIT"
] | null | null | null |
from fastapi import (
APIRouter,
Depends,
Query
)
from fastapi_jwt_auth import AuthJWT
from typing import List, Optional
from sqlalchemy.orm import Session
from schema.material import MaterialOut, MaterialIn, MaterialPatch
from dependencies.database import get_db
import crud.material as crud_material
from fastapi import HTTPException
router = APIRouter()
@router.get("", response_model=List[MaterialOut], name="users:list-users")
async def list_materials(
supplier: Optional[str] = Query(
None,
title="Supplier",
description="Query string for filter material by supplier",
),
name: Optional[str] = Query(
None,
title="Name",
description="Query string for filter material by name",
),
general_type: Optional[str] = Query(
None,
title="General type",
description="Query string for filter material by general type",
),
specific_type: Optional[str] = Query(
None,
title="Specific type",
description="Query string for filter material by specific type",
),
Authorize: AuthJWT = Depends(),
db: Session = Depends(get_db)) -> List[MaterialOut]:
Authorize.jwt_required()
materials = crud_material.list_materials(
db, supplier, name, general_type, specific_type)
if materials is None:
return []
for i, m in enumerate(materials):
materials[i] = materials[i].ToMaterialOut()
return materials
@router.get("/{id_material}", response_model=MaterialOut, name="materials:get-material")
async def get_material(
id_material: int,
Authorize: AuthJWT = Depends(),
db: Session = Depends(get_db)) -> MaterialOut:
Authorize.jwt_required()
m = crud_material.get_material(id_material, db)
return m.ToMaterialOut()
@router.post("", response_model=MaterialOut, name="materials:create-material")
async def create_material(
material: MaterialIn,
Authorize: AuthJWT = Depends(),
db: Session = Depends(get_db)) -> MaterialOut:
Authorize.jwt_required()
m = crud_material.create_material(material, db)
return m.ToMaterialOut()
@router.patch("/{id_material}", response_model=MaterialOut, name="materials:modify-material")
async def patch_material(
id_material: int,
material: MaterialPatch,
Authorize: AuthJWT = Depends(),
db: Session = Depends(get_db)) -> MaterialOut:
Authorize.jwt_required()
m = crud_material.patch_material(db, material, id_material)
return m.ToMaterialOut()
@router.delete("/{id_material}", status_code=204, name="materials:delete-material")
async def delete_material(
id_material: int,
Authorize: AuthJWT = Depends(),
db: Session = Depends(get_db)):
Authorize.jwt_required()
m = crud_material.delete_material(db, id_material)
| 35.107143
| 93
| 0.666667
|
64adb0d619f45834b3a3f764d0ea503fca008950
| 643
|
py
|
Python
|
tests/test_init.py
|
vbhavsar/scanctl
|
7fdf8629040d9b0e07f1698ba71ef32c00f620d1
|
[
"Apache-2.0"
] | 3
|
2018-06-15T17:36:18.000Z
|
2019-04-02T04:23:47.000Z
|
tests/test_init.py
|
vbhavsar/scanctl
|
7fdf8629040d9b0e07f1698ba71ef32c00f620d1
|
[
"Apache-2.0"
] | 50
|
2018-09-20T13:30:16.000Z
|
2021-03-25T21:56:28.000Z
|
tests/test_init.py
|
vbhavsar/scanctl
|
7fdf8629040d9b0e07f1698ba71ef32c00f620d1
|
[
"Apache-2.0"
] | 3
|
2018-11-26T23:56:11.000Z
|
2022-03-19T10:04:19.000Z
|
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_it_works():
assert True
| 35.722222
| 77
| 0.721617
|
7f5b8d60d97bab397046ce2f36ee6f2c54f7e6d2
| 487
|
py
|
Python
|
Curso Python/Aula07/Exercicios/Exercicios07.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
Curso Python/Aula07/Exercicios/Exercicios07.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
Curso Python/Aula07/Exercicios/Exercicios07.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
# Python Exercise 07 - Prime Numbers
# Exercise: write a program that reads an integer and says whether or not it is a prime number.
núm = int(input('Enter a number: '))
tot = 0
for c in range(1, núm + 1):
    if núm % c == 0:
        tot += 1  # count the divisors of núm
        print('', end=' ')
    else:
        print('', end=' ')
    print('{}'.format(c), end=' ')
print('The number {} was divisible {} times'.format(núm, tot))
if tot == 2:
    print('That is why it IS PRIME!')
else:
    print('That is why it is NOT PRIME!')
| 28.647059
| 95
| 0.593429
|
f41553b7611d9bda1566d94aebd5cfb93d32c23b
| 20,624
|
py
|
Python
|
bddm/sampler/sampler.py
|
tencent-ailab/bddm
|
8c3f807e84f0ebf1a4942a990f369a92cba79c61
|
[
"Apache-2.0"
] | 76
|
2022-03-25T08:28:34.000Z
|
2022-03-31T07:44:25.000Z
|
bddm/sampler/sampler.py
|
shaun95/bddm
|
c78786e6de6b58c7c6ac4f97e22fe08b99a4d88a
|
[
"Apache-2.0"
] | 1
|
2022-03-29T15:49:16.000Z
|
2022-03-29T15:49:16.000Z
|
bddm/sampler/sampler.py
|
shaun95/bddm
|
c78786e6de6b58c7c6ac4f97e22fe08b99a4d88a
|
[
"Apache-2.0"
] | 10
|
2022-03-25T14:26:18.000Z
|
2022-03-30T03:11:10.000Z
|
#!/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# BDDM Sampler (Supports Noise Scheduling and Sampling)
#
# Author: Max W. Y. Lam (maxwylam@tencent.com)
# Copyright (c) 2021Tencent. All Rights Reserved
#
########################################################################
from __future__ import absolute_import
import os
import librosa
import torch
import numpy as np
from scipy.io.wavfile import write as wavwrite
from pystoi import stoi
from pypesq import pesq
from bddm.utils.log_utils import log
from bddm.utils.check_utils import check_score_network
from bddm.utils.check_utils import check_schedule_network
from bddm.utils.diffusion_utils import compute_diffusion_params
from bddm.utils.diffusion_utils import map_noise_scale_to_time_step
from bddm.models import get_score_network, get_schedule_network
from bddm.loader.dataset import create_generation_dataloader
from bddm.loader.dataset import create_train_and_valid_dataloader
class Sampler(object):
metric2index = {"PESQ": 0, "STOI": 1}
def __init__(self, config):
"""
Sampler Class, implements the Noise Scheduling and Sampling algorithms in BDDMs
Parameters:
config (namespace): BDDM Configuration
"""
self.config = config
self.exp_dir = config.exp_dir
self.clip = config.grad_clip
self.load = config.load
self.model = get_score_network(config).cuda().eval()
self.schedule = None
# Initialize diffusion parameters using a pre-specified linear schedule
noise_schedule = torch.linspace(config.beta_0, config.beta_T, config.T).cuda()
self.diff_params = compute_diffusion_params(noise_schedule)
if self.config.command != 'train':
# Find schedule net, if not trained then use DDPM or DDIM sampling mode
schedule_net_trained, schedule_net_path = check_schedule_network(config)
if not schedule_net_trained:
_, score_net_path = check_score_network(config)
assert score_net_path is not None, 'Error: No score network can be found!'
self.config.load = score_net_path
else:
self.config.load = schedule_net_path
self.model.schedule_net = get_schedule_network(config).cuda().eval()
# Perform noise scheduling when noise schedule file (.schedule) is not given
if self.config.command == 'generate' and self.config.sampling_noise_schedule != '':
# Generation mode given pre-searched noise schedule
self.schedule = torch.load(self.config.sampling_noise_schedule, map_location='cpu')
self.reset()
def reset(self):
"""
Reset sampling environment
"""
if self.config.command != 'train' and self.load != '':
package = torch.load(self.load, map_location=lambda storage, loc: storage.cuda())
self.model.load_state_dict(package['model_state_dict'])
log('Loaded checkpoint %s' % self.load, self.config)
if self.config.command == 'generate':
# Given Mel-spectrogram directory for speech vocoding
self.gen_loader = create_generation_dataloader(self.config)
else:
# Sample a reference audio sample for noise scheduling
_, self.vl_loader = create_train_and_valid_dataloader(self.config)
self.draw_reference_data_pair()
def draw_reference_data_pair(self):
"""
Draw a new input-output pair for noise scheduling
"""
self.ref_spec, self.ref_audio = next(iter(self.vl_loader))
self.ref_spec, self.ref_audio = self.ref_spec.cuda(), self.ref_audio.cuda()
def generate(self):
"""
Start the generation process
"""
generate_dir = os.path.join(self.exp_dir, 'generated')
os.makedirs(generate_dir, exist_ok=True)
scores = {metric: [] for metric in self.metric2index}
for filepath, mel_spec, audio in self.gen_loader:
mel_spec = mel_spec.cuda()
generated_audio, n_steps = self.sampling(schedule=self.schedule,
condition=mel_spec,
ddim=self.config.use_ddim_steps)
audio_key = '.'.join(filepath[0].split('/')[-1].split('.')[:-1])
if len(audio) > 0:
# Assess the generated audio with the reference audio
self.ref_audio = audio
score = self.assess(generated_audio, audio_key=audio_key)
for metric in self.metric2index:
scores[metric].append(score[self.metric2index[metric]])
model_name = 'BDDM' if self.schedule is not None else (
'DDIM' if self.config.use_ddim_steps else 'DDPM')
generated_file = os.path.join(generate_dir,
'%s_by_%s-%d.wav'%(audio_key, model_name, n_steps))
wavwrite(generated_file, self.config.sampling_rate,
generated_audio.squeeze().cpu().numpy())
log('Generated '+generated_file, self.config)
        log('Avg Scores: PESQ = %.3f +/- %.3f, STOI = %.5f +/- %.5f'%(
            np.mean(scores['PESQ']), 1.96 * np.std(scores['PESQ']),
            np.mean(scores['STOI']), 1.96 * np.std(scores['STOI'])), self.config)
def noise_scheduling(self, ddim=False):
"""
Start the noise scheduling process
Parameters:
ddim (bool): whether to use the DDIM's p_theta for noise scheduling or not
Returns:
ts_infer (tensor): the step indices estimated by BDDM
a_infer (tensor): the alphas estimated by BDDM
b_infer (tensor): the betas estimated by BDDM
s_infer (tensor): the std. deviations estimated by BDDM
"""
max_steps = self.diff_params["N"]
alpha = self.diff_params["alpha"]
alpha_param = self.diff_params["alpha_param"]
beta_param = self.diff_params["beta_param"]
min_beta = self.diff_params["beta"].min()
betas = []
x = torch.normal(0, 1, size=self.ref_audio.shape).cuda()
with torch.no_grad():
b_cur = torch.ones(1, 1, 1).cuda() * beta_param
a_cur = torch.ones(1, 1, 1).cuda() * alpha_param
for n in range(max_steps - 1, -1, -1):
step = map_noise_scale_to_time_step(a_cur.squeeze().item(), alpha)
if step >= 0:
betas.append(b_cur.squeeze().item())
else:
break
ts = (step * torch.ones((1, 1))).cuda()
e = self.model((x, self.ref_spec.clone(), ts,))
a_nxt = a_cur / (1 - b_cur).sqrt()
if ddim:
c1 = a_nxt / a_cur
c2 = -(1 - a_cur**2.).sqrt() * c1
x = c1 * x + c2 * e
c3 = (1 - a_nxt**2.).sqrt()
x = x + c3 * e
else:
x = x - b_cur / torch.sqrt(1 - a_cur**2.) * e
x = x / torch.sqrt(1 - b_cur)
if n > 0:
z = torch.normal(0, 1, size=self.ref_audio.shape).cuda()
x = x + torch.sqrt((1 - a_nxt**2.) / (1 - a_cur**2.) * b_cur) * z
a_nxt, beta_nxt = a_cur, b_cur
a_cur = a_nxt / (1 - beta_nxt).sqrt()
if a_cur > 1:
break
b_cur = self.model.schedule_net(x.squeeze(1),
(beta_nxt.view(-1, 1), (1 - a_cur**2.).view(-1, 1)))
if b_cur.squeeze().item() < min_beta:
break
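        # The per-step betas collected above are converted into the cumulative
        # quantities used at sampling time, following the usual diffusion identities:
        #   alpha_bar_n = prod_{i<=n} (1 - beta_i)
        #   sigma_n^2   = beta_n * (1 - alpha_bar_{n-1}) / (1 - alpha_bar_n)
        # a_infer and s_infer below hold the square roots of these quantities.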
b_infer = torch.FloatTensor(betas[::-1]).cuda()
a_infer = 1 - b_infer
s_infer = b_infer + 0
for n in range(1, len(b_infer)):
a_infer[n] *= a_infer[n-1]
s_infer[n] *= (1 - a_infer[n-1]) / (1 - a_infer[n])
a_infer = torch.sqrt(a_infer)
s_infer = torch.sqrt(s_infer)
# Mapping noise scales to time steps
ts_infer = []
for n in range(len(b_infer)):
step = map_noise_scale_to_time_step(a_infer[n], alpha)
if step >= 0:
ts_infer.append(step)
ts_infer = torch.FloatTensor(ts_infer)
return ts_infer, a_infer, b_infer, s_infer
def sampling(self, schedule=None, condition=None,
ddim=0, return_sequence=False, audio_size=None):
"""
Perform the sampling algorithm
Parameters:
schedule (list): the [ts_infer, a_infer, b_infer, s_infer]
returned by the noise scheduling algorithm
condition (tensor): the condition for computing scores
ddim (bool): whether to use the DDIM for sampling or not
return_sequence (bool): whether returning all steps' samples or not
audio_size (list): the shape of the audio to be sampled
Returns:
xs (list): (if return_sequence) the list of generated audios
x (tensor): the generated audio(s) in shape=audio_size
N (int): the number of sampling steps
"""
n_steps = self.diff_params["T"]
if condition is None:
condition = self.ref_spec
if audio_size is None:
audio_length = condition.size(-1) * self.config.hop_len
audio_size = (1, 1, audio_length)
if schedule is None:
if ddim > 1:
# Use DDIM (linear) for sampling ({ddim} steps)
ts_infer = torch.linspace(0, n_steps - 1, ddim).long()
a_infer = self.diff_params["alpha"].index_select(0, ts_infer.cuda())
b_infer = self.diff_params["beta"].index_select(0, ts_infer.cuda())
s_infer = self.diff_params["sigma"].index_select(0, ts_infer.cuda())
else:
# Use DDPM for sampling (complete T steps)
# P.S. if ddim = 1, run DDIM reverse process for T steps
ts_infer = torch.linspace(0, n_steps - 1, n_steps)
a_infer = self.diff_params["alpha"]
b_infer = self.diff_params["beta"]
s_infer = self.diff_params["sigma"]
else:
ts_infer, a_infer, b_infer, s_infer = schedule
sampling_steps = len(ts_infer)
x = torch.normal(0, 1, size=audio_size).cuda()
if return_sequence:
xs = []
with torch.no_grad():
for n in range(sampling_steps - 1, -1, -1):
if sampling_steps > 50 and (sampling_steps - n) % 50 == 0:
# Log progress per 50 steps when sampling_steps is large
log('\tComputed %d / %d steps !'%(
sampling_steps - n, sampling_steps), self.config)
ts = (ts_infer[n] * torch.ones((1, 1))).cuda()
e = self.model((x, condition, ts,))
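                # Both branches below are standard reverse updates given the predicted noise e:
                # DDIM takes the deterministic step
                #   x <- (a_nxt / a_n) * (x - sqrt(1 - a_n^2) * e) + sqrt(1 - a_nxt^2) * e,
                # while DDPM subtracts the scaled noise, rescales by 1 / sqrt(1 - beta_n),
                # and adds sigma_n * z for every step except the last.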
if ddim:
if n > 0:
a_nxt = a_infer[n - 1]
else:
a_nxt = a_infer[n] / (1 - b_infer[n]).sqrt()
c1 = a_nxt / a_infer[n]
c2 = -(1 - a_infer[n]**2.).sqrt() * c1
c3 = (1 - a_nxt**2.).sqrt()
x = c1 * x + (c2 + c3) * e
else:
x = x - b_infer[n] / torch.sqrt(1 - a_infer[n]**2.) * e
x = x / torch.sqrt(1 - b_infer[n])
if n > 0:
z = torch.normal(0, 1, size=audio_size).cuda()
x = x + s_infer[n] * z
if return_sequence:
xs.append(x)
if return_sequence:
return xs
return x, sampling_steps
def noise_scheduling_with_params(self, alpha_param, beta_param):
"""
Run noise scheduling for once given the (alpha_param, beta_param) pair
Parameters:
alpha_param (float): a hyperparameter defining the alpha_hat value at step N
beta_param (float): a hyperparameter defining the beta_hat value at step N
"""
log('TRY alpha_param=%.2f, beta_param=%.2f:'%(
alpha_param, beta_param), self.config)
# Define the pair key
key = '%.2f,%.2f' % (alpha_param, beta_param)
# Set alpha_param and beta_param in self.diff_params
self.diff_params['alpha_param'] = alpha_param
self.diff_params['beta_param'] = beta_param
# Use DDPM reverse process for noise scheduling
ddpm_schedule = self.noise_scheduling(ddim=False)
log("\tSearched a %d-step schedule using DDPM reverse process" % (
len(ddpm_schedule[0])), self.config)
generated_audio, _ = self.sampling(schedule=ddpm_schedule)
# Compute objective scores
ddpm_score = self.assess(generated_audio)
# Get the number of sampling steps with this schedule
steps = len(ddpm_schedule[0])
# Compare the performance with previous same-step schedule using the metric
if steps not in self.steps2score:
# Save the first schedule with this number of steps
self.steps2score[steps] = [key, ] + ddpm_score
self.steps2schedule[steps] = ddpm_schedule
log('\tFound the first %d-step schedule: (PESQ, STOI) = (%.2f, %.3f)'%(
steps, ddpm_score[0], ddpm_score[1]), self.config)
elif ddpm_score[0] > self.steps2score[steps][1] and ddpm_score[1] > self.steps2score[steps][2]:
# Found a better same-step schedule achieving a higher score
log('\tFound a better %d-step schedule: (PESQ, STOI) = (%.2f, %.3f) -> (%.2f, %.3f)'%(
steps, self.steps2score[steps][1], self.steps2score[steps][2],
ddpm_score[0], ddpm_score[1]), self.config)
self.steps2score[steps] = [key, ] + ddpm_score
self.steps2schedule[steps] = ddpm_schedule
# Use DDIM reverse process for noise scheduling
ddim_schedule = self.noise_scheduling(ddim=True)
log("\tSearched a %d-step schedule using DDIM reverse process" % (
len(ddim_schedule[0])), self.config)
generated_audio, _ = self.sampling(schedule=ddim_schedule)
# Compute objective scores
ddim_score = self.assess(generated_audio)
# Get the number of sampling steps with this schedule
steps = len(ddim_schedule[0])
# Compare the performance with previous same-step schedule using the metric
if steps not in self.steps2score:
# Save the first schedule with this number of steps
self.steps2score[steps] = [key, ] + ddim_score
self.steps2schedule[steps] = ddim_schedule
log('\tFound the first %d-step schedule: (PESQ, STOI) = (%.2f, %.3f)'%(
steps, ddim_score[0], ddim_score[1]), self.config)
elif ddim_score[0] > self.steps2score[steps][1] and ddim_score[1] > self.steps2score[steps][2]:
# Found a better same-step schedule achieving a higher score
log('\tFound a better %d-step schedule: (PESQ, STOI) = (%.2f, %.3f) -> (%.2f, %.3f)'%(
steps, self.steps2score[steps][1], self.steps2score[steps][2],
ddim_score[0], ddim_score[1]), self.config)
self.steps2score[steps] = [key, ] + ddim_score
self.steps2schedule[steps] = ddim_schedule
def noise_scheduling_without_params(self):
"""
Search for the best noise scheduling hyperparameters: (alpha_param, beta_param)
"""
# Noise scheduling mode, given N
self.reverse_process = 'BDDM'
assert 'N' in vars(self.config).keys(), 'Error: N is undefined for BDDM!'
self.diff_params["N"] = self.config.N
# Init search result dictionaries
self.steps2schedule, self.steps2score = {}, {}
search_bins = int(self.config.bddm_search_bins)
# Define search range of alpha_param
alpha_last = self.diff_params["alpha"][-1].squeeze().item()
alpha_first = self.diff_params["alpha"][0].squeeze().item()
alpha_diff = (alpha_first - alpha_last) / (search_bins + 1)
alpha_param_list = [alpha_last + alpha_diff * (i + 1) for i in range(search_bins)]
# Define search range of beta_param
beta_diff = 1. / (search_bins + 1)
beta_param_list = [beta_diff * (i + 1) for i in range(search_bins)]
# Search for beta_param and alpha_param, take O(search_bins^2)
for beta_param in beta_param_list:
for alpha_param in alpha_param_list:
if alpha_param > (1 - beta_param) ** 0.5:
# Invalid range
continue
# Update the scores and noise schedules with (alpha_param, beta_param)
self.noise_scheduling_with_params(alpha_param, beta_param)
# Lastly, repeat the random starting point (x_hat_N) and choose the best schedule
noise_schedule_dir = os.path.join(self.exp_dir, 'noise_schedules')
os.makedirs(noise_schedule_dir, exist_ok=True)
steps_list = list(self.steps2score.keys())
for steps in steps_list:
log("-"*80, self.config)
log("Select the best out of %d x_hat_N ~ N(0,I) for %d steps:"%(
self.config.noise_scheduling_attempts, steps), self.config)
# Get current best pair
key = self.steps2score[steps][0]
# Get back the best (alpha_param, beta_param) pair for a given steps
alpha_param, beta_param = list(map(float, key.split(',')))
# Repeat K times for a given number of steps
for _ in range(self.config.noise_scheduling_attempts):
# Random +/- 5%
_alpha_param = alpha_param * (0.95 + np.random.rand() * 0.1)
_beta_param = beta_param * (0.95 + np.random.rand() * 0.1)
# Update the scores and noise schedules with (alpha_param, beta_param)
self.noise_scheduling_with_params(_alpha_param, _beta_param)
        # Save the best searched noise schedule ({N}steps_PESQ{pesq}_STOI{stoi}.ns)
for steps in sorted(self.steps2score.keys(), reverse=True):
filepath = os.path.join(noise_schedule_dir, '%dsteps_PESQ%.2f_STOI%.3f.ns'%(
steps, self.steps2score[steps][1], self.steps2score[steps][2]))
torch.save(self.steps2schedule[steps], filepath)
log("Saved searched schedule: %s" % filepath, self.config)
def assess(self, generated_audio, audio_key=None):
"""
Assess the generated audio using objective metrics: PESQ and STOI.
P.S. Users should first install pypesq and pystoi using pip
Parameters:
generated_audio (tensor): the generated audio to be assessed
audio_key (str): the key of the respective audio
Returns:
pesq_score (float): the PESQ score (the higher the better)
stoi_score (float): the STOI score (the higher the better)
"""
est_audio = generated_audio.squeeze().cpu().numpy()
ref_audio = self.ref_audio.squeeze().cpu().numpy()
if est_audio.shape[-1] > ref_audio.shape[-1]:
est_audio = est_audio[..., :ref_audio.shape[-1]]
else:
ref_audio = ref_audio[..., :est_audio.shape[-1]]
# Compute STOI using PySTOI
# PySTOI (https://github.com/mpariente/pystoi)
stoi_score = stoi(ref_audio, est_audio, self.config.sampling_rate, extended=False)
# Resample audio to 16K Hz to compute PESQ using PyPESQ (supports only 8K / 16K)
# PyPESQ (https://github.com/vBaiCai/python-pesq)
if self.config.sampling_rate != 16000:
est_audio_16k = librosa.resample(est_audio, self.config.sampling_rate, 16000)
ref_audio_16k = librosa.resample(ref_audio, self.config.sampling_rate, 16000)
pesq_score = pesq(ref_audio_16k, est_audio_16k, 16000)
else:
pesq_score = pesq(ref_audio, est_audio, 16000)
# Log scores
log('\t%sScores: PESQ = %.3f, STOI = %.5f'%(
'' if audio_key is None else audio_key+' ', pesq_score, stoi_score), self.config)
# Return scores: the higher the better
return [pesq_score, stoi_score]
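# A minimal usage sketch, assuming a fully populated `config` namespace and a
# pre-searched noise schedule file (hypothetical values, shown here only for illustration):
#   sampler = Sampler(config)
#   audio, n_steps = sampler.sampling(condition=mel_spec, ddim=config.use_ddim_steps)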
| 48.756501
| 103
| 0.580489
|
00ac000ff2b3d56862b38c223561d67461b88317
| 155
|
py
|
Python
|
bag/apps.py
|
KimLHill/MSP4
|
2321076c24f3d49caa409b03109fb67330ed63fe
|
[
"blessing"
] | null | null | null |
bag/apps.py
|
KimLHill/MSP4
|
2321076c24f3d49caa409b03109fb67330ed63fe
|
[
"blessing"
] | null | null | null |
bag/apps.py
|
KimLHill/MSP4
|
2321076c24f3d49caa409b03109fb67330ed63fe
|
[
"blessing"
] | 1
|
2021-09-30T10:51:36.000Z
|
2021-09-30T10:51:36.000Z
|
from django.apps import AppConfig
# Config bag app
class BagConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'bag'
| 19.375
| 56
| 0.741935
|
ccff76ba748c9f1cd041066ce75e8ae31c23c772
| 131
|
py
|
Python
|
9_package_management/code/egg-example/setup.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
9_package_management/code/egg-example/setup.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
9_package_management/code/egg-example/setup.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name = "HelloWorld",
version = "0.1",
packages = find_packages(),
)
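# Usage note: running `python setup.py bdist_egg` in this directory builds the
# package as an egg, and `pip install .` installs it into the current environment.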
| 18.714286
| 43
| 0.664122
|
c55c11159a00e9599c89a170431c5ced9752791b
| 39,541
|
py
|
Python
|
scluster/aws_backend.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
scluster/aws_backend.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
scluster/aws_backend.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
"""AWS implementation of backend.py
Not thread-safe
"""
import glob
import os
import pprint
import shlex
import signal
import stat
import threading
import time
import logging
from typing import Tuple, List
import paramiko
from . import scluster_globals
from . import aws_create_resources as create_lib
from . import aws_util as u
from . import backend
from . import util
TMPDIR = '/tmp/scluster' # location for temp files on launching machine
AWS_LOCK_FN = '/tmp/aws.lock' # lock file used to prevent concurrent creation of AWS resources by multiple workers in parallel
SCLUSTER_DEFAULT_REGION = 'us-east-1' # used as last resort if no other method set a region
LOGDIR_ROOT = '/scluster/runs'
# some image which is fast to load, to use for quick runs
GENERIC_SMALL_IMAGE = 'amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2'
class Task(backend.Task):
"""AWS task is initialized with an AWS instance and handles initialization,
creation of SSH session, shutdown"""
last_status: int # status of last command executed
tmux_window_id: int
tmux_available_window_ids: List[int]
sftp: paramiko.SFTPClient
def __init__(self, name, *, instance, install_script='', image_name='',
**extra_kwargs):
"""
Initializes Task on top of existing AWS instance. Blocks until instance is ready to execute
shell commands.
Args:
name: task name
instance: ec2.Instance object (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#instance)
install_script:
image_name: AWS image name
**extra_kwargs: unused kwargs (kept for compatibility with other backends)
"""
self._cmd_fn = None
self._cmd = None
self._status_fn = None # location of output of last status
self.last_status = -1
self.__logger = logging.getLogger(__name__)
self._can_run = False # indicates that things needed for .run were created
self.initialize_called = False
self.name = name
self.instance = instance
self.install_script = install_script
self.extra_kwargs = extra_kwargs
access_type = os.environ.get('SCLUSTER_INSTANCE_ACCESS_TYPE', "private")
if access_type == 'private':
self.ip = u.get_ip(instance)
elif access_type == 'public':
self.ip = u.get_public_ip(instance)
else:
raise Exception("Wrong SCLUSTER_INSTANCE_ACCESS_TYPE that should be 'private' or 'public'")
self.sftp = None
self._linux_type = 'ubuntu'
# heuristic to tell if I'm using Amazon image name
# default image has name like 'amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2'
if 'amzn' in image_name.lower() or 'amazon' in image_name.lower():
self.__logger.info('Detected Amazon Linux image')
self._linux_type = 'amazon'
self.run_counter = 0
launch_id = util.random_id()
self.local_scratch = f"{TMPDIR}/{name}-{launch_id}"
self.remote_scratch = f"{TMPDIR}/{name}-{launch_id}"
os.system('mkdir -p ' + self.local_scratch)
self._initialized_fn = f'is_initialized'
# _current_directory tracks current directory on task machine
# used for uploading without specifying absolute path on target machine
if self._linux_type == 'ubuntu':
# self._current_directory = '/home/ubuntu'
self.ssh_username = 'ubuntu' # default username on task machine
elif self._linux_type == 'amazon':
# self._current_directory = '/home/ec2-user'
self.ssh_username = 'ec2-user'
self.homedir = '/home/' + self.ssh_username
self.ssh_client = u.ssh_to_task(self, os.environ.get('SCLUSTER_INSTANCE_ACCESS_TYPE', "private"))
self._setup_tmux()
self._run_raw('mkdir -p ' + self.remote_scratch)
self._can_run = True
if self._is_initialized_fn_present():
self.__logger.info("reusing previous initialized state")
else:
self.__logger.info("running install script")
# bin/bash needed to make self-executable or use with UserData
self.install_script = '#!/bin/bash\n' + self.install_script
self.install_script += f'\necho ok > {self._initialized_fn}\n'
self.file_write('install.sh', util.shell_add_echo(self.install_script))
self.run('bash -e install.sh') # fail on errors
assert self._is_initialized_fn_present(), f"Install script didn't write to {self._initialized_fn}"
self._mount_efs()
self.__logger.info("Initialize complete")
self.__logger.info(f"To connect to {self.name} ssh -i {u.get_keypair_fn()} "
f"-o StrictHostKeyChecking=no {self.ssh_username}@{self.ip} \n"
f"tmux a".strip())
def _is_initialized_fn_present(self):
self.__logger.info("Checking for initialization status")
try:
return 'ok' in self.read(self._initialized_fn)
except Exception:
return False
def _setup_tmux(self):
self.__logger.info("Setting up tmux")
self.tmux_session = self.name.replace('.', '=')
self.tmux_window_id = 0
self.tmux_available_window_ids = [0]
tmux_cmd = [f'tmux set-option -g history-limit 50000 \; ',
f'set-option -g mouse on \; ',
f'new-session -s {self.tmux_session} -n 0 -d']
# hack to get around Amazon linux not having tmux
if self._linux_type == 'amazon':
self._run_raw('sudo yum install tmux -y')
del tmux_cmd[1] # Amazon tmux is really old, no mouse option
if not util.is_set("NCLUSTER_NOKILL_TMUX"):
self._run_raw(f'tmux kill-session -t {self.tmux_session}',
ignore_errors=True)
else:
self.__logger.warning("Warning, NCLUSTER_NOKILL_TMUX is on, make sure remote tmux prompt "
"is available or things will hang")
self._run_raw(''.join(tmux_cmd))
self._can_run = True
def _mount_efs(self):
self.__logger.info("Mounting EFS")
region = u.get_region()
efs_id = u.get_efs_dict()[u.get_prefix()]
dns = f"{efs_id}.efs.{region}.amazonaws.com"
self.run('sudo mkdir -p /scluster')
# ignore error on remount (efs already mounted)
self.run(
f"sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 {dns}:/ /scluster",
ignore_errors=True)
# sometimes mount command doesn't work, make sure it's really mounted before returning
stdout, stderr = self.run_with_output('df')
while '/scluster' not in stdout:
sleep_sec = 2
self.__logger.info(f"EFS not yet mounted, sleeping {sleep_sec} seconds")
time.sleep(sleep_sec)
self.run(
f"sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 {dns}:/ /scluster",
ignore_errors=True)
stdout, stderr = self.run_with_output('df')
self.run('sudo chmod 777 /scluster')
def run(
self,
cmd,
non_blocking=False,
ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2
):
# TODO(y): make _run_with_output_on_failure default, and delete this
if util.is_set('SCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
# experimental version that captures output and prints it on failure
# redirection things break bash commands, so
# don't redirect on bash commands like source
# TODO(y): remove this, put in this filtering becase I thought it broke
# source activate, but now it seems it doesn't
if not util.is_bash_builtin(cmd) or True:
return self._run_with_output_on_failure(cmd, non_blocking, ignore_errors, max_wait_sec)
else:
self.__logger.info("Found bash built-in, using regular run")
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
cmds = cmd.split('\n')
self.__logger.info(
f"Running {len(cmds)} commands at once, returning status of last")
status = -1
for subcmd in cmds:
status = self.run(subcmd)
self.last_status = status
return status
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return -1
self.run_counter += 1
self.__logger.info("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
modified_cmd = f'{cmd}; echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f'tmux send-keys -t {tmux_window} {modified_cmd} Enter'
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=30):
self.__logger.info(f"Retrying waiting for {self._status_fn}")
while not self.exists(self._status_fn):
self.__logger.info(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=30)
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.__logger.info(f"Warning: command {cmd} returned status {status}")
return status
def join(self, ignore_errors=False):
"""Waits until last executed command completed."""
assert self._status_fn, "Asked to join a task which hasn't had any commands executed on it"
check_interval = 0.2
status_fn = self._status_fn
if not self.wait_for_file(status_fn, max_wait_sec=30):
self.__logger.info(f"Retrying waiting for {status_fn}")
while not self.exists(status_fn):
self.__logger.info(f"Still waiting for {self._cmd}")
self.wait_for_file(status_fn, max_wait_sec=30)
contents = self.read(status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
if util.is_set('SCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
self.__logger.info(f"Start failing output {extra_msg}: \n{'*' * 80}\n\n '{self.read(self._out_fn)}'")
self.__logger.info(f"\n{'*' * 80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {self._cmd} returned status {status}")
else:
self.__logger.warning(f"Warning: command {self._cmd} returned status {status}")
return status
def _run_with_output_on_failure(
self,
cmd,
non_blocking=False,
ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2
) -> str:
"""
Experimental version of run propagates error messages to client. This command will be default "run" eventually
:param cmd:
:param non_blocking:
:param ignore_errors:
:param max_wait_sec:
:param check_interval:
:return:
"""
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
assert False, "Don't support multi-line for run2"
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return ''
self.run_counter += 1
self.__logger.info("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
self._out_fn = f'{self.remote_scratch}/{self.run_counter}.out'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
# modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}'
# https://stackoverflow.com/a/692407/419116
# $cmd > >(tee -a fn) 2> >(tee -a fn >&2)
modified_cmd = f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
start_time = time.time()
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f"tmux send-keys -t {tmux_window} {modified_cmd} Enter"
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=60):
self.__logger.info(f"Retrying waiting for {self._status_fn}")
elapsed_time = time.time() - start_time
while not self.exists(self._status_fn) and elapsed_time < max_wait_sec:
self.__logger.info(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=60)
elapsed_time = time.time() - start_time
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
self.__logger.warning(f"Start failing output {extra_msg}: '{self.read(self._out_fn)}'")
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.__logger.info(f"Warning: command {cmd} returned status {status}")
return self.read(self._out_fn)
def _run_raw(self, cmd: str, ignore_errors=False) -> Tuple[str, str]:
"""Runs given cmd in the task using current SSH session, returns
stdout/stderr as strings. Because it blocks until cmd is done, use it for
short cmds. Silently ignores failing commands.
This is a barebones method to be used during initialization that have
minimal dependencies (no tmux)
"""
# self._log("run_ssh: %s"%(cmd,))
stdin, stdout, stderr = u.call_with_retries(self.ssh_client.exec_command,
command=cmd, get_pty=True)
stdout_str = stdout.read().decode()
stderr_str = stderr.read().decode()
if stdout.channel.recv_exit_status() != 0:
if not ignore_errors:
self.__logger.info(f"command ({cmd}) failed with --->")
self.__logger.info("failing stdout: " + stdout_str)
self.__logger.info("failing stderr: " + stderr_str)
assert False, "_run_raw failed (see logs for error)"
return stdout_str, stderr_str
def upload(self, local_fn: str, remote_fn: str = '', dont_overwrite: bool = False) -> None:
"""Uploads file to remote instance. If location not specified, dumps it
into default directory. If remote location has files or directories with the
same name, behavior is undefined."""
# support wildcard through glob
if '*' in local_fn:
for local_subfn in glob.glob(local_fn):
self.upload(local_subfn)
return
if '#' in local_fn: # hashes also give problems from shell commands
self.__logger.info("skipping backup file {local_fn}")
return
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
def maybe_fix_mode(local_fn_, remote_fn_):
"""Makes remote file execute for locally executable files"""
mode = oct(os.stat(local_fn_)[stat.ST_MODE])[-3:]
if '7' in mode:
self.__logger.info(f"Making {remote_fn_} executable with mode {mode}")
# use raw run, in case tmux is unavailable
self._run_raw(f"chmod {mode} {remote_fn_}")
# augmented SFTP client that can transfer directories, from
# https://stackoverflow.com/a/19974994/419116
def _put_dir(source, target):
""" Uploads the contents of the source directory to the target path."""
def _safe_mkdir(path, mode=511, ignore_existing=True):
""" Augments mkdir by adding an option to not fail if the folder exists asdf asdf asdf as"""
try:
self.sftp.mkdir(path, mode)
except IOError:
if ignore_existing:
pass
else:
raise
assert os.path.isdir(source)
_safe_mkdir(target)
for item in os.listdir(source):
if os.path.isfile(os.path.join(source, item)):
self.sftp.put(os.path.join(source, item), os.path.join(target, item))
maybe_fix_mode(os.path.join(source, item), os.path.join(target, item))
else:
_safe_mkdir(f'{target}/{item}')
_put_dir(f'{source}/{item}', f'{target}/{item}')
if not remote_fn:
remote_fn = os.path.basename(local_fn)
self.__logger.info('uploading ' + local_fn + ' to ' + remote_fn)
remote_fn = remote_fn.replace('~', self.homedir)
if '/' in remote_fn:
remote_dir = os.path.dirname(remote_fn)
assert self.exists(
remote_dir), f"Remote dir {remote_dir} doesn't exist"
if dont_overwrite and self.exists(remote_fn):
self.__logger.info("Remote file %s exists, skipping" % (remote_fn,))
return
assert os.path.exists(local_fn), f"{local_fn} not found"
if os.path.isdir(local_fn):
_put_dir(local_fn, remote_fn)
else:
assert os.path.isfile(local_fn), "%s is not a file" % (local_fn,)
# this crashes with IOError when upload failed
if self.exists(remote_fn) and self.isdir(remote_fn):
remote_fn = remote_fn + '/' + os.path.basename(local_fn)
self.sftp.put(localpath=local_fn, remotepath=remote_fn)
maybe_fix_mode(local_fn, remote_fn)
def download(self, remote_fn, local_fn=''):
self.__logger.info("downloading %s" % remote_fn)
# sometimes open_sftp fails with Administratively prohibited, do retries
# root cause could be too many SSH connections being open
# https://unix.stackexchange.com/questions/14160/ssh-tunneling-error-channel-1-open-failed-administratively-prohibited-open
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
if not local_fn:
local_fn = os.path.basename(remote_fn)
self.__logger.info("downloading %s to %s" % (remote_fn, local_fn))
self.sftp.get(remote_fn, local_fn)
def exists(self, remote_fn):
stdout, stderr = self._run_raw('stat ' + remote_fn, ignore_errors=True)
return 'No such file' not in stdout
def write(self, remote_fn, contents):
tmp_fn = self.local_scratch + '/' + self.name + "_" + str(self.run_counter) + "." + remote_fn.split(".")[-1]
open(tmp_fn, 'w').write(contents)
self.upload(tmp_fn, remote_fn)
def read(self, remote_fn):
tmp_fn = self.local_scratch + '/' + self.name + "_" + str(self.run_counter) + "." + remote_fn.split(".")[-1]
self.download(remote_fn, tmp_fn)
return open(tmp_fn).read()
def isdir(self, remote_fn):
stdout, _stderr = self._run_raw('ls -ld ' + remote_fn)
return stdout.startswith('d')
def switch_window(self, window_id: int):
"""
Switches currently active tmux window for given task. 0 is the default window
Args:
window_id: integer id of tmux window to use
"""
# windows are numbered sequentially 0, 1, 2, ...
# create any missing windows and make them point to the same directory
if window_id not in self.tmux_available_window_ids:
for i in range(max(self.tmux_available_window_ids) + 1, window_id + 1):
self._run_raw(f'tmux new-window -t {self.tmux_session} -d')
self.tmux_available_window_ids.append(i)
self.tmux_window_id = window_id
@property
def logdir(self):
"""Returns logging directory, creating one if necessary. See "Logdir" section
of design doc on naming convention"""
run_name = scluster_globals.get_run_for_task(self)
logdir = scluster_globals.get_logdir(run_name)
if logdir:
return logdir
# create logdir. Only single task in a group creates the logdir
if scluster_globals.is_chief(self, run_name):
chief = self
else:
chief = scluster_globals.get_chief(run_name)
chief.setup_logdir()
return scluster_globals.get_logdir(run_name)
# release lock
def setup_logdir(self):
# todo: locking on logdir creation
"""Create logdir for task/job/run
"""
run_name = scluster_globals.get_run_for_task(self)
self.__logger.info("Creating logdir for run " + run_name)
logdir_root = scluster_globals.LOGDIR_ROOT
assert logdir_root
self.run(f'mkdir -p {logdir_root}')
find_command = f'find {logdir_root} -maxdepth 1 -type d'
stdout, stderr = self.run_with_output(find_command)
logdir = f"{logdir_root}/{run_name}"
counter = 0
while logdir in stdout:
counter += 1
new_logdir = f'{logdir_root}/{run_name}.{counter:02d}'
self.__logger.info(f'Warning, logdir {logdir} exists, deduping to {new_logdir}')
logdir = new_logdir
self.run(f'mkdir -p {logdir}')
scluster_globals.set_logdir(run_name, logdir)
return logdir
# legacy methods
def file_exists(self, remote_fn):
return self.exists(remote_fn)
def file_write(self, *args, **kwargs):
return self.write(*args, **kwargs)
def file_read(self, remote_fn):
return self.read(remote_fn)
class Job(backend.Job):
pass
class Run(backend.Run):
"""Run is a collection of jobs that share state. IE, training run will contain gradient worker job, parameter
server job, and TensorBoard visualizer job. These jobs will use the same shared directory to store checkpoints and
event files.
:ivar aws_placement_group_name: somedoc
"""
placement_group: str # unique identifier to use as placement_group group name
jobs: List[Job]
def __init__(self, name='', **kwargs):
"""Creates a run. If install_script is specified, it's used as default
install_script for all jobs (can be overridden by Job constructor)"""
self.__logger = logging.getLogger(__name__)
assert name, "Must specify name for current run"
jobs = []
self.name = name
self.jobs = jobs
self.kwargs = kwargs
self.__logger.info(f"Choosing placement_group for run {name}")
self.placement_group = name + '-' + util.random_id()
@property
def logdir(self):
# querying logdir has a side-effect of creation, so do it on chief task
chief_task = scluster_globals.get_chief(self.name)
return chief_task.logdir
# TODO: currently this is synchronous, use non_blocking wrapper like in Job to parallelize methods
def run(self, *args, **kwargs):
"""Runs command on every job in the run."""
for job in self.jobs:
job.run(*args, **kwargs)
def run_with_output(self, *args, **kwargs):
"""Runs command on every first job in the run, returns stdout."""
for job in self.jobs:
job.run_with_output(*args, **kwargs)
def _run_raw(self, *args, **kwargs):
"""_run_raw on every job in the run."""
for job in self.jobs:
job._run_raw(*args, **kwargs)
def upload(self, *args, **kwargs):
"""Runs command on every job in the run."""
for job in self.jobs:
job.upload(*args, **kwargs)
def make_job(self, name='', **kwargs):
return make_job(name + '.' + self.name, run_name=self.name, **kwargs)
def make_task(
name: str = '',
run_name: str = '',
install_script: str = '',
instance_type: str = '',
image_name: str = '',
disk_size: int = 0,
preemptible=None,
logging_task: backend.Task = None,
create_resources=True,
spot=False
) -> Task:
"""
Create task on AWS.
Automatically places it in singleton Run/singleton Job objects, see Run/Job/Task hierarchy for details
https://docs.google.com/document/d/1Gg4T243cYrDUW1YDCikmqp7fzSQDU3rZxOkJr9ohhs8/edit#heading=h.j4td4oixogib
Args:
disk_size: default size of root disk, in GBs
create_resources: whether this task will handle resource creation
name: see ncluster.make_task
run_name: see ncluster.make_task
install_script: see ncluster.make_task
        instance_type: instance type to use, defaults to $SCLUSTER_INSTANCE or t3.micro if unset
        image_name: name of image, ie, "Deep Learning AMI (Ubuntu) Version 12.0", defaults to $SCLUSTER_IMAGE or amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2 if unset
preemptible: use cheaper preemptible/spot instances
logging_task: partially initialized Task object, use it for logging
Returns:
"""
logger = logging.getLogger(__name__)
scluster_globals.task_launched = True
# if name not specified, use name which is the same across script invocations for given image/instance-type
name = scluster_globals.auto_assign_task_name_if_needed(name, instance_type,
image_name)
if not instance_type:
instance_type = os.environ.get('SCLUSTER_INSTANCE', 't3.micro')
logger.info("Using instance " + instance_type)
_set_aws_environment()
if create_resources:
_maybe_create_resources(logging_task=logging_task)
else:
pass
run: Run = scluster_globals.get_run_object(run_name)
placement_group = ''
if u.instance_supports_placement_groups(instance_type) and run:
placement_group = run.placement_group
logger.info(f"Launching into placement_group group {placement_group}")
u.maybe_create_placement_group(run.placement_group)
if not image_name:
image_name = os.environ.get('SCLUSTER_IMAGE', GENERIC_SMALL_IMAGE)
logger.info("Using image " + image_name)
if preemptible is None:
preemptible = os.environ.get('SCLUSTER_PREEMPTIBLE', False)
preemptible = bool(preemptible)
if preemptible:
logger.info("Using preemptible instances")
image = u.lookup_image(image_name)
keypair = u.get_keypair()
security_group = u.get_security_group()
subnet = u.get_subnet()
ec2 = u.get_ec2_resource()
instance = u.lookup_instance(name, instance_type, image_name)
_maybe_start_instance(instance)
_maybe_wait_for_initializing_instance(instance)
# create the instance if not present
if instance:
logger.info(f"Reusing {instance}")
else:
logger.info(f"Allocating {instance_type} for task {name}")
args = dict(
ImageId=image.id,
InstanceType=instance_type,
MinCount=1,
MaxCount=1,
SubnetId=subnet.id,
SecurityGroupIds=[security_group.id],
KeyName=keypair.name
)
args['TagSpecifications'] = [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'Name',
'Value': name
}]
}]
placement_specs = {}
if placement_group:
placement_specs['GroupName'] = placement_group
args['Placement'] = placement_specs
args['Monitoring'] = {'Enabled': True}
if disk_size:
assert disk_size > 0
ebs = {
'VolumeSize': disk_size,
'VolumeType': 'gp2',
}
args['BlockDeviceMappings'] = [{
'DeviceName': '/dev/sda1',
'Ebs': ebs
}]
# Use high throughput disk (0.065/iops-month = about $1/hour)
if 'SCLUSTER_AWS_FAST_ROOTDISK' in os.environ:
        assert not disk_size, f"Specified both disk_size {disk_size} " \
                              f"and $SCLUSTER_AWS_FAST_ROOTDISK; they are incompatible because " \
                              f"$SCLUSTER_AWS_FAST_ROOTDISK hardwires the disk size"
ebs = {
'VolumeSize': 500,
'VolumeType': 'io1',
'Iops': 11500
}
args['BlockDeviceMappings'] = [{
'DeviceName': '/dev/sda1',
'Ebs': ebs
}]
instances = []
try:
if spot:
instances = u.create_spot_instances(args)
else:
instances = ec2.create_instances(**args)
except Exception as e:
logger.info(f"Instance creation for {name} failed with ({e})")
logger.info("You can change availability zone using export SCLUSTER_ZONE=...")
logger.info("Terminating")
os.kill(os.getpid(), signal.SIGINT) # sys.exit() doesn't work inside thread
assert instances, f"ec2.create_instances returned {instances}"
logger.info(f"Allocated {len(instances)} instances")
instance = instances[0]
task = Task(
name,
instance=instance,
install_script=install_script,
image_name=image_name,
instance_type=instance_type
)
scluster_globals.register_task(task, run_name)
return task
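# A minimal usage sketch with hypothetical values (assumes AWS credentials and the
# scluster resources referenced above are already configured):
#   task = make_task(name='worker0', instance_type='t3.micro',
#                    image_name=GENERIC_SMALL_IMAGE,
#                    install_script='pip install numpy')
#   task.run('python -c "print(1 + 1)"')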
def make_job(
name: str = '',
run_name: str = '',
num_tasks: int = 1,
install_script: str = '',
instance_type: str = '',
image_name: str = '',
create_resources=True,
**kwargs) -> Job:
"""
Args:
create_resources: if True, will create resources if necessary
name: see backend.make_task
run_name: see backend.make_task
num_tasks: number of tasks to launch
install_script: see make_task
instance_type: see make_task
image_name: see make_task
Returns:
"""
logger = logging.getLogger(__name__)
assert num_tasks > 0, f"Can't create job with {num_tasks} tasks"
assert name.count(
'.') <= 1, "Job name has too many .'s (see ncluster design: Run/Job/Task hierarchy for convention)"
# dummy tasks for logging
tasks = [backend.Task(f"{i}.{name}") for i in range(num_tasks)]
_set_aws_environment(tasks[0])
if create_resources:
_maybe_create_resources(tasks[0])
name = scluster_globals.auto_assign_job_name_if_needed(name)
run_name = scluster_globals.auto_assign_run_name_if_needed(run_name)
_run = scluster_globals.create_run_if_needed(run_name, make_run)
job = Job(name=name, tasks=tasks, run_name=run_name, **kwargs)
exceptions = []
# make tasks in parallel
def make_task_fn(i: int):
try:
tasks[i] = make_task(f"{i}.{name}", run_name=run_name,
install_script=install_script,
instance_type=instance_type, image_name=image_name,
logging_task=tasks[i],
create_resources=False,
# handle resources in job already
**kwargs)
except Exception as e:
exceptions.append(e)
logger.info("Creating threads")
threads = [threading.Thread(name=f'make_task_{i}', target=make_task_fn, args=[i]) for i in range(num_tasks)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info(f"Exception are {exceptions}")
if exceptions:
raise exceptions[0]
job.tasks = tasks
# double check that all instances are in the same placement_group group
# this can happen if some instances from previous smaller run are getting reused
placement_dict = {task.instance.placement_group: task.name for task in
job.tasks}
# TODO: make placement_group group name derived from run, to make it deterministic
# on individual instance restarts
if len(placement_dict) > 1:
        util.log("Job tasks are spread over multiple placement groups")
        pprint.pprint(placement_dict)
        raise RuntimeError(
            f"Got instances spread over multiple placement groups: {placement_dict}. Must terminate all instances in run {run_name} and try again.")
return job
def make_run(name) -> Run:
run = Run(name)
scluster_globals.register_run(name, run)
return run
# TODO: this method and a few others are backend specific, document in API doc
def _maybe_start_instance(instance):
"""Starts instance if it's stopped, no-op otherwise."""
logger = logging.getLogger(__name__)
if not instance:
return
if instance.state['Name'] == 'stopped':
instance.start()
while True:
logger.info(f"Waiting for {instance} to start.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10)
def _maybe_wait_for_initializing_instance(instance):
"""Starts instance if it's stopped, no-op otherwise."""
logger = logging.getLogger(__name__)
if not instance:
return
if instance.state['Name'] == 'initializing':
while True:
logger.info(f"Waiting for {instance} to leave state 'initializing'.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10)
def _maybe_create_resources(logging_task: Task = None):
"""Use heuristics to decide to possibly create resources"""
logger = logging.getLogger(__name__)
def should_create_resources():
"""Check if gateway, keypair, vpc exist."""
prefix = u.get_prefix()
if u.get_keypair_name() not in u.get_keypair_dict():
logger.info(f"Missing {u.get_keypair_name()} keypair, creating resources")
return True
vpcs = u.get_vpc_dict()
vpc_name = u.get_vpc_name()
if vpc_name not in vpcs:
logger.info(f"Missing {vpc_name} vpc, creating resources")
return True
vpc = vpcs[u.get_vpc_name()]
gateways = u.get_gateway_dict(vpc)
gateway_name = u.get_gateway_name()
if gateway_name not in gateways:
logger.info(f"Missing {gateway_name} gateway, creating resources")
return True
efs = u.get_efs_dict()
efs_name = u.get_efs_name()
if efs_name not in efs:
logger.info(f"Missing {efs_name} efs, creating resources")
return True
return False
try:
# this locking is approximate, still possible for threads to slip through
if os.path.exists(AWS_LOCK_FN):
pid, ts, lock_taskname = open(AWS_LOCK_FN).read().split('-')
ts = int(ts)
logger.info(f"waiting for aws resource creation, another resource initiation was "
f"initiated {int(time.time() - ts)} seconds ago by "
f"{lock_taskname}, delete lock file "
f"{AWS_LOCK_FN} if this is an error")
while True:
if os.path.exists(AWS_LOCK_FN):
logger.info(f"waiting for lock file {AWS_LOCK_FN} to get deleted "
f"initiated {int(time.time() - ts)} seconds ago by ")
time.sleep(2)
continue
else:
break
return
with open(AWS_LOCK_FN, 'w') as f:
f.write(
f'{os.getpid()}-{int(time.time())}-{logging_task.name if logging_task else ""}')
if not should_create_resources():
logger.info("Resources already created, no-op")
os.remove(AWS_LOCK_FN)
return
create_lib.create_resources()
finally:
if os.path.exists(AWS_LOCK_FN):
os.remove(AWS_LOCK_FN)
def _set_aws_environment(task: Task = None):
"""Sets up AWS environment from SCLUSTER environment variables"""
current_zone = os.environ.get('SCLUSTER_ZONE', '')
current_region = os.environ.get('AWS_DEFAULT_REGION', '')
logger = logging.getLogger(__name__)
def log(*args):
if task:
task.log(*args)
else:
util.log(*args)
if current_region and current_zone:
assert current_zone.startswith(
current_region), f'Current zone "{current_zone}" ($SCLUSTER_ZONE) is not ' \
            f'in current region "{current_region}" ($AWS_DEFAULT_REGION)'
assert u.get_session().region_name == current_region # setting from ~/.aws
# zone is set, set region from zone
if current_zone and not current_region:
current_region = current_zone[:-1]
os.environ['AWS_DEFAULT_REGION'] = current_region
# neither zone nor region not set, use default setting for region
# if default is not set, use SCLUSTER_DEFAULT_REGION
if not current_region:
current_region = u.get_session().region_name
if not current_region:
log(f"No default region available, using {SCLUSTER_DEFAULT_REGION}")
current_region = SCLUSTER_DEFAULT_REGION
os.environ['AWS_DEFAULT_REGION'] = current_region
logger.info(f"Using account {u.get_account_number()}, region {current_region}, zone {current_zone}")
| 38.576585
| 159
| 0.614628
|
8ef0f31039f6e87b5466a29befbf7e87317aa28b
| 835
|
py
|
Python
|
pykl/tiny/grapheneinfo/registry.py
|
wowngasb/pykl
|
6dc68acdd5b2598260647ae5dc22eb27d312b0d8
|
[
"MIT"
] | null | null | null |
pykl/tiny/grapheneinfo/registry.py
|
wowngasb/pykl
|
6dc68acdd5b2598260647ae5dc22eb27d312b0d8
|
[
"MIT"
] | null | null | null |
pykl/tiny/grapheneinfo/registry.py
|
wowngasb/pykl
|
6dc68acdd5b2598260647ae5dc22eb27d312b0d8
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
class Registry(object):
def __init__(self):
self._registry = {}
self._registry_models = {}
self._registry_composites = {}
def register(self, cls):
from .types import SQLAlchemyObjectType
assert issubclass(
cls, SQLAlchemyObjectType), 'Only SQLAlchemyObjectType can be registered, received "{}"'.format(
cls.__name__)
assert cls._meta.registry == self, 'Registry for a Model have to match.'
self._registry[cls._meta.model] = cls
def get_type_for_model(self, model):
return self._registry.get(model)
registry = None
def get_global_registry():
global registry
if not registry:
registry = Registry()
return registry
def reset_global_registry():
global registry
registry = None
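# A minimal usage sketch with hypothetical names (`UserType` bound to model `User`):
#   registry = get_global_registry()
#   registry.register(UserType)  # UserType must subclass SQLAlchemyObjectType
#   assert registry.get_type_for_model(User) is UserType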
| 23.194444
| 108
| 0.644311
|
8b7b3fb5ef10f07b0c347c15d793bb399527a125
| 3,650
|
py
|
Python
|
analyzers/infer/bugswarm/wrapper.py
|
patrickjchap/Static-Bug-Detectors-ASE-Artifact
|
9104a6c9d8105725d3f2351893ff3f4022b02faa
|
[
"BSD-3-Clause"
] | 1
|
2022-01-07T09:32:48.000Z
|
2022-01-07T09:32:48.000Z
|
analyzers/infer/bugswarm/wrapper.py
|
patrickjchap/Static-Bug-Detectors-ASE-Artifact
|
9104a6c9d8105725d3f2351893ff3f4022b02faa
|
[
"BSD-3-Clause"
] | null | null | null |
analyzers/infer/bugswarm/wrapper.py
|
patrickjchap/Static-Bug-Detectors-ASE-Artifact
|
9104a6c9d8105725d3f2351893ff3f4022b02faa
|
[
"BSD-3-Clause"
] | 1
|
2021-11-19T00:33:30.000Z
|
2021-11-19T00:33:30.000Z
|
import inspect
import os
import subprocess
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
from distutils.dir_util import copy_tree
from multiprocessing import Pool
from multiprocessing import Queue
from multiprocessing import Process
from typing import Callable
from typing import List
from bugswarm.common.artifact_processing import utils as procutils
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import utils
def _print_usage():
print('Usage: python3 main.py <image_tags_file> <dockerhub-repo> <number-of-processes>')
print('image_tags_file: Path to a file containing a newline-separated list of image tags to process.')
print('dockerhub-repo: Dockerhub repo slug, e.g. bugswarm/images')
print('number-of-processes: Number of processes')
def _validate_input(argv):
if len(argv) != 4:
_print_usage()
sys.exit()
image_tags_file = argv[1]
dockerhub_repo = argv[2]
num_processes = int(argv[3])
if not os.path.isfile(image_tags_file):
print('The image_tags_file argument is not a file or does not exist. Exiting.')
_print_usage()
sys.exit(1)
if num_processes <= 0:
print('The number of processes needs to be greater than 0. Exiting.')
_print_usage()
sys.exit(1)
return image_tags_file, dockerhub_repo, num_processes
def _thread_main(image_tag: str, f_or_p: str, dockerhub_repo: str):
return utils.run_command('python3 main.py tmp/{}-{}.txt {} {}'.format(image_tag, f_or_p, f_or_p, dockerhub_repo))
def main(argv=None):
argv = argv or sys.argv
image_tags_file, dockerhub_repo, num_processes = _validate_input(argv)
t_start = time.time()
utils.run_command('mkdir tmp')
with open(image_tags_file) as f:
image_tags = list([x.strip() for x in f.readlines()])
for image_tag in image_tags:
for f_or_p in ['failed', 'passed']:
with open('tmp/{}-{}.txt'.format(image_tag, f_or_p), 'w+') as tmp_f:
tmp_f.write('{}\n'.format(image_tag))
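    # Each (image_tag, failed/passed) pair becomes one pool task; the Queue is only
    # used as a convenient container to enumerate the pairs before apply_async.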
with Pool(min(len(image_tags) * 2, num_processes)) as pool:
q = Queue()
for image_tag in image_tags:
for f_or_p in ['failed', 'passed']:
q.put((image_tag, f_or_p, dockerhub_repo))
multiple_results = [pool.apply_async(_thread_main, (k[0], k[1], k[2],)) for k in [q.get() for i in range(q.qsize())]]
print([res.get(timeout=360) for res in multiple_results])
# with ThreadPoolExecutor(max_workers=4) as executor:
# for f_or_p in ['failed', 'passed']:
# future_to_image_tag = {executor.submit(_thread_main, '{}-{}'.format(image_tag, f_or_p), f_or_p, dockerhub_repo): image_tag
# for image_tag in image_tags}
# attempted = 0
# succeeded = 0
# errored = 0
# for future in as_completed(future_to_image_tag):
# attempted += 1
# try:
# data = future.result()
# if data:
# succeeded += 1
# else:
# errored += 1
# except Exception as e:
# print(e)
# errored += 1
# Clean up
utils.run_command('rm -rf tmp')
utils.run_command('docker rm $(docker ps -aq)')
t_end = time.time()
# print('attempted: {}, succeeded: {}, errored: {}'.format(attempted, succeeded, errored))
print('Running InferWrapper took {}s'.format(t_end-t_start))
if __name__ == '__main__':
sys.exit(main())
| 35.096154
| 136
| 0.652329
|
13d8ee4f6751dc79a4f2d5907e27ef7b4f3ef60d
| 5,866
|
py
|
Python
|
main.py
|
nishantarora94/jar-scanner
|
c6001043609232d3dd9ba33ee9c90a9b049bef20
|
[
"MIT",
"Unlicense"
] | null | null | null |
main.py
|
nishantarora94/jar-scanner
|
c6001043609232d3dd9ba33ee9c90a9b049bef20
|
[
"MIT",
"Unlicense"
] | 2
|
2021-06-20T12:03:53.000Z
|
2021-07-06T16:59:51.000Z
|
main.py
|
nishantarora94/jar-scanner
|
c6001043609232d3dd9ba33ee9c90a9b049bef20
|
[
"MIT",
"Unlicense"
] | null | null | null |
# importing python modules
import csv
import glob
import os
import shutil
from zipfile import ZipFile
import re
# path of the parent directory
current_dir = os.getcwd()
bucket_dir = os.path.join(current_dir, "Bucket")
if os.path.exists(bucket_dir):
os.chdir(bucket_dir)
else:
os.chdir(current_dir)
os.mkdir("Bucket")
os.chdir(bucket_dir)
# function to extract the EAR and WAR files
def extract():
zip_ext="*.zip"
war_ext="*.war"
ear_ext="*.ear"
if glob.glob(zip_ext):
zip_files=glob.glob(zip_ext)
print("The list of ear files detected:", zip_files)
for file in zip_files:
zip_folder = file + "_zip"
os.mkdir(zip_folder)
with ZipFile(file, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(zip_folder)
print(file, "has been scanned ")
for files in glob.glob(os.path.join(zip_folder, ear_ext)):
shutil.copy(files, bucket_dir)
for files in glob.glob(os.path.join(zip_folder, war_ext)):
shutil.copy(files, bucket_dir)
if glob.glob(ear_ext):
ear_files=glob.glob(ear_ext)
print("\n\nThe list of ear files detected:", ear_files)
for file in ear_files:
ear_folder = file + "_ear"
os.mkdir(ear_folder)
with ZipFile(file, 'r') as zipObj:
# Extract all the contents of EAR file in current directory
zipObj.extractall(ear_folder)
print(file, "has been scanned ")
for files in glob.glob(os.path.join(ear_folder, war_ext)):
shutil.copy(files, bucket_dir)
if glob.glob(war_ext):
war_files=glob.glob(war_ext)
print("\n\nThe list of war files detected:", war_files)
for file in war_files:
name= file.split(".war")
war_folder = name[0]
os.mkdir(war_folder)
with ZipFile(file, 'r') as zipObj:
# Extract all the contents of WAR file in current directory
zipObj.extractall(war_folder)
print(file, "has been scanned ")
else:
print("There is no file to be extracted")
extract()
# Generating the list of all unique jars
all_jars = []
bucket_jars = os.path.join(bucket_dir, "**", "*.jar")
all_jars_files = glob.glob(bucket_jars, recursive=True)
for file in all_jars_files:
all_jars.append(os.path.basename(file))
all_jars = list(set(all_jars))
# Importing the product created jar list from CSV to suppression list
requisite_path = os.path.join(current_dir, "Requisite")
if os.path.isfile(os.path.join(requisite_path, "suppression_jars.csv")):
os.chdir(requisite_path)
suppression = open('suppression_jars.csv', 'r')
# creating dictreader object
file = csv.DictReader(suppression)
# creating empty lists
suppression_jar = []
# iterating over each row and append
# values to empty list
for col in file:
suppression_jar.append(col['Jars'])
suppression_jar = list(set(suppression_jar))
# Reading the list of jars from the License CSV file
if os.path.isfile(os.path.join(requisite_path, "License.csv")):
os.chdir(requisite_path)
license = open('License.csv', 'r')
# creating dictreader object
file = csv.DictReader(license)
# creating empty lists
license_jar = []
# iterating over each row and append
# values to empty list
for col in file:
license_jar.append(col['jars'])
third_party = []
for element in all_jars:
if element not in suppression_jar:
third_party.append(element)
deprecated_jars = []
newly_added_jars = []
active_license = []
for element in license_jar:
if element not in third_party:
deprecated_jars.append(element)
elif element in third_party:
active_license.append(element)
for element in third_party:
if element not in license_jar:
newly_added_jars.append(element)
newly_added_jars= sorted(set(newly_added_jars))
########################################################
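# Strip version suffixes so deprecated and newly added jars can be compared by base name;
# a jar present in both lists under different versions is treated as a version change.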
version = []
deprecated_withoutext = []
jars_oldversion = []
jars_newversion = []
for element in deprecated_jars:
result = re.split(r"-\d.+", element)
    deprecated_withoutext.append(result[0])
final_new_jars= []
versionchanged_jars = []
for element in newly_added_jars:
name = re.split(r"-\d.+", element)
    if name[0] not in deprecated_withoutext:
final_new_jars.append(element)
else:
jars_newversion.append(element)
versionchanged_jars.append(name[0])
#######################################################
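# Build a map from jar base name to its approved and new versions; deprecated jars whose
# base name never reappears in the new list are collected as unutilized jars.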
my_dict = {}
unutilized_jars = []
test = []
for element in deprecated_jars:
name = re.split(r"-\d.+", element)
    nameOfJar = name[0]
ver = []
if nameOfJar in versionchanged_jars:
test.append(element)
if nameOfJar in my_dict.keys():
a = element.split(nameOfJar + "-")
b = a[1].split(".jar")
new_version = "approved version : " + b[0]
my_dict[nameOfJar].append(new_version)
else:
a = element.split(nameOfJar+"-")
b = a[1].split(".jar")
old_version = "approved version : "+b[0]
ver.append(old_version)
my_dict.setdefault(nameOfJar,ver)
else:
unutilized_jars.append(element)
for element in jars_newversion:
name = re.split(r"-\d.+", element)
nameOfJar = name[0]
if nameOfJar in my_dict.keys():
a = element.split(nameOfJar + "-")
b = a[1].split(".jar")
new_version = "new version : " + b[0]
my_dict[nameOfJar].append(new_version)
else:
print("check the flow if it enters this else condition")
| 31.368984
| 73
| 0.617968
|
d333cdc39e6ca033228f0173a87b5e3859424278
| 713
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/waterfall/textfont/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/validators/waterfall/textfont/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/validators/waterfall/textfont/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import sys
if sys.version_info < (3, 7):
from ._sizesrc import SizesrcValidator
from ._size import SizeValidator
from ._familysrc import FamilysrcValidator
from ._family import FamilyValidator
from ._colorsrc import ColorsrcValidator
from ._color import ColorValidator
else:
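    # Python 3.7+ supports module-level __getattr__ (PEP 562), so submodule imports
    # are deferred until the validators are first accessed.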
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._sizesrc.SizesrcValidator",
"._size.SizeValidator",
"._familysrc.FamilysrcValidator",
"._family.FamilyValidator",
"._colorsrc.ColorsrcValidator",
"._color.ColorValidator",
],
)
| 28.52
| 55
| 0.649369
|
c098ed56d809e83369b65a4c6b2e4679b7964904
| 34
|
py
|
Python
|
tests/plugins/remove/__init__.py
|
jtpavlock/moe
|
6f053c8c53f92686013657bda676b00f97edd230
|
[
"MIT"
] | 14
|
2021-09-04T11:42:18.000Z
|
2022-02-04T05:11:46.000Z
|
tests/plugins/remove/__init__.py
|
jtpavlock/Moe
|
6f053c8c53f92686013657bda676b00f97edd230
|
[
"MIT"
] | 56
|
2021-05-26T00:00:46.000Z
|
2021-08-08T17:14:31.000Z
|
tests/plugins/remove/__init__.py
|
jtpavlock/moe
|
6f053c8c53f92686013657bda676b00f97edd230
|
[
"MIT"
] | 1
|
2021-07-22T21:55:21.000Z
|
2021-07-22T21:55:21.000Z
|
"""Test the ``remove`` plugin."""
| 17
| 33
| 0.558824
|
e2f0964c139d7ed363c5556162c19236f910732f
| 1,086
|
py
|
Python
|
src/capabilities/lint.py
|
textx-tools/textX-languageserver
|
292dcecb46e5e180755005f01d8302a491f658ca
|
[
"MIT"
] | 6
|
2017-11-07T00:48:38.000Z
|
2018-11-10T00:58:19.000Z
|
src/capabilities/lint.py
|
textx-tools/textX-languageserver
|
292dcecb46e5e180755005f01d8302a491f658ca
|
[
"MIT"
] | 5
|
2018-03-21T01:10:05.000Z
|
2018-10-16T18:00:18.000Z
|
src/capabilities/lint.py
|
textx-tools/textX-languageserver
|
292dcecb46e5e180755005f01d8302a491f658ca
|
[
"MIT"
] | 2
|
2018-12-27T21:58:04.000Z
|
2019-09-16T15:37:02.000Z
|
"""
This module is responsible for linting document file.
"""
from ..utils import _utils
from ..infrastructure.lsp import Diagnostic
__author__ = "Daniel Elero"
__copyright__ = "textX-tools"
__license__ = "MIT"
LINT_DEBOUNCE_S = 0.5
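# Debounce linting so it runs only after edits have paused for LINT_DEBOUNCE_S seconds.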
@_utils.debounce(LINT_DEBOUNCE_S)
def lint(doc_uri, workspace):
"""
Create and return diagnostic object which contains all parsing errors
"""
if doc_uri in workspace.documents:
diagnostic = Diagnostic()
txdoc = workspace.get_document(doc_uri)
errors = txdoc.all_errors
for e in errors:
try:
msg = e.args[0].decode("utf-8")
msg = msg.split(' at')[0]
if e.offset:
line, col = _utils.pos_to_line_col(txdoc.source, e.offset)
e.line = line
e.col = col
diagnostic.error(txdoc.lines, e.line, e.col, msg)
            except Exception:
diagnostic.error(txdoc.lines, e.line, e.col, str(e))
workspace.publish_diagnostics(doc_uri, diagnostic.get_diagnostics())
| 29.351351
| 78
| 0.601289
|
10dd1dc6957f1d90f34b2dcff4876a4dadcc7a43
| 390
|
py
|
Python
|
src/app/migrations/0003_auto_20180313_0622.py
|
510908220/heartbeats
|
b59938a6529f79db9a4a25984d3b2573d01b597e
|
[
"MIT"
] | 23
|
2018-03-11T03:34:29.000Z
|
2021-01-15T05:18:36.000Z
|
src/app/migrations/0003_auto_20180313_0622.py
|
510908220/heartbeats
|
b59938a6529f79db9a4a25984d3b2573d01b597e
|
[
"MIT"
] | 4
|
2019-03-29T02:38:38.000Z
|
2021-04-20T17:13:58.000Z
|
src/app/migrations/0003_auto_20180313_0622.py
|
510908220/heartbeats
|
b59938a6529f79db9a4a25984d3b2573d01b597e
|
[
"MIT"
] | 11
|
2018-04-12T01:25:54.000Z
|
2021-01-12T07:39:27.000Z
|
# Generated by Django 2.0.3 on 2018-03-13 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_remove_service_owner'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
]
| 20.526316
| 58
| 0.602564
|
838e1a77693fff1f1292b89240a74ebacdc8f9f0
| 14,394
|
py
|
Python
|
novaclient/utils.py
|
alvarolopez/python-novaclient
|
ef7cb1d44d47a1273810603fd96d982d7f0bd7d6
|
[
"Apache-1.1"
] | null | null | null |
novaclient/utils.py
|
alvarolopez/python-novaclient
|
ef7cb1d44d47a1273810603fd96d982d7f0bd7d6
|
[
"Apache-1.1"
] | null | null | null |
novaclient/utils.py
|
alvarolopez/python-novaclient
|
ef7cb1d44d47a1273810603fd96d982d7f0bd7d6
|
[
"Apache-1.1"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import json
import os
import re
import textwrap
import time
import uuid
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import pkg_resources
import prettytable
import six
from novaclient import exceptions
from novaclient.i18n import _
VALID_KEY_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE)
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def add_resource_manager_extra_kwargs_hook(f, hook):
"""Add hook to bind CLI arguments to ResourceManager calls.
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
them through to the ResourceManager. Before passing through the args, the
hooks registered here will be called, giving us a chance to add extra
kwargs (taken from the command-line) to what's passed to the
ResourceManager.
"""
if not hasattr(f, 'resource_manager_kwargs_hooks'):
f.resource_manager_kwargs_hooks = []
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
if hook.__name__ not in names:
f.resource_manager_kwargs_hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
"""Return extra_kwargs by calling resource manager kwargs hooks."""
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
extra_kwargs = {}
for hook in hooks:
hook_kwargs = hook(args)
hook_name = hook.__name__
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
if conflicting_keys and not allow_conflicts:
msg = (_("Hook '%(hook_name)s' is attempting to redefine "
"attributes '%(conflicting_keys)s'") %
{'hook_name': hook_name,
'conflicting_keys': conflicting_keys})
raise exceptions.NoUniqueMatch(msg)
extra_kwargs.update(hook_kwargs)
return extra_kwargs
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def pretty_choice_dict(d):
"""Returns a formatted dict as 'key=value'."""
return pretty_choice_list(['%s=%s' % (k, d[k]) for k in sorted(d.keys())])
def print_list(objs, fields, formatters={}, sortby_index=None):
if sortby_index is None:
sortby = None
else:
sortby = fields[sortby_index]
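    # Fields listed here keep their original mixed casing when mapped to attribute names.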
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
if data is None:
data = '-'
# '\r' would break the table, so remove it.
data = six.text_type(data).replace("\r", "")
row.append(data)
pt.add_row(row)
if sortby is not None:
result = encodeutils.safe_encode(pt.get_string(sortby=sortby))
else:
result = encodeutils.safe_encode(pt.get_string())
if six.PY3:
result = result.decode()
print(result)
def _flatten(data, prefix=None):
"""Flatten a dict, using name as a prefix for the keys of dict.
    >>> list(_flatten({'arch': 'x86_64'}, 'cpu_info'))
    [('cpu_info_arch', 'x86_64')]
"""
if isinstance(data, dict):
for key, value in six.iteritems(data):
new_key = '%s_%s' % (prefix, key) if prefix else key
if isinstance(value, (dict, list)):
for item in _flatten(value, new_key):
yield item
else:
yield new_key, value
else:
yield prefix, data
def flatten_dict(data):
"""Return a new dict whose sub-dicts have been merged into the
original. Each of the parents keys are prepended to the child's
to prevent collisions. Any string elements will be JSON parsed
before flattening.
>>> flatten_dict({'service': {'host':'cloud9@compute-068', 'id': 143}})
    {'service_host': 'cloud9@compute-068', 'service_id': 143}
"""
data = data.copy()
# Try and decode any nested JSON structures.
for key, value in six.iteritems(data):
if isinstance(value, six.string_types):
try:
data[key] = json.loads(value)
except ValueError:
pass
return dict(_flatten(data))
def print_dict(d, dict_property="Property", dict_value="Value", wrap=0):
pt = prettytable.PrettyTable([dict_property, dict_value], caching=False)
pt.align = 'l'
for k, v in sorted(d.items()):
# convert dict to str to check length
if isinstance(v, (dict, list)):
v = jsonutils.dumps(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and (r'\n' in v or '\r' in v):
# '\r' would break the table, so remove it.
if '\r' in v:
v = v.replace('\r', '')
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
if v is None:
v = '-'
pt.add_row([k, v])
result = encodeutils.safe_encode(pt.get_string())
if six.PY3:
result = result.decode()
print(result)
def find_resource(manager, name_or_id, wrap_exception=True, **find_args):
"""Helper for the _find_* methods."""
    # for a str id which is not a uuid (currently used when searching for
    # Flavor, Keypair and hypervisor in cells environments)
if getattr(manager, 'is_alphanum_id_allowed', False):
try:
return manager.get(name_or_id)
except exceptions.NotFound:
pass
# first try to get entity as uuid
try:
tmp_id = encodeutils.safe_encode(name_or_id)
if six.PY3:
tmp_id = tmp_id.decode()
uuid.UUID(tmp_id)
return manager.get(tmp_id)
except (TypeError, ValueError, exceptions.NotFound):
pass
# then try to get entity as name
try:
try:
resource = getattr(manager, 'resource_class', None)
name_attr = resource.NAME_ATTR if resource else 'name'
kwargs = {name_attr: name_or_id}
kwargs.update(find_args)
return manager.find(**kwargs)
except exceptions.NotFound:
pass
# then try to find entity by human_id
try:
return manager.find(human_id=name_or_id, **find_args)
except exceptions.NotFound:
pass
except exceptions.NoUniqueMatch:
msg = (_("Multiple %(class)s matches found for '%(name)s', use an ID "
"to be more specific.") %
{'class': manager.resource_class.__name__.lower(),
'name': name_or_id})
if wrap_exception:
raise exceptions.CommandError(msg)
raise exceptions.NoUniqueMatch(msg)
# finally try to get entity as integer id
try:
return manager.get(int(name_or_id))
except (TypeError, ValueError, exceptions.NotFound):
msg = (_("No %(class)s with a name or ID of '%(name)s' exists.") %
{'class': manager.resource_class.__name__.lower(),
'name': name_or_id})
if wrap_exception:
raise exceptions.CommandError(msg)
raise exceptions.NotFound(404, msg)
def format_servers_list_networks(server):
output = []
for (network, addresses) in server.networks.items():
if len(addresses) == 0:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
def format_security_groups(groups):
return ', '.join(group['name'] for group in groups)
def _format_field_name(attr):
"""Format an object attribute in a human-friendly way."""
# Split at ':' and leave the extension name as-is.
parts = attr.rsplit(':', 1)
name = parts[-1].replace('_', ' ')
# Don't title() on mixed case
if name.isupper() or name.islower():
name = name.title()
parts[-1] = name
return ': '.join(parts)
def make_field_formatter(attr, filters=None):
"""
Given an object attribute, return a formatted field name and a
formatter suitable for passing to print_list.
Optionally pass a dict mapping attribute names to a function. The function
will be passed the value of the attribute and should return the string to
display.
"""
filter_ = None
if filters:
filter_ = filters.get(attr)
def get_field(obj):
field = getattr(obj, attr, '')
if field and filter_:
field = filter_(field)
return field
name = _format_field_name(attr)
formatter = get_field
return name, formatter
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
def do_action_on_many(action, resources, success_msg, error_msg):
"""Helper to run an action on many resources."""
failure_flag = False
for resource in resources:
try:
action(resource)
print(success_msg % resource)
except Exception as e:
failure_flag = True
print(e)
if failure_flag:
raise exceptions.CommandError(error_msg)
def load_entry_point(ep_name, name=None):
"""Try to load the entry point ep_name that matches name."""
for ep in pkg_resources.iter_entry_points(ep_name, name=name):
try:
# FIXME(dhellmann): It would be better to use stevedore
# here, since it abstracts this difference in behavior
# between versions of setuptools, but this seemed like a
# more expedient fix.
if hasattr(ep, 'resolve') and hasattr(ep, 'require'):
return ep.resolve()
else:
return ep.load(require=False)
except (ImportError, pkg_resources.UnknownExtra, AttributeError):
continue
def is_integer_like(val):
"""Returns validation of a value as an integer."""
try:
int(val)
return True
except (TypeError, ValueError, AttributeError):
return False
def validate_flavor_metadata_keys(keys):
for key in keys:
valid_name = VALID_KEY_REGEX.match(key)
if not valid_name:
msg = _('Invalid key: "%s". Keys may only contain letters, '
'numbers, spaces, underscores, periods, colons and '
'hyphens.')
raise exceptions.CommandError(msg % key)
@contextlib.contextmanager
def record_time(times, enabled, *args):
"""Record the time of a specific action.
:param times: A list of tuples holds time data.
:type times: list
:param enabled: Whether timing is enabled.
:type enabled: bool
:param *args: Other data to be stored besides time data, these args
will be joined to a string.
"""
if not enabled:
yield
else:
start = time.time()
yield
end = time.time()
times.append((' '.join(args), start, end))
def get_function_name(func):
if six.PY2:
if hasattr(func, "im_class"):
return "%s.%s" % (func.im_class, func.__name__)
else:
return "%s.%s" % (func.__module__, func.__name__)
else:
return "%s.%s" % (func.__module__, func.__qualname__)
| 30.112971
| 79
| 0.611713
|
58b84fef2978a4dd40cacd53e1c4a7317cd0a0ea
| 66
|
py
|
Python
|
kaggle_utils/__init__.py
|
upura/solafune-light
|
ca9357466e8ecd866845da58ae89fcfcdd5c7f40
|
[
"MIT"
] | 2
|
2021-04-30T14:31:47.000Z
|
2021-05-01T12:18:10.000Z
|
kaggle_utils/__init__.py
|
upura/solafune-light
|
ca9357466e8ecd866845da58ae89fcfcdd5c7f40
|
[
"MIT"
] | null | null | null |
kaggle_utils/__init__.py
|
upura/solafune-light
|
ca9357466e8ecd866845da58ae89fcfcdd5c7f40
|
[
"MIT"
] | null | null | null |
from . import features, models, preprocess, utils, visualizations
| 33
| 65
| 0.80303
|
4983aed7da2f685f7f8373fd76dfe51b6b9b7782
| 43,992
|
py
|
Python
|
repo/subcmds/sync.py
|
oux/google-repo
|
10d9569981c38550564ee71771cf2df5f32daeb8
|
[
"Apache-2.0"
] | null | null | null |
repo/subcmds/sync.py
|
oux/google-repo
|
10d9569981c38550564ee71771cf2df5f32daeb8
|
[
"Apache-2.0"
] | null | null | null |
repo/subcmds/sync.py
|
oux/google-repo
|
10d9569981c38550564ee71771cf2df5f32daeb8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
from repo.pyversion import is_python3
if is_python3():
import http.cookiejar as cookielib
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
else:
import cookielib
import imp
import urllib2
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from repo import event_log
from repo.git_command import GIT, git_require
from repo.git_config import GetUrlCookieFile
from repo.git_refs import R_HEADS, HEAD
from repo import gitc_utils
from repo.project import Project
from repo.project import RemoteSpec
from repo.command import Command, MirrorSafeCommand
from repo.error import RepoChangedException, GitError, ManifestParseError
from repo import platform_utils
from repo.project import SyncBuffer
from repo.progress import Progress
from repo import wrapper
from repo.manifest_xml import GitcManifest
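# Default fetch-time estimate used for projects with no recorded fetch time (see _FetchTimes.Get).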
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class _CheckoutError(Exception):
"""Internal error thrown in _CheckoutOne() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
p.add_option('--fail-fast',
dest='fail_fast', action='store_true',
help='stop syncing after first error is hit')
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('--no-manifest-update', '--nmu',
dest='mp_update', action='store_false', default='true',
help='use the existing manifest checkout as-is. '
'(do not update to the latest revision)')
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-v', '--verbose',
dest='output_mode', action='store_true',
help='show all sync output')
p.add_option('-q', '--quiet',
dest='output_mode', action='store_false',
help='only show errors')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--clone-bundle', action='store_true',
help='enable use of /clone.bundle on HTTP/HTTPS')
p.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='tags', default=True, action='store_false',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--retry-fetches',
default=0, action='store', type='int',
help='number of times to retry fetches on transient errors')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='repo_verify', default=True, action='store_false',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and opt.fail_fast:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
clone_filter):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
      pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
clone_filter: Filter for use in a partial clone.
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
verbose=opt.verbose,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=opt.clone_bundle,
tags=opt.tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
prune=opt.prune,
clone_filter=clone_filter)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.fail_fast:
raise _FetchError()
fetched.add(project.gitdir)
pm.update(msg=project.name)
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)'
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt, err_event):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects),
always_print_percentage=opt.quiet)
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event,
clone_filter=self.manifest.CloneFilter)
if self.jobs > 1:
t = _threading.Thread(target=self._FetchProjectList,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects, opt, err_event)
return fetched
def _CheckoutWorker(self, opt, sem, project, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _CheckoutOne.
Args:
opt: Program options returned from optparse. See _Options().
      project: Project to check out.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the
_CheckoutOne docstring for details.
"""
try:
return self._CheckoutOne(opt, project, *args, **kwargs)
finally:
sem.release()
def _CheckoutOne(self, opt, project, lock, pm, err_event, err_results):
"""Checkout work tree for one project
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to checkout.
lock: Lock for accessing objects that are shared amongst multiple
_CheckoutWorker() threads.
      pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
err_results: A list of strings, paths to git repos where checkout
failed.
Returns:
      Whether the checkout was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
syncbuf = SyncBuffer(self.manifest.manifestProject.config,
detach_head=opt.detach_head)
success = False
try:
try:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
success = syncbuf.Finish()
did_lock = True
if not success:
err_event.set()
print('error: Cannot checkout %s' % (project.name),
file=sys.stderr)
raise _CheckoutError()
pm.update(msg=project.name)
except _CheckoutError:
pass
except Exception as e:
print('error: Cannot checkout %s: %s: %s' %
(project.name, type(e).__name__, str(e)),
file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
if not success:
err_results.append(project.relpath)
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, finish, success)
return success
def _Checkout(self, all_projects, opt, err_event, err_results):
"""Checkout projects listed in all_projects
Args:
all_projects: List of all projects that should be checked out.
opt: Program options returned from optparse. See _Options().
err_event: We'll set this event in the case of an error (after printing
out info about the error).
err_results: A list of strings, paths to git repos where checkout
failed.
"""
# Perform checkouts in multiple threads when we are using partial clone.
# Without partial clone, all needed git objects are already downloaded,
# in this situation it's better to use only one process because the checkout
# would be mostly disk I/O; with partial clone, the objects are only
# downloaded when demanded (at checkout time), which is similar to the
# Sync_NetworkHalf case and parallelism would be helpful.
if self.manifest.CloneFilter:
syncjobs = self.jobs
else:
syncjobs = 1
lock = _threading.Lock()
pm = Progress('Checking out projects', len(all_projects))
threads = set()
sem = _threading.Semaphore(syncjobs)
for project in all_projects:
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
if project.worktree:
kwargs = dict(opt=opt,
sem=sem,
project=project,
lock=lock,
pm=pm,
err_event=err_event,
err_results=err_results)
if syncjobs > 1:
t = _threading.Thread(target=self._CheckoutWorker,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._CheckoutWorker(**kwargs)
for t in threads:
t.join()
pm.end()
def _GCProjects(self, projects, opt, err_event):
gc_gitdirs = {}
for project in projects:
# Make sure pruning never kicks in with shared projects.
if (not project.use_git_worktrees and
len(project.manifest.GetProjectsWithName(project.name)) > 1):
print('%s: Shared project %s found, disabling pruning.' %
(project.relpath, project.name))
if git_require((2, 7, 0)):
project.EnableRepositoryExtension('preciousObjects')
else:
# This isn't perfect, but it's the best we can do with old git.
print('%s: WARNING: shared projects are unreliable when using old '
'versions of git; please upgrade to git-2.7.0+.'
% (project.relpath,),
file=sys.stderr)
project.config.SetString('gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
if multiprocessing:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
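    # Split the available CPUs across the concurrent gc jobs so each gets at least one pack thread.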
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except Exception:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self, opt):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
with open(file_path, 'r') as fd:
old_project_paths = fd.read().split('\n')
# In reversed order, so subfolders are deleted before parent folder.
for path in sorted(old_project_paths, reverse=True):
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest=self.manifest,
name=path,
remote=RemoteSpec('origin'),
gitdir=gitdir,
objdir=gitdir,
use_git_worktrees=os.path.isfile(gitdir),
worktree=os.path.join(self.manifest.topdir, path),
relpath=path,
revisionExpr='HEAD',
revisionId=None,
groups=None)
if not project.DeleteWorktree(
quiet=opt.quiet,
force=opt.force_remove_dirty):
return 1
new_project_paths.sort()
with open(file_path, 'w') as fd:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
return 0
def _SmartSyncSetup(self, opt, smart_sync_manifest_path):
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
if 'SYNC_TARGET' in os.environ:
target = os.environ['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif ('TARGET_PRODUCT' in os.environ and
'TARGET_BUILD_VARIANT' in os.environ):
target = '%s-%s' % (os.environ['TARGET_PRODUCT'],
os.environ['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = os.path.basename(smart_sync_manifest_path)
try:
with open(smart_sync_manifest_path, 'w') as f:
f.write(manifest_str)
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
return manifest_name
def _UpdateManifestProject(self, opt, mp, manifest_name):
"""Fetch & update the local manifest project."""
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose,
current_branch_only=opt.current_branch_only,
tags=opt.tags,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
submodules=self.manifest.HasSubmodules,
clone_filter=self.manifest.CloneFilter)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(opt.manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
def ValidateOptions(self, opt, args):
if opt.force_broken:
print('warning: -f/--force-broken is now the default behavior, and the '
'options are deprecated', file=sys.stderr)
if opt.network_only and opt.detach_head:
self.OptionParser.error('cannot combine -n and -d')
if opt.network_only and opt.local_only:
self.OptionParser.error('cannot combine -n and -l')
if opt.manifest_name and opt.smart_sync:
self.OptionParser.error('cannot combine -m and -s')
if opt.manifest_name and opt.smart_tag:
self.OptionParser.error('cannot combine -m and -t')
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
self.OptionParser.error('-u and -p may only be combined with -s or -t')
if None in [opt.manifest_server_username, opt.manifest_server_password]:
self.OptionParser.error('both -u and -p must be given')
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
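      # Cap the job count so concurrent fetches stay comfortably under the soft file-descriptor limit.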
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, 'smart_sync_override.xml')
if opt.clone_bundle is None:
opt.clone_bundle = self.manifest.CloneBundle
if opt.smart_sync or opt.smart_tag:
manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path)
else:
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
err_event = _threading.Event()
rp = self.manifest.repoProject
rp.PreSync()
cb = rp.CurrentBranch
if cb:
base = rp.GetBranch(cb).merge
if not base or not base.startswith('refs/heads/'):
print('warning: repo is not tracking a remote branch, so it will not '
'receive updates; run `repo init --repo-rev=stable` to fix.',
file=sys.stderr)
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.mp_update:
print('Skipping update of local manifest project.')
else:
self._UpdateManifestProject(opt, mp, manifest_name)
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
err_network_sync = False
err_update_projects = False
err_checkout = False
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt, err_event)
_PostRepoFetch(rp, opt.repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr)
sys.exit(1)
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
        # Stop us from endlessly re-fetching actually-missing repos: if the set
        # of missing repos has not changed since the last fetch, we break.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt, err_event))
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
err_network_sync = True
if opt.fail_fast:
print('\nerror: Exited sync due to fetch errors.\n'
'Local checkouts *not* updated. Resolve network issues & '
'retry.\n'
'`repo sync -l` will update some local checkouts.',
file=sys.stderr)
sys.exit(1)
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
err_event.set()
err_update_projects = True
if opt.fail_fast:
print('\nerror: Local checkouts *not* updated.', file=sys.stderr)
sys.exit(1)
err_results = []
self._Checkout(all_projects, opt, err_event, err_results)
if err_event.isSet():
err_checkout = True
# NB: We don't exit here because this is the last step.
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Unable to fully sync the tree.', file=sys.stderr)
if err_network_sync:
print('error: Downloading network changes failed.', file=sys.stderr)
if err_update_projects:
print('error: Updating local project lists failed.', file=sys.stderr)
if err_checkout:
print('error: Checking out local projects failed.', file=sys.stderr)
if err_results:
print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr)
print('Try re-running with "-j1 --fail-fast" to exit at the first error.',
file=sys.stderr)
sys.exit(1)
if not opt.quiet:
print('repo sync has finished successfully.')
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, repo_verify=True, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if not repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir
env['GNUPGHOME'] = gpg_dir
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
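  # Smoothing factor for the exponential moving average of per-project fetch times.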
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a * t) + ((1 - a) * old)
def _Load(self):
if self._times is None:
try:
with open(self._path) as f:
self._times = json.load(f)
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
with open(self._path, 'w') as f:
json.dump(self._times, f, indent=2)
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport using urllib2
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport, the real url
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
tmpcookiefile.write("# HTTP Cookie File")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy})
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
while 1:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
| 35.678832
| 93
| 0.634093
|
ef7787858c17aea9a00858a209849ffc15807e80
| 3,711
|
py
|
Python
|
modules/residual.py
|
tansyab1/LightNetPlus
|
ed226e5454b2144063a6d8132b07c90e6a64e2d3
|
[
"MIT"
] | 240
|
2019-02-27T08:39:06.000Z
|
2021-05-31T19:38:17.000Z
|
modules/residual.py
|
tansyab1/LightNetPlus
|
ed226e5454b2144063a6d8132b07c90e6a64e2d3
|
[
"MIT"
] | 8
|
2019-04-22T10:59:47.000Z
|
2021-03-19T15:38:52.000Z
|
modules/residual.py
|
tansyab1/LightNetPlus
|
ed226e5454b2144063a6d8132b07c90e6a64e2d3
|
[
"MIT"
] | 56
|
2019-04-18T03:34:17.000Z
|
2021-04-25T09:32:50.000Z
|
import torch.nn as nn
from .attentions import SEBlock
from collections import OrderedDict
from modules.inplace_abn.iabn import ABN
class IdentityResidualBlock(nn.Module):
def __init__(self,
in_channels,
channels,
stride=1,
dilation=1,
groups=1,
norm_act=ABN,
use_se=False,
dropout=None):
"""Configurable identity-mapping residual block
Parameters
----------
in_channels : int
Number of input channels.
channels : list of int
Number of channels in the internal feature maps. Can either have two or three elements: if three construct
a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then
`3 x 3` then `1 x 1` convolutions.
stride : int
Stride of the first `3 x 3` convolution
dilation : int
Dilation to apply to the `3 x 3` convolutions.
groups : int
Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with
bottleneck blocks.
norm_act : callable
Function to create normalization / activation Module.
dropout: callable
Function to create Dropout Module.
"""
super(IdentityResidualBlock, self).__init__()
# Check parameters for inconsistencies
if len(channels) != 2 and len(channels) != 3:
raise ValueError("channels must contain either two or three values")
if len(channels) == 2 and groups != 1:
raise ValueError("groups > 1 are only valid if len(channels) == 3")
is_bottleneck = len(channels) == 3
need_proj_conv = stride != 1 or in_channels != channels[-1]
self.bn1 = norm_act(in_channels)
if not is_bottleneck:
layers = [
("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False,
dilation=dilation)),
("bn2", norm_act(channels[0])),
("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,
dilation=dilation))
]
if dropout is not None:
layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
else:
layers = [
("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),
("bn2", norm_act(channels[0])),
("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,
groups=groups, dilation=dilation)),
("bn3", norm_act(channels[1])),
("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))
]
if use_se:
layers.append(("se_block", SEBlock(channels[2], reduct_ratio=16)))
if dropout is not None:
layers = layers[0:5] + [("dropout", dropout())] + layers[5:]
self.convs = nn.Sequential(OrderedDict(layers))
if need_proj_conv:
self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)
def forward(self, x):
if hasattr(self, "proj_conv"):
bn1 = self.bn1(x)
shortcut = self.proj_conv(bn1)
else:
shortcut = x.clone()
bn1 = self.bn1(x)
out = self.convs(bn1)
out.add_(shortcut)
return out
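# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming torch is available alongside the imports above, a bottleneck variant
# with a projection shortcut could be instantiated like this:
#
#   import torch
#   block = IdentityResidualBlock(
#       in_channels=64,
#       channels=[32, 32, 128],   # three values -> 1x1 / 3x3 / 1x1 bottleneck path
#       stride=2,                 # stride != 1 -> proj_conv shortcut is created
#       groups=4,                 # grouped 3x3 conv (ResNeXt-style), bottleneck only
#       use_se=True)              # appends an SEBlock after the last convolution
#   y = block(torch.randn(1, 64, 56, 56))   # expected output shape: (1, 128, 28, 28)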
| 40.336957
| 118
| 0.551334
|
4ff7404227a619b99c8ad665c500939f7e51250c
| 1,198
|
py
|
Python
|
ask-sdk-webservice-support/ask_sdk_webservice_support/__version__.py
|
NotTheEconomist/alexa-skills-kit-sdk-for-python
|
ecb0c212d7f92dfdd19b873e1e718ed108875108
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-webservice-support/ask_sdk_webservice_support/__version__.py
|
NotTheEconomist/alexa-skills-kit-sdk-for-python
|
ecb0c212d7f92dfdd19b873e1e718ed108875108
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-webservice-support/ask_sdk_webservice_support/__version__.py
|
NotTheEconomist/alexa-skills-kit-sdk-for-python
|
ecb0c212d7f92dfdd19b873e1e718ed108875108
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
__pip_package_name__ = 'ask-sdk-webservice-support'
__description__ = ('The ASK SDK Webservice package provides support for '
'running skills, built using the Alexa Skills Kit '
'SDK, as web applications.')
__url__ = 'https://github.com/alexa/alexa-skills-kit-sdk-for-python'
__version__ = '0.1.2'
__author__ = 'Alexa Skills Kit'
__author_email__ = 'ask-sdk-dynamic@amazon.com'
__license__ = 'Apache 2.0'
__keywords__ = ['ASK SDK', 'Alexa Skills Kit', 'Alexa', 'WebApp']
__install_requires__ = ["ask-sdk-model>=1.0.0", "ask-sdk-core>=1.1.0",
"cryptography>=2.3.0"]
| 38.645161
| 73
| 0.700334
|
029b2b23d97fe778a8680492ad41be3735d105f0
| 664
|
py
|
Python
|
Chapter01/test_simul.py
|
arifmudi/Advanced-Python-Programming-Second-Edition
|
55e8fb16127873b5285980301f9a0331d135253c
|
[
"MIT"
] | 19
|
2021-06-11T11:23:44.000Z
|
2022-03-27T21:15:26.000Z
|
Chapter01/test_simul.py
|
arifmudi/Advanced-Python-Programming-Second-Edition
|
55e8fb16127873b5285980301f9a0331d135253c
|
[
"MIT"
] | null | null | null |
Chapter01/test_simul.py
|
arifmudi/Advanced-Python-Programming-Second-Edition
|
55e8fb16127873b5285980301f9a0331d135253c
|
[
"MIT"
] | 16
|
2021-04-18T12:43:02.000Z
|
2022-03-26T05:47:34.000Z
|
from simul import Particle, ParticleSimulator
def test_evolve(benchmark):
particles = [
Particle(0.3, 0.5, +1),
Particle(0.0, -0.5, -1),
Particle(-0.1, -0.4, +3),
]
simulator = ParticleSimulator(particles)
simulator.evolve(0.1)
p0, p1, p2 = particles
def fequal(a, b):
return abs(a - b) < 1e-5
assert fequal(p0.x, 0.2102698450356825)
assert fequal(p0.y, 0.5438635787296997)
assert fequal(p1.x, -0.0993347660567358)
assert fequal(p1.y, -0.4900342888538049)
assert fequal(p2.x, 0.1913585038252641)
assert fequal(p2.y, -0.3652272210744360)
benchmark(simulator.evolve, 0.1)
| 22.133333
| 45
| 0.634036
|
37d83f54cd19d048995fc88806ceaa63aefa9442
| 2,185
|
py
|
Python
|
pyqldbsamples/get_digest.py
|
simonz-bq/amazon-qldb-dmv-sample-python
|
ca27d40d992e63e5dec8e1a431517fc745f8185c
|
[
"MIT-0"
] | null | null | null |
pyqldbsamples/get_digest.py
|
simonz-bq/amazon-qldb-dmv-sample-python
|
ca27d40d992e63e5dec8e1a431517fc745f8185c
|
[
"MIT-0"
] | null | null | null |
pyqldbsamples/get_digest.py
|
simonz-bq/amazon-qldb-dmv-sample-python
|
ca27d40d992e63e5dec8e1a431517fc745f8185c
|
[
"MIT-0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This code expects that you have AWS credentials setup per:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html
from logging import basicConfig, getLogger, INFO
from boto3 import client
from pyqldbsamples.constants import Constants
from pyqldbsamples.qldb.qldb_string_utils import digest_response_to_string
logger = getLogger(__name__)
basicConfig(level=INFO)
qldb_client = client('qldb')
def get_digest_result(name):
"""
Get the digest of a ledger's journal.
:type name: str
:param name: Name of the ledger to operate on.
:rtype: dict
    :return: The digest, as a 256-bit hash value, together with a block address.
"""
logger.info("Let's get the current digest of the ledger named {}".format(name))
result = qldb_client.get_digest(Name=name)
logger.info('Success. LedgerDigest: {}.'.format(digest_response_to_string(result)))
return result
if __name__ == '__main__':
"""
This is an example for retrieving the digest of a particular ledger.
"""
try:
digest = get_digest_result(Constants.LEDGER_NAME)
except Exception as e:
logger.exception('Unable to get a ledger digest!')
raise e
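# --- Note on the returned value (assumption based on the AWS QLDB API, not this file) ---
# The dict returned by qldb_client.get_digest(...) is expected to include a
# 'Digest' entry (the 256-bit hash as bytes) and a 'DigestTipAddress' value holder;
# digest_response_to_string() above is what renders it for logging, e.g.:
#
#   digest = get_digest_result('vehicle-registration')   # hypothetical ledger name
#   digest['Digest'], digest['DigestTipAddress']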
| 39.017857
| 87
| 0.746911
|
5d8ac58464c237c6175560f650af0ba21f18e58b
| 13,096
|
py
|
Python
|
src/models.py
|
le773/open-solution-home-credit
|
3f3e82ce4dd19b44f3b22d3d86669b2c13b893b5
|
[
"MIT"
] | 1
|
2019-01-21T08:11:28.000Z
|
2019-01-21T08:11:28.000Z
|
src/models.py
|
le773/open-solution-home-credit
|
3f3e82ce4dd19b44f3b22d3d86669b2c13b893b5
|
[
"MIT"
] | null | null | null |
src/models.py
|
le773/open-solution-home-credit
|
3f3e82ce4dd19b44f3b22d3d86669b2c13b893b5
|
[
"MIT"
] | null | null | null |
from attrdict import AttrDict
from deepsense import neptune
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from steppy.base import BaseTransformer
from keras import backend as K
from keras.models import Sequential
from keras.optimizers import Adam, SGD
from toolkit.keras_transformers.models import ClassifierXY
from toolkit.sklearn_transformers.models import SklearnClassifier
from keras.layers import Dense, Activation, BatchNormalization, Dropout
from keras.regularizers import l1_l2
import xgboost as xgb
import catboost as ctb
from .callbacks import neptune_monitor_lgbm, NeptuneMonitor
from .utils import get_logger
logger = get_logger()
ctx = neptune.Context()
# class IdentityOperation(BaseTransformer):
# """Transformer that performs identity operation, f(x)=x."""
#
# def transform(self, **kwargs):
# return kwargs
#
# def persist(self, filepath):
# logger.info('"IdentityOperation" is not persistable.')
# pass
class XGBoost(BaseTransformer):
def __init__(self, **params):
super().__init__()
logger.info('initializing XGBoost...')
self.params = params
self.training_params = ['nrounds', 'early_stopping_rounds']
self.evaluation_function = None
@property
def model_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param not in self.training_params})
@property
def training_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param in self.training_params})
def fit(self,
X, y,
X_valid, y_valid,
feature_names=None,
feature_types=None,
**kwargs):
train = xgb.DMatrix(X,
label=y,
feature_names=feature_names,
feature_types=feature_types)
valid = xgb.DMatrix(X_valid,
label=y_valid,
feature_names=feature_names,
feature_types=feature_types)
evaluation_results = {}
self.estimator = xgb.train(params=self.model_config,
dtrain=train,
evals=[(train, 'train'), (valid, 'valid')],
evals_result=evaluation_results,
num_boost_round=self.training_config.nrounds,
early_stopping_rounds=self.training_config.early_stopping_rounds,
verbose_eval=self.model_config.verbose,
feval=self.evaluation_function)
return self
def transform(self, X, y=None, feature_names=None, feature_types=None, **kwargs):
X_DMatrix = xgb.DMatrix(X,
label=y,
feature_names=feature_names,
feature_types=feature_types)
prediction = self.estimator.predict(X_DMatrix)
return {'prediction': prediction}
def load(self, filepath):
self.estimator = xgb.Booster(params=self.model_config)
self.estimator.load_model(filepath)
return self
def persist(self, filepath):
self.estimator.save_model(filepath)
# Params
# {'device': 'cpu',
# 'boosting_type': 'gbdt',
# 'objective': 'binary',
# 'metric': 'auc',
# 'is_unbalance': False,
# 'scale_pos_weight': 1.0,
# 'learning_rate': 0.02,
# 'max_bin': 300,
# 'max_depth': -1,
# 'num_leaves': 42,
# 'min_child_samples': 70,
# 'subsample': 1.0,
# 'colsample_bytree': 0.03,
# 'subsample_freq': 1,
# 'min_gain_to_split': 0.5,
# 'reg_lambda': 100.0,
# 'reg_alpha': 0.0,
# 'nthread': 8,
# 'number_boosting_rounds': 5000,
# 'early_stopping_rounds': 100,
# 'verbose': 1,
# 'callbacks_on': 1
# }
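# A dict like the one sketched above is meant to be splatted into the LightGBM
# transformer defined below; 'number_boosting_rounds' and 'early_stopping_rounds'
# end up in training_config while everything else forms model_config (illustrative):
#
#   model = LightGBM(name='light_gbm', **params_dict)   # params_dict as sketched above
#   model.training_config  # -> {'number_boosting_rounds': ..., 'early_stopping_rounds': ...}
#   model.model_config     # -> the remaining booster parameters (objective, metric, ...)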
class LightGBM(BaseTransformer):
def __init__(self, name=None, **params):
super().__init__()
logger.info('initializing LightGBM...')
self.params = params
# logger.info('self.params')
# logger.info(self.params)
self.training_params = ['number_boosting_rounds', 'early_stopping_rounds']
self.evaluation_function = None
if params['callbacks_on']:
self.callbacks = callbacks(channel_prefix=name)
else:
self.callbacks = []
@property
def model_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param not in self.training_params})
@property
def training_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param in self.training_params})
def fit(self,
X,
y,
X_valid,
y_valid,
feature_names='auto',
categorical_features='auto',
**kwargs):
evaluation_results = {}
self._check_target_shape_and_type(y, 'y')
self._check_target_shape_and_type(y_valid, 'y_valid')
y = self._format_target(y)
y_valid = self._format_target(y_valid)
logger.info('LightGBM, train data shape {}'.format(X.shape))
logger.info('LightGBM, validation data shape {}'.format(X_valid.shape))
logger.info('LightGBM, train labels shape {}'.format(y.shape))
logger.info('LightGBM, validation labels shape {}'.format(y_valid.shape))
data_train = lgb.Dataset(data=X[0:10000],
label=y[0:10000],
feature_name=feature_names,
categorical_feature=categorical_features,
**kwargs)
data_valid = lgb.Dataset(X_valid[0:2000],
label=y_valid[0:2000],
feature_name=feature_names,
categorical_feature=categorical_features,
**kwargs)
self.estimator = lgb.train(self.model_config,
data_train,
feature_name=feature_names,
categorical_feature=categorical_features,
valid_sets=[data_train, data_valid],
valid_names=['data_train', 'data_valid'],
evals_result=evaluation_results,
num_boost_round=self.training_config.number_boosting_rounds,
early_stopping_rounds=self.training_config.early_stopping_rounds,
verbose_eval=self.model_config.verbose,
feval=self.evaluation_function,
callbacks=self.callbacks,
**kwargs)
return self
def transform(self, X, **kwargs):
prediction = self.estimator.predict(X)
return {'prediction': prediction}
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def _check_target_shape_and_type(self, target, name):
if not any([isinstance(target, obj_type) for obj_type in [pd.Series, np.ndarray, list]]):
raise TypeError(
'"target" must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead.'.format(type(target)))
try:
assert len(target.shape) == 1, '"{}" must be 1-D. It is {}-D instead.'.format(name,
len(target.shape))
except AttributeError:
print('Cannot determine shape of the {}. '
'Type must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead'.format(name,
type(target)))
def _format_target(self, target):
if isinstance(target, pd.Series):
return target.values
elif isinstance(target, np.ndarray):
return target
elif isinstance(target, list):
return np.array(target)
else:
raise TypeError(
'"target" must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead.'.format(type(target)))
class CatBoost(BaseTransformer):
def __init__(self, **kwargs):
self.estimator = ctb.CatBoostClassifier(**kwargs)
def fit(self,
X,
y,
X_valid,
y_valid,
feature_names=None,
categorical_features=None,
**kwargs):
logger.info('Catboost, train data shape {}'.format(X.shape))
logger.info('Catboost, validation data shape {}'.format(X_valid.shape))
logger.info('Catboost, train labels shape {}'.format(y.shape))
logger.info('Catboost, validation labels shape {}'.format(y_valid.shape))
        categorical_indices = self._get_categorical_indices(feature_names, categorical_features)
self.estimator.fit(X[0:5000], y[0:5000],
eval_set=(X_valid[0:1000], y_valid[0:1000]),
                           cat_features=categorical_indices)
return self
def transform(self, X, **kwargs):
prediction = self.estimator.predict_proba(X)[:, 1]
return {'prediction': prediction}
def load(self, filepath):
self.estimator.load_model(filepath)
return self
def persist(self, filepath):
self.estimator.save_model(filepath)
    def _get_categorical_indices(self, feature_names, categorical_features):
if categorical_features:
return [feature_names.index(feature) for feature in categorical_features]
else:
return None
def get_sklearn_classifier(ClassifierClass, **kwargs):
class SklearnBinaryClassifier(SklearnClassifier):
def transform(self, X, y=None, target=1, **kwargs):
prediction = self.estimator.predict_proba(X)[:, target]
return {'prediction': prediction}
return SklearnBinaryClassifier(ClassifierClass(**kwargs))
def callbacks(channel_prefix):
neptune_monitor = neptune_monitor_lgbm(channel_prefix)
return [neptune_monitor]
class NeuralNetwork(ClassifierXY):
def __init__(self, architecture_config, training_config, callbacks_config, **kwargs):
super().__init__(architecture_config, training_config, callbacks_config)
logger.info('initializing Neural Network...')
self.params = kwargs
self.name = 'NeuralNetwork{}'.format(kwargs['suffix'])
self.model_params = architecture_config['model_params']
self.optimizer_params = architecture_config['optimizer_params']
def _build_optimizer(self, **kwargs):
return Adam(**self.optimizer_params)
def _build_loss(self, **kwargs):
return 'binary_crossentropy'
def _build_model(self, input_shape, **kwargs):
K.clear_session()
model = Sequential()
for layer in range(self.model_params['layers']):
config = {key: val[layer] for key, val in self.model_params.items() if key != 'layers'}
if layer == 0:
model.add(Dense(config['neurons'],
kernel_regularizer=l1_l2(l1=config['l1'], l2=config['l2']),
input_shape=input_shape))
else:
model.add(Dense(config['neurons'],
kernel_regularizer=l1_l2(l1=config['l1'], l2=config['l2'])))
if config['batch_norm']:
model.add(BatchNormalization())
model.add(Activation(config['activation']))
model.add(Dropout(config['dropout']))
return model
def _compile_model(self, input_shape):
model = self._build_model(input_shape)
optimizer = self._build_optimizer()
loss = self._build_loss()
model.compile(optimizer=optimizer, loss=loss)
return model
def fit(self, X, y, validation_data, *args, **kwargs):
self.callbacks = self._create_callbacks()
self.model = self._compile_model(input_shape=(X.shape[1], ))
self.model.fit(X[0:5000], y[0:5000],
validation_data=validation_data,
verbose=1,
callbacks=self.callbacks,
**self.training_config)
return self
def _create_callbacks(self, **kwargs):
neptune = NeptuneMonitor(self.name)
return [neptune]
def transform(self, X, y=None, validation_data=None, *args, **kwargs):
predictions = self.model.predict(X, verbose=1)
return {'predicted': np.array([x[0] for x in predictions])}
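# --- Illustrative NeuralNetwork configuration (shapes inferred from _build_model above) ---
# architecture_config = {
#     'model_params': {'layers': 2,
#                      'neurons': [256, 128],
#                      'l1': [0.0, 0.0], 'l2': [1e-3, 1e-3],
#                      'dropout': [0.3, 0.2],
#                      'batch_norm': [True, True],
#                      'activation': ['relu', 'relu']},
#     'optimizer_params': {'lr': 1e-3}}
# net = NeuralNetwork(architecture_config,
#                     training_config={'epochs': 10, 'batch_size': 256},
#                     callbacks_config={}, suffix='_dense')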
| 38.180758
| 118
| 0.581323
|
1d9239afaaa201fa5332544fa1063bd635f0e342
| 1,870
|
py
|
Python
|
blog/models.py
|
Nayakpriya/sushiksha-website
|
4880e7f4fad61e73bdfe75bdf9936fd3d252f68d
|
[
"Apache-2.0"
] | 31
|
2020-11-07T03:23:55.000Z
|
2022-03-16T18:21:45.000Z
|
blog/models.py
|
Nayakpriya/sushiksha-website
|
4880e7f4fad61e73bdfe75bdf9936fd3d252f68d
|
[
"Apache-2.0"
] | 124
|
2020-11-07T03:27:49.000Z
|
2022-03-20T05:28:06.000Z
|
blog/models.py
|
Nayakpriya/sushiksha-website
|
4880e7f4fad61e73bdfe75bdf9936fd3d252f68d
|
[
"Apache-2.0"
] | 44
|
2020-11-09T04:39:39.000Z
|
2022-03-12T09:48:19.000Z
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from users.models import Profile
from tinymce import HTMLField
from PIL import Image
class Categories(models.Model):
title = models.CharField(max_length=20)
def __str__(self):
return self.title
class Post(models.Model):
title = models.CharField(max_length=100)
overview = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
content = HTMLField()
featured = models.BooleanField(default=False)
author = models.ForeignKey(Profile, on_delete=models.CASCADE)
thumbnail = models.ImageField(upload_to='blog-post-thumbnail')
categories = models.ManyToManyField(Categories)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
img = Image.open(self.thumbnail.path)
if img.height > 700 or img.width > 900:
output_size = (600, 900)
img.thumbnail(output_size)
img.save(self.thumbnail.path)
def __str__(self):
return f"{self.author.user.username} post {self.title}"
def get_absolute_url(self):
return reverse('blog-detail', kwargs={'id': str(self.pk) + "--" + self.title})
def get_update_url(self):
return reverse('blog-update', kwargs={'id': self.pk})
def get_delete_url(self):
return reverse('blog-delete', kwargs={'id': self.pk})
@property
def get_comments(self):
return self.comments.all().order_by('-timestamp')
class Comment(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
content = models.TextField()
post = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)
def __str__(self):
return f"{self.user.username}'s Comment"
| 29.21875
| 86
| 0.683957
|
2fd6ca4682275b67dad573bf786204295ad38243
| 28,565
|
py
|
Python
|
src/waldur_core/structure/tests/test_project.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26
|
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_core/structure/tests/test_project.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14
|
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_core/structure/tests/test_project.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32
|
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
import datetime
from unittest import mock
from ddt import data, ddt
from django.test import TransactionTestCase
from django.urls import reverse
from freezegun import freeze_time
from mock_django import mock_signal_receiver
from rest_framework import status, test
from waldur_core.structure import executors, models, permissions, signals
from waldur_core.structure.models import CustomerRole, Project, ProjectRole
from waldur_core.structure.tests import factories, fixtures
from waldur_core.structure.tests import models as test_models
from waldur_core.structure.utils import move_project
class ProjectPermissionGrantTest(TransactionTestCase):
def setUp(self):
self.project = factories.ProjectFactory()
self.user = factories.UserFactory()
def test_add_user_returns_created_if_grant_didnt_exist_before(self):
_, created = self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.assertTrue(
created, 'Project permission should have been reported as created'
)
def test_add_user_returns_not_created_if_grant_existed_before(self):
self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
_, created = self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.assertFalse(
created, 'Project permission should have been reported as not created'
)
def test_add_user_returns_membership(self):
membership, _ = self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.assertEqual(membership.user, self.user)
self.assertEqual(membership.project, self.project)
def test_add_user_returns_same_membership_for_consequent_calls_with_same_arguments(
self,
):
membership1, _ = self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
membership2, _ = self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.assertEqual(membership1, membership2)
def test_add_user_emits_structure_role_granted_if_grant_didnt_exist_before(self):
with mock_signal_receiver(signals.structure_role_granted) as receiver:
self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
receiver.assert_called_once_with(
structure=self.project,
user=self.user,
role=ProjectRole.ADMINISTRATOR,
created_by=None,
sender=Project,
signal=signals.structure_role_granted,
expiration_time=None,
)
def test_add_user_doesnt_emit_structure_role_granted_if_grant_existed_before(self):
self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
with mock_signal_receiver(signals.structure_role_granted) as receiver:
self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.assertFalse(
receiver.called, 'structure_role_granted should not be emitted'
)
class ProjectPermissionRevokeTest(TransactionTestCase):
def setUp(self):
self.project = factories.ProjectFactory()
self.user = factories.UserFactory()
self.removed_by = factories.UserFactory()
def test_remove_user_emits_structure_role_revoked_for_each_role_user_had_in_project(
self,
):
self.project.add_user(self.user, ProjectRole.ADMINISTRATOR)
self.project.add_user(self.user, ProjectRole.MANAGER)
with mock_signal_receiver(signals.structure_role_revoked) as receiver:
self.project.remove_user(self.user, removed_by=self.removed_by)
calls = [
mock.call(
structure=self.project,
user=self.user,
removed_by=self.removed_by,
role=ProjectRole.MANAGER,
sender=Project,
signal=signals.structure_role_revoked,
),
mock.call(
structure=self.project,
user=self.user,
removed_by=self.removed_by,
role=ProjectRole.ADMINISTRATOR,
sender=Project,
signal=signals.structure_role_revoked,
),
]
receiver.assert_has_calls(calls, any_order=True)
        self.assertEqual(receiver.call_count, 2, 'Expected exactly 2 signals emitted')
def test_remove_user_emits_structure_role_revoked_if_grant_existed_before(self):
self.project.add_user(self.user, ProjectRole.MANAGER)
with mock_signal_receiver(signals.structure_role_revoked) as receiver:
self.project.remove_user(self.user, ProjectRole.MANAGER, self.removed_by)
receiver.assert_called_once_with(
structure=self.project,
user=self.user,
role=ProjectRole.MANAGER,
sender=Project,
signal=signals.structure_role_revoked,
removed_by=self.removed_by,
)
def test_remove_user_doesnt_emit_structure_role_revoked_if_grant_didnt_exist_before(
self,
):
with mock_signal_receiver(signals.structure_role_revoked) as receiver:
self.project.remove_user(self.user, ProjectRole.MANAGER)
self.assertFalse(receiver.called, 'structure_role_remove should not be emitted')
@ddt
class ProjectUpdateDeleteTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
# Update tests:
def test_user_can_change_single_project_field(self):
self.client.force_authenticate(self.fixture.staff)
data = {'name': 'New project name'}
response = self.client.patch(
factories.ProjectFactory.get_url(self.fixture.project), data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('New project name', response.data['name'])
self.assertTrue(Project.objects.filter(name=data['name']).exists())
def test_update_backend_id(self):
self.client.force_authenticate(self.fixture.staff)
data = {'backend_id': 'backend_id'}
response = self.client.patch(
factories.ProjectFactory.get_url(self.fixture.project), data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('backend_id', response.data['backend_id'])
self.assertTrue(Project.objects.filter(backend_id=data['backend_id']).exists())
@data('staff', 'owner')
def test_user_can_update_end_date(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
with freeze_time('2020-01-01'):
data = {'end_date': '2021-01-01'}
response = self.client.patch(
factories.ProjectFactory.get_url(self.fixture.project), data
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.fixture.project.refresh_from_db()
self.assertTrue(self.fixture.project.end_date)
@data('manager', 'admin')
def test_user_cannot_update_end_date(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
with freeze_time('2020-01-01'):
data = {'end_date': '2021-01-01'}
response = self.client.patch(
factories.ProjectFactory.get_url(self.fixture.project), data
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.fixture.project.refresh_from_db()
self.assertFalse(self.fixture.project.end_date)
# Delete tests:
def test_user_can_delete_project_belonging_to_the_customer_he_owns(self):
self.client.force_authenticate(self.fixture.owner)
project = self.fixture.project
response = self.client.delete(factories.ProjectFactory.get_url(project))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(Project.objects.filter(pk=project.pk).exists())
def test_soft_delete(self):
project = self.fixture.project
pk = project.pk
project.delete()
self.assertFalse(Project.objects.filter(pk=pk).exists())
self.assertTrue(Project.structure_objects.filter(pk=pk).exists())
@ddt
class ProjectCreateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
def test_staff_can_create_any_project(self):
self.client.force_authenticate(self.fixture.owner)
data = self._get_valid_project_payload(self.fixture.customer)
response = self.client.post(factories.ProjectFactory.get_list_url(), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Project.objects.filter(name=data['name']).exists())
def test_owner_can_create_project_belonging_to_the_customer_he_owns(self):
self.client.force_authenticate(self.fixture.owner)
data = self._get_valid_project_payload(self.fixture.customer)
response = self.client.post(factories.ProjectFactory.get_list_url(), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Project.objects.filter(name=data['name']).exists())
def test_owner_cannot_create_project_not_belonging_to_the_customer_he_owns(self):
self.client.force_authenticate(self.fixture.owner)
data = self._get_valid_project_payload(factories.CustomerFactory())
data['name'] = 'unique name 2'
response = self.client.post(factories.ProjectFactory.get_list_url(), data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Project.objects.filter(name=data['name']).exists())
def test_customer_support_cannot_create_project(self):
self.client.force_authenticate(self.fixture.customer_support)
data = self._get_valid_project_payload(self.fixture.customer)
response = self.client.post(factories.ProjectFactory.get_list_url(), data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertFalse(Project.objects.filter(name=data['name']).exists())
def test_validate_end_date(self):
self.client.force_authenticate(self.fixture.staff)
payload = self._get_valid_project_payload(self.fixture.customer)
payload['end_date'] = '2021-06-01'
with freeze_time('2021-07-01'):
response = self.client.post(
factories.ProjectFactory.get_list_url(), payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(
'Cannot be earlier than the current date.' in str(response.data)
)
self.assertFalse(Project.objects.filter(name=payload['name']).exists())
with freeze_time('2021-06-01'):
response = self.client.post(
factories.ProjectFactory.get_list_url(), payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(
Project.objects.filter(
name=payload['name'],
end_date=datetime.datetime(year=2021, month=6, day=1).date(),
).exists()
)
@data('staff', 'owner')
def test_user_can_set_end_date(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
payload = self._get_valid_project_payload(self.fixture.customer)
payload['end_date'] = '2021-06-01'
with freeze_time('2021-01-01'):
response = self.client.post(
factories.ProjectFactory.get_list_url(), payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(
Project.objects.filter(
name=payload['name'],
end_date=datetime.datetime(year=2021, month=6, day=1).date(),
).exists()
)
@data('manager', 'admin')
def test_user_cannot_set_end_date(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
payload = self._get_valid_project_payload(self.fixture.customer)
payload['end_date'] = '2021-06-01'
with freeze_time('2021-01-01'):
response = self.client.post(
factories.ProjectFactory.get_list_url(), payload
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_project_oecd_fos_2007_code(self):
self.client.force_authenticate(self.fixture.owner)
payload = self._get_valid_project_payload(self.fixture.customer)
payload['oecd_fos_2007_code'] = '1.1'
response = self.client.post(factories.ProjectFactory.get_list_url(), payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual('1.1', response.data['oecd_fos_2007_code'])
def _get_valid_project_payload(self, customer):
return {
'name': 'New project name',
'customer': factories.CustomerFactory.get_url(customer),
}
class ProjectApiPermissionTest(test.APITransactionTestCase):
forbidden_combinations = (
# User role, Project
('admin', 'manager'),
('admin', 'inaccessible'),
('manager', 'admin'),
('manager', 'inaccessible'),
('no_role', 'admin'),
('no_role', 'manager'),
('no_role', 'inaccessible'),
)
def setUp(self):
self.users = {
'owner': factories.UserFactory(),
'admin': factories.UserFactory(),
'manager': factories.UserFactory(),
'no_role': factories.UserFactory(),
'multirole': factories.UserFactory(),
}
self.projects = {
'owner': factories.ProjectFactory(),
'admin': factories.ProjectFactory(),
'manager': factories.ProjectFactory(),
'inaccessible': factories.ProjectFactory(),
}
self.projects['admin'].add_user(self.users['admin'], ProjectRole.ADMINISTRATOR)
self.projects['manager'].add_user(self.users['manager'], ProjectRole.MANAGER)
self.projects['admin'].add_user(
self.users['multirole'], ProjectRole.ADMINISTRATOR
)
self.projects['manager'].add_user(self.users['multirole'], ProjectRole.MANAGER)
self.projects['owner'].customer.add_user(
self.users['owner'], CustomerRole.OWNER
)
# TODO: Test for customer owners
# Creation tests
def test_anonymous_user_cannot_create_project(self):
for old_project in self.projects.values():
project = factories.ProjectFactory(customer=old_project.customer)
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_cannot_create_project_within_customer_he_doesnt_own_but_admins_its_project(
self,
):
self.client.force_authenticate(user=self.users['admin'])
customer = self.projects['admin'].customer
project = factories.ProjectFactory(customer=customer)
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertDictContainsSubset(
{'detail': 'You do not have permission to perform this action.'},
response.data,
)
def test_user_cannot_create_project_within_customer_he_doesnt_own_but_manages_its_project(
self,
):
self.client.force_authenticate(user=self.users['manager'])
customer = self.projects['manager'].customer
project = factories.ProjectFactory(customer=customer)
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertDictContainsSubset(
{'detail': 'You do not have permission to perform this action.'},
response.data,
)
def test_user_cannot_create_project_within_customer_he_is_not_affiliated_with(self):
self.client.force_authenticate(user=self.users['admin'])
project = factories.ProjectFactory()
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertDictContainsSubset(
{'customer': ['Invalid hyperlink - Object does not exist.']}, response.data
)
def test_user_can_create_project_within_customer_he_owns(self):
self.client.force_authenticate(user=self.users['owner'])
customer = self.projects['owner'].customer
project = factories.ProjectFactory(customer=customer)
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_staff_user_can_create_project(self):
staff = factories.UserFactory(is_staff=True)
self.client.force_authenticate(user=staff)
customer = self.projects['inaccessible'].customer
project = factories.ProjectFactory(customer=customer)
response = self.client.post(
reverse('project-list'), self._get_valid_payload(project)
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# List filtration tests
def test_anonymous_user_cannot_list_projects(self):
response = self.client.get(reverse('project-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_can_list_projects_belonging_to_customer_he_owns(self):
self._ensure_list_access_allowed('owner')
def test_user_can_list_projects_he_is_administrator_of(self):
self._ensure_list_access_allowed('admin')
def test_user_can_list_projects_he_is_manager_of(self):
self._ensure_list_access_allowed('manager')
def test_user_cannot_list_projects_he_has_no_role_in(self):
for user_role, project in self.forbidden_combinations:
self._ensure_list_access_forbidden(user_role, project)
def test_user_can_filter_by_projects_where_he_has_manager_role(self):
self.client.force_authenticate(user=self.users['multirole'])
response = self.client.get(reverse('project-list') + '?can_manage')
self.assertEqual(response.status_code, status.HTTP_200_OK)
managed_project_url = self._get_project_url(self.projects['manager'])
administrated_project_url = self._get_project_url(self.projects['admin'])
self.assertIn(
managed_project_url, [resource['url'] for resource in response.data]
)
self.assertNotIn(
administrated_project_url, [resource['url'] for resource in response.data]
)
# Direct instance access tests
def test_anonymous_user_cannot_access_project(self):
project = factories.ProjectFactory()
response = self.client.get(self._get_project_url(project))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_can_access_project_belonging_to_customer_he_owns(self):
self._ensure_direct_access_allowed('owner')
def test_user_can_access_project_he_is_administrator_of(self):
self._ensure_direct_access_allowed('admin')
def test_user_can_access_project_he_is_manager_of(self):
self._ensure_direct_access_allowed('manager')
def test_user_cannot_access_project_he_has_no_role_in(self):
for user_role, project in self.forbidden_combinations:
self._ensure_direct_access_forbidden(user_role, project)
# Helper methods
def _get_project_url(self, project):
return factories.ProjectFactory.get_url(project)
def _get_valid_payload(self, resource=None):
resource = resource or factories.ProjectFactory()
return {
'name': resource.name,
'customer': factories.CustomerFactory.get_url(resource.customer),
}
def _ensure_list_access_allowed(self, user_role):
self.client.force_authenticate(user=self.users[user_role])
response = self.client.get(reverse('project-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
project_url = self._get_project_url(self.projects[user_role])
self.assertIn(project_url, [instance['url'] for instance in response.data])
def _ensure_list_access_forbidden(self, user_role, project):
self.client.force_authenticate(user=self.users[user_role])
response = self.client.get(reverse('project-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
project_url = self._get_project_url(self.projects[project])
self.assertNotIn(project_url, [resource['url'] for resource in response.data])
def _ensure_direct_access_allowed(self, user_role):
self.client.force_authenticate(user=self.users[user_role])
response = self.client.get(self._get_project_url(self.projects[user_role]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def _ensure_direct_access_forbidden(self, user_role, project):
self.client.force_authenticate(user=self.users[user_role])
response = self.client.get(self._get_project_url(self.projects[project]))
# 404 is used instead of 403 to hide the fact that the resource exists at all
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
@ddt
class ProjectUsersListTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.admin = self.fixture.admin
self.manager = self.fixture.manager
self.project = self.fixture.project
self.url = factories.ProjectFactory.get_url(self.project, action='users')
@data('staff', 'owner', 'manager', 'admin')
def test_user_can_list_project_users(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertSetEqual(
{user['role'] for user in response.data}, {'admin', 'manager'}
)
self.assertSetEqual(
{user['uuid'] for user in response.data},
{self.admin.uuid.hex, self.manager.uuid.hex},
)
def test_user_can_not_list_project_users(self):
self.client.force_authenticate(self.fixture.user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class ProjectCountersListTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ServiceFixture()
self.owner = self.fixture.owner
self.admin = self.fixture.admin
self.manager = self.fixture.manager
self.project = self.fixture.project
self.url = factories.ProjectFactory.get_url(self.project, action='counters')
def test_user_can_get_project_counters(self):
self.client.force_authenticate(self.fixture.owner)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'users': 2})
class TestExecutor(executors.BaseCleanupExecutor):
pre_models = (test_models.TestNewInstance,)
@mock.patch('waldur_core.core.WaldurExtension.get_extensions')
class ProjectCleanupTest(test.APITransactionTestCase):
def test_executors_are_sorted_in_topological_order(self, get_extensions):
class ParentExecutor(executors.BaseCleanupExecutor):
pass
class ParentExtension:
@staticmethod
def get_cleanup_executor():
return ParentExecutor
class ChildExecutor(executors.BaseCleanupExecutor):
related_executor = ParentExecutor
class ChildExtension:
@staticmethod
def get_cleanup_executor():
return ChildExecutor
get_extensions.return_value = [ParentExtension, ChildExtension]
self.assertEqual(
[ChildExecutor, ParentExecutor],
executors.ProjectCleanupExecutor.get_executors(),
)
def test_project_without_resources_is_deleted(self, get_extensions):
fixture = fixtures.ServiceFixture()
project = fixture.project
get_extensions.return_value = []
executors.ProjectCleanupExecutor.execute(fixture.project, is_async=False)
self.assertFalse(models.Project.objects.filter(id=project.id).exists())
def test_project_with_resources_and_executors_is_deleted(self, get_extensions):
fixture = fixtures.ServiceFixture()
project = fixture.project
resource = fixture.resource
class TestExtension:
@staticmethod
def get_cleanup_executor():
return TestExecutor
get_extensions.return_value = [TestExtension]
executors.ProjectCleanupExecutor.execute(fixture.project, is_async=False)
self.assertFalse(models.Project.objects.filter(id=project.id).exists())
self.assertFalse(
test_models.TestNewInstance.objects.filter(id=resource.id).exists()
)
class ChangeProjectCustomerTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.old_customer = self.project.customer
self.new_customer = factories.CustomerFactory()
def change_customer(self):
move_project(self.project, self.new_customer)
self.project.refresh_from_db()
def test_change_customer(self):
self.change_customer()
self.assertEqual(self.new_customer, self.project.customer)
def test_if_project_customer_has_been_changed_then_users_permissions_must_be_deleted(
self,
):
self.fixture.admin
self.change_customer()
self.assertFalse(
permissions._has_admin_access(self.fixture.admin, self.project)
)
def test_recalculate_quotas(self):
self.assertEqual(
self.old_customer.quotas.get(name='nc_project_count').usage, 1.0
)
self.assertEqual(self.new_customer.quotas.get(name='nc_project_count').usage, 0)
self.change_customer()
self.assertEqual(self.old_customer.quotas.get(name='nc_project_count').usage, 0)
self.assertEqual(
self.new_customer.quotas.get(name='nc_project_count').usage, 1.0
)
class ProjectMoveTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.url = factories.ProjectFactory.get_url(self.project, action='move_project')
self.customer = factories.CustomerFactory()
def get_response(self, role, customer):
self.client.force_authenticate(role)
payload = {'customer': {'url': factories.CustomerFactory.get_url(customer)}}
return self.client.post(self.url, payload)
def test_move_project_rest(self):
response = self.get_response(self.fixture.staff, self.customer)
self.project.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.project.customer, self.customer)
def test_move_project_is_not_possible_when_customer_the_same(self):
old_customer = self.project.customer
response = self.get_response(self.fixture.staff, old_customer)
self.project.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(self.project.customer, old_customer)
def test_move_project_is_not_possible_when_new_customer_is_blocked(self):
old_customer = self.project.customer
self.customer.blocked = True
self.customer.save(update_fields=['blocked'])
response = self.get_response(self.fixture.staff, self.customer)
self.project.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(self.project.customer, old_customer)
| 39.951049
| 94
| 0.69053
|
26817ae155a2fc0c1d230b2a0c47a3cb471cc190
| 4,794
|
py
|
Python
|
isi_sdk/models/node_sensors_node_sensor.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/node_sensors_node_sensor.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/node_sensors_node_sensor.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class NodeSensorsNodeSensor(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
NodeSensorsNodeSensor - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'count': 'int',
'name': 'str',
'values': 'list[NodeSensorsNodeSensorValue]'
}
self.attribute_map = {
'count': 'count',
'name': 'name',
'values': 'values'
}
self._count = None
self._name = None
self._values = None
@property
def count(self):
"""
Gets the count of this NodeSensorsNodeSensor.
The count of values in this sensor group.
:return: The count of this NodeSensorsNodeSensor.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""
Sets the count of this NodeSensorsNodeSensor.
The count of values in this sensor group.
:param count: The count of this NodeSensorsNodeSensor.
:type: int
"""
self._count = count
@property
def name(self):
"""
Gets the name of this NodeSensorsNodeSensor.
The name of this sensor group.
:return: The name of this NodeSensorsNodeSensor.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this NodeSensorsNodeSensor.
The name of this sensor group.
:param name: The name of this NodeSensorsNodeSensor.
:type: str
"""
self._name = name
@property
def values(self):
"""
Gets the values of this NodeSensorsNodeSensor.
The list of specific sensor value info in this sensor group.
:return: The values of this NodeSensorsNodeSensor.
:rtype: list[NodeSensorsNodeSensorValue]
"""
return self._values
@values.setter
def values(self, values):
"""
Sets the values of this NodeSensorsNodeSensor.
The list of specific sensor value info in this sensor group.
:param values: The values of this NodeSensorsNodeSensor.
:type: list[NodeSensorsNodeSensorValue]
"""
self._values = values
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
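# --- Usage sketch (illustrative) ---
# sensor = NodeSensorsNodeSensor()
# sensor.count = 3
# sensor.name = 'Voltage'
# sensor.values = []              # would hold NodeSensorsNodeSensorValue instances
# sensor.to_dict()                # -> {'count': 3, 'name': 'Voltage', 'values': []}
# print(sensor)                   # __repr__ delegates to to_str()/pformat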
| 27.394286
| 77
| 0.565499
|
64c9a367b172d6194902be36fd462581e274b69a
| 264
|
py
|
Python
|
polymetis/polymetis/python/torchcontrol/policies/__init__.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | 26
|
2021-06-28T15:35:20.000Z
|
2022-03-12T04:34:34.000Z
|
polymetis/polymetis/python/torchcontrol/policies/__init__.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | 2
|
2021-06-28T20:16:56.000Z
|
2021-12-16T21:25:44.000Z
|
polymetis/polymetis/python/torchcontrol/policies/__init__.py
|
CowherdChris/droidlet
|
8d965c1ebc38eceb6f8083c52b1146c1bc17d5e1
|
[
"MIT"
] | 3
|
2021-06-29T14:14:34.000Z
|
2022-03-12T13:26:12.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .ilqr import *
from .move_to import *
from .joint_pd import *
from .ee_pd import *
| 29.333333
| 65
| 0.75
|
34a78beca13e2901887b23454e62a25032241c52
| 13,043
|
py
|
Python
|
nidmm_sfp.py
|
texasaggie97/nimi-sfp
|
2a0af44c2ceebb7898d66c35837de8ada79a36cc
|
[
"MIT"
] | 2
|
2018-02-14T16:03:52.000Z
|
2018-05-23T21:35:36.000Z
|
nidmm_sfp.py
|
texasaggie97/nimi-sfp
|
2a0af44c2ceebb7898d66c35837de8ada79a36cc
|
[
"MIT"
] | 5
|
2018-05-22T21:15:53.000Z
|
2018-08-16T15:27:43.000Z
|
nidmm_sfp.py
|
texasaggie97/nimi-sfp
|
2a0af44c2ceebb7898d66c35837de8ada79a36cc
|
[
"MIT"
] | 2
|
2018-05-14T15:19:12.000Z
|
2018-05-23T14:00:34.000Z
|
import math
import nidmm
import nimodinst
import warnings
import wx
USE_WIT = True
AppBaseClass = wx.App
if USE_WIT:
from wx.lib.mixins.inspection import InspectableApp
AppBaseClass = InspectableApp
def format_meas(reading, function, range, resolution):
unit_modifiers = {-12: "p", -9: "n", -6: "u", -3: "m", 0: "", 3: "k", 6: "M", 9: "G"} # noqa: E501
function_units = {
nidmm.Function.AC_VOLTS: "V AC",
nidmm.Function.DC_VOLTS: "V DC",
nidmm.Function.AC_CURRENT: "A AC",
nidmm.Function.DC_CURRENT: "A DC",
nidmm.Function.DIODE: "V Diode",
nidmm.Function.TWO_WIRE_RES: "Ohm",
nidmm.Function.FOUR_WIRE_RES: "Ohm",
nidmm.Function.PERIOD: "s",
nidmm.Function.FREQ: "Hz",
nidmm.Function.AC_VOLTS_DC_COUPLED: "V AC",
nidmm.Function.CAPACITANCE: "F",
nidmm.Function.INDUCTANCE: "H",
nidmm.Function.TEMPERATURE: "deg C",
}
data_width = int(math.floor(resolution) + 1)
# calculate reading string
temp_range = range
if (range * 1.2) < math.fabs(reading):
temp_range = math.fabs(reading)
order_of_subunit = int(math.floor(math.log10(temp_range) / 3))
if order_of_subunit < -4:
order_of_subunit = -4
elif order_of_subunit > 3:
order_of_subunit = 3
range_order_of_subunit = int(math.floor(math.log10(range) / 3))
if range_order_of_subunit < -4:
range_order_of_subunit = -4
elif range_order_of_subunit > 3:
range_order_of_subunit = 3
# function specific overrides
if function == nidmm.Function.CAPACITANCE:
if order_of_subunit == -1:
order_of_subunit = -2
if range_order_of_subunit == -1:
range_order_of_subunit = -2
elif function == nidmm.Function.DC_VOLTS:
if order_of_subunit == 1:
order_of_subunit = 0
if range_order_of_subunit == 1:
range_order_of_subunit = 0
elif function == nidmm.Function.TEMPERATURE:
order_of_subunit = 0
range_order_of_subunit = 0
# Calculate the divisor, the number by which to divide the reading to account # noqa: E501
# for the subunit (u,m,k,M). The number of digits after the decimal point # noqa: E501
# is equal to the total number of digits minus the digits before the decimal # noqa: E501
    # point. Remember that there needs to be one character for the decimal point # noqa: E501
    # and one character for the - sign if present (+ does not appear, just a space). # noqa: E501
divisor = math.pow(10.0, 3 * order_of_subunit)
range_divisor = math.pow(10.0, 3 * range_order_of_subunit)
if math.isnan(reading):
reading_string = 'OVLD'
elif math.isinf(reading):
reading_string = 'UNDRNG'
else:
precision = data_width - 1 - int(math.floor(1e-9 + math.log10(temp_range / divisor))) # noqa: E501
if temp_range != range:
reading_exp = math.floor(math.log10(math.fabs(reading)))
reading_base = math.fabs(reading / math.pow(10.0, reading_exp))
if 1.2 < reading_base:
precision -= 1
if precision < 0:
precision = 0
if precision == 0:
width = data_width
else:
width = data_width + 1
final_reading = math.fabs(reading / divisor)
final_reading = math.pow(10.0, -precision) * math.floor(0.5 + math.pow(10.0, precision) * final_reading) # noqa: E501
final_reading = math.fabs(final_reading)
sign = '-' if reading < 0 and (reading / divisor) * math.pow(10.0, precision) <= -0.5 else ' ' # noqa: E501
reading_string = sign + "{value:0{width}.{precision}f}".format(value=float(final_reading), width=width, precision=precision, sign=sign) # noqa: E501
# calculate range string
range_string = '{:.2f}'.format(range / range_divisor)
# calculate function string
function_units = function_units[function] if function in function_units else '' # noqa: E501
function_unit_modifiers = unit_modifiers[order_of_subunit * 3]
function_string = function_unit_modifiers + function_units
return function_string, range_string, reading_string
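# --- Illustrative call (values are hypothetical, result strings approximate) ---
# For a DC volts reading on the 0.1 V range, the subunit resolves to "m", so the
# returned strings are scaled to millivolts:
#
#   fn_str, rng_str, reading_str = format_meas(
#       reading=0.0123,                    # measured value in base units (volts)
#       function=nidmm.Function.DC_VOLTS,
#       range=0.1,                         # configured instrument range
#       resolution=5.5)                    # digits of resolution
#   # fn_str == 'mV DC', rng_str == '100.00', reading_str holds the scaled reading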
class SFP(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: SFP.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((500, 600))
# Menu Bar
self.menu_bar = wx.MenuBar()
self.SetMenuBar(self.menu_bar)
# Menu Bar end
self._devices = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN) # noqa: E501
self._function = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN) # noqa: E501
self._digits = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN) # noqa: E501
self._range = wx.SpinCtrlDouble(self, wx.ID_ANY, "1", min=0, max=1000)
self.Bind(wx.EVT_SPINCTRLDOUBLE, self.OnConfigUpdate, self._range)
self._range_display = wx.StaticText(self, wx.ID_ANY, "")
self._reading_display = wx.StaticText(self, wx.ID_ANY, "")
self._status = wx.StaticText(self, wx.ID_ANY, "Good!")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT, self.OnConfigUpdate, self._function)
self.Bind(wx.EVT_TEXT_ENTER, self.OnConfigUpdate, self._function)
self.Bind(wx.EVT_COMBOBOX, self.OnConfigUpdate, self._digits)
self.Bind(wx.EVT_TEXT, self.OnConfigUpdate, self._digits)
self.Bind(wx.EVT_TEXT_ENTER, self.OnConfigUpdate, self._digits)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
# end wxGlade
self._session = None
self._modinst_session = None
self._dev_name = None
# and a menu
menu = wx.Menu()
# add an item to the menu, using \tKeyName automatically
# creates an accelerator, the third param is some help text
# that will show up in the statusbar
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample")
# bind the menu event to an event handler
# self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT)
# and put the menu on the menubar
self.menu_bar.Append(menu, "&File")
self.SetMenuBar(self.menu_bar)
self.CreateStatusBar()
self._modinst_session = nimodinst.Session('nidmm')
for dev in self._modinst_session.devices:
dev_name = dev.device_name
self._devices.Append('{0}'.format(dev_name))
self._devices.SetSelection(0)
for f in list(nidmm.Function.__members__.keys()):
self._function.Append('{0}'.format(f))
self._function.SetSelection(0)
digits = [3.5, 4.5, 5.5, 6.5, 7.5]
for d in digits:
self._digits.Append('{0}'.format(d))
self._digits.SetSelection(2)
self._timer = wx.Timer(self, wx.ID_ANY)
self.Bind(wx.EVT_TIMER, self.OnUpdate, self._timer)
self.OnConfigUpdate(None)
self._timer.Start(250)
def __set_properties(self):
# begin wxGlade: SFP.__set_properties
self.SetTitle("NI-DMM Simple Soft Front Panel")
self._range_display.SetFont(wx.Font(20, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "")) # noqa: E501
self._reading_display.SetFont(wx.Font(20, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "")) # noqa: E501
# end wxGlade
def __do_layout(self):
# begin wxGlade: SFP.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_10 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Status"), wx.HORIZONTAL) # noqa: E501
sizer_5 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Results:"), wx.VERTICAL) # noqa: E501
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Configuration"), wx.VERTICAL) # noqa: E501
sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
label_1 = wx.StaticText(self, wx.ID_ANY, "Device: ")
label_1.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
sizer_3.Add(label_1, 0, 0, 0)
sizer_3.Add(self._devices, 0, 0, 0)
sizer_1.Add(sizer_3, 3, wx.EXPAND, 0)
static_line_1 = wx.StaticLine(self, wx.ID_ANY)
sizer_1.Add(static_line_1, 1, wx.EXPAND, 0)
sizer_1.Add((20, 20), 1, 0, 0)
label_8 = wx.StaticText(self, wx.ID_ANY, "Function:")
sizer_8.Add(label_8, 0, 0, 0)
sizer_8.Add(self._function, 0, 0, 0)
sizer_8.Add((20, 20), 0, 0, 0)
label_9 = wx.StaticText(self, wx.ID_ANY, "Resolution")
sizer_8.Add(label_9, 0, 0, 0)
sizer_8.Add(self._digits, 0, 0, 0)
sizer_8.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_8, 1, wx.EXPAND, 0)
label_10 = wx.StaticText(self, wx.ID_ANY, "Range:")
sizer_9.Add(label_10, 0, 0, 0)
sizer_9.Add(self._range, 0, 0, 0)
sizer_9.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_9, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_2, 15, wx.EXPAND, 0)
sizer_1.Add((20, 20), 1, 0, 0)
label_6 = wx.StaticText(self, wx.ID_ANY, "Range: ")
sizer_6.Add(label_6, 20, 0, 0)
sizer_6.Add(self._range_display, 30, 0, 0)
sizer_6.Add((20, 20), 50, 0, 0)
sizer_5.Add(sizer_6, 1, wx.EXPAND, 0)
label_7 = wx.StaticText(self, wx.ID_ANY, "Reading: ")
sizer_7.Add(label_7, 20, 0, 0)
sizer_7.Add(self._reading_display, 30, 0, 0)
sizer_7.Add((20, 20), 50, 0, 0)
sizer_5.Add(sizer_7, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_5, 15, wx.EXPAND, 0)
sizer_1.Add((20, 20), 1, 0, 0)
sizer_10.Add(self._status, 100, 0, 0)
sizer_1.Add(sizer_10, 25, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def OnUpdate(self, event): # noqa: N802
points_ready, _ = self._session.read_status()
with warnings.catch_warnings(record=True) as w:
points = self._session.fetch_multi_point(points_ready)
if len(w) > 0: # that means we got a warning so we will put it in the status area # noqa: E501
self._status.SetLabel(str(w[0].message))
self._status.Wrap(450)
actual_range = self._session.range
if len(points) > 0:
mode_str, range_str, data_str = format_meas(points[-1], nidmm.Function[self._dev_function], actual_range, self._dev_digits) # noqa: E501
reading_display = data_str + ' ' + mode_str
range_display = range_str + ' ' + mode_str
self._reading_display.SetLabel(reading_display)
self._range_display.SetLabel(range_display)
def OnConfigUpdate(self, event): # noqa: N802
current_dev_name = self._devices.GetStringSelection()
current_function = self._function.GetStringSelection()
try:
current_range = float(self._range.GetValue())
except TypeError:
current_range = 1.0
current_digits = float(self._digits.GetStringSelection())
try:
if current_dev_name != self._dev_name:
if self._session is not None:
self._session.close()
self._session = nidmm.Session(current_dev_name)
self._session.configure_multi_point(trigger_count=0, sample_count=0, sample_trigger=nidmm.SampleTrigger.IMMEDIATE, sample_interval=-1) # noqa: E501
self._session.configure_measurement_digits(nidmm.Function[current_function], current_range, current_digits) # noqa: E501
self._session._initiate()
self._status.SetLabel("Good!")
except nidmm.Error as e:
self._status.SetLabel(str(e))
self._status.Wrap(450)
self._dev_name = current_dev_name
self._dev_function = current_function
self._dev_range = current_range
self._dev_digits = current_digits
def OnCloseWindow(self, event): # noqa: N802
if self._session is not None:
self._session.close()
self.Destroy()
    # Note: the three handlers below are never bound in __init__ and reference
    # controls (idleCtrl, sizeCtrl, posCtrl) that this frame does not create;
    # they appear to be leftover wxPython demo code.
    def OnIdle(self, event):  # noqa: N802
self.idleCtrl.SetValue(str(self.count))
self.count = self.count + 1
def OnSize(self, event): # noqa: N802
size = event.GetSize()
self.sizeCtrl.SetValue("%s, %s" % (size.width, size.height))
event.Skip()
def OnMove(self, event): # noqa: N802
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" % (pos.x, pos.y))
class SFPApp(AppBaseClass):
def OnInit(self): # noqa: N802
self.frame = SFP(None, wx.ID_ANY, "NI-DMM Python SFP")
self.SetTopWindow(self.frame)
if USE_WIT:
self.InitInspection()
self.frame.Show(True)
return True
app = SFPApp(False)
app.MainLoop()
| 39.524242
| 164
| 0.62524
|
ac427ad564ff303ec98b396d9bfe3111f3da1157
| 305
|
py
|
Python
|
scripts/templates/fastApiModule/partials/handle_route.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
scripts/templates/fastApiModule/partials/handle_route.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
scripts/templates/fastApiModule/partials/handle_route.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
@self.app.zarubaHttpMethod('zarubaUrl')
def zarubaHttpMethod_zaruba_url():
try:
return 'OK'
except HTTPException as error:
raise error
except Exception as error:
print(traceback.format_exc())
raise HTTPException(status_code=500, detail='Internal Server Error')
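# Editor's note (hedged illustration): the block above is a Zaruba template partial;
# the zarubaHttpMethod / zarubaUrl tokens are substituted when the FastAPI module is
# generated. With assumed values method=get and url='/hello' (not taken from the
# repository), the rendered handler would look roughly like this:
#
# @self.app.get('/hello')
# def get_hello():
#     try:
#         return 'OK'
#     except HTTPException as error:
#         raise error
#     except Exception as error:
#         print(traceback.format_exc())
#         raise HTTPException(status_code=500, detail='Internal Server Error')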
| 33.888889
| 76
| 0.695082
|
f51ab0b7c7cb11e209db934958296889dc9a2af1
| 3,233
|
py
|
Python
|
tests/integration/proxy/test_simple.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:14:04.000Z
|
2021-09-06T00:14:04.000Z
|
tests/integration/proxy/test_simple.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | 2
|
2021-04-30T21:17:57.000Z
|
2021-12-13T20:40:23.000Z
|
tests/integration/proxy/test_simple.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Simple Smoke Tests for Connected Proxy Minion
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
class ProxyMinionSimpleTestCase(ModuleCase):
"""
Test proxy minion functionality
"""
def test_can_it_ping(self):
"""
Ensure the proxy can ping
"""
ret = self.run_function("test.ping", minion_tgt="proxytest")
self.assertEqual(ret, True)
def test_list_pkgs(self):
"""
Package test 1, really just tests that the virtual function capability
is working OK.
"""
ret = self.run_function("pkg.list_pkgs", minion_tgt="proxytest")
self.assertIn("coreutils", ret)
self.assertIn("apache", ret)
self.assertIn("redbull", ret)
def test_install_pkgs(self):
"""
Package test 2, really just tests that the virtual function capability
is working OK.
"""
ret = self.run_function("pkg.install", ["thispkg"], minion_tgt="proxytest")
self.assertEqual(ret["thispkg"], "1.0")
ret = self.run_function("pkg.list_pkgs", minion_tgt="proxytest")
self.assertEqual(ret["apache"], "2.4")
self.assertEqual(ret["redbull"], "999.99")
self.assertEqual(ret["thispkg"], "1.0")
def test_remove_pkgs(self):
ret = self.run_function("pkg.remove", ["apache"], minion_tgt="proxytest")
self.assertNotIn("apache", ret)
def test_upgrade(self):
ret = self.run_function("pkg.upgrade", minion_tgt="proxytest")
self.assertEqual(ret["coreutils"]["new"], "2.0")
self.assertEqual(ret["redbull"]["new"], "1000.99")
def test_service_list(self):
ret = self.run_function("service.list", minion_tgt="proxytest")
self.assertIn("ntp", ret)
def test_service_stop(self):
ret = self.run_function("service.stop", ["ntp"], minion_tgt="proxytest")
ret = self.run_function("service.status", ["ntp"], minion_tgt="proxytest")
self.assertFalse(ret)
def test_service_start(self):
ret = self.run_function("service.start", ["samba"], minion_tgt="proxytest")
ret = self.run_function("service.status", ["samba"], minion_tgt="proxytest")
self.assertTrue(ret)
def test_service_get_all(self):
ret = self.run_function("service.get_all", minion_tgt="proxytest")
self.assertTrue(ret)
self.assertIn("samba", " ".join(ret))
def test_grains_items(self):
ret = self.run_function("grains.items", minion_tgt="proxytest")
self.assertEqual(ret["kernel"], "proxy")
self.assertEqual(ret["kernelrelease"], "proxy")
def test_state_apply(self):
ret = self.run_function("state.apply", ["core"], minion_tgt="proxytest")
for key, value in ret.items():
self.assertTrue(value["result"])
@skipIf(True, "SLOWTEST skip")
def test_state_highstate(self):
ret = self.run_function("state.highstate", minion_tgt="proxytest")
for key, value in ret.items():
self.assertTrue(value["result"])
| 34.763441
| 84
| 0.639344
|
6c6a7f5c42ee31af0a506a5abfa71ec6defaa1b7
| 9,427
|
py
|
Python
|
Python/libraries/recognizers-number/recognizers_number/number/chinese/extractors.py
|
Irrelevances/Recognizers-Text
|
630ce12bb47e201f663d72c31c680f6d40171962
|
[
"MIT"
] | 1
|
2018-06-07T05:14:03.000Z
|
2018-06-07T05:14:03.000Z
|
Python/libraries/recognizers-number/recognizers_number/number/chinese/extractors.py
|
Irrelevances/Recognizers-Text
|
630ce12bb47e201f663d72c31c680f6d40171962
|
[
"MIT"
] | null | null | null |
Python/libraries/recognizers-number/recognizers_number/number/chinese/extractors.py
|
Irrelevances/Recognizers-Text
|
630ce12bb47e201f663d72c31c680f6d40171962
|
[
"MIT"
] | 1
|
2018-06-05T05:26:57.000Z
|
2018-06-05T05:26:57.000Z
|
from typing import List
from enum import Enum
from recognizers_number.number.extractors import ReVal, BaseNumberExtractor
from recognizers_text.utilities import RegExpUtility
from recognizers_number.number.constants import Constants
from recognizers_number.resources.chinese_numeric import ChineseNumeric
class ChineseNumberExtractorMode(Enum):
DEFAULT = 0
EXTRACT_ALL = 1
class ChineseNumberExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM
def __init__(self, mode: ChineseNumberExtractorMode = ChineseNumberExtractorMode.DEFAULT):
self.__regexes: List[ReVal] = list()
cardinal_ex = ChineseCardinalExtractor(mode)
self.__regexes.extend(cardinal_ex.regexes)
fraction_ex = ChineseFractionExtractor()
self.__regexes.extend(fraction_ex.regexes)
class ChineseCardinalExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_CARDINAL
def __init__(self, mode: ChineseNumberExtractorMode = ChineseNumberExtractorMode.DEFAULT):
self.__regexes: List[ReVal] = list()
integer_ex = ChineseIntegerExtractor(mode)
self.__regexes.extend(integer_ex.regexes)
double_ex = ChineseDoubleExtractor()
self.__regexes.extend(double_ex.regexes)
class ChineseIntegerExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_INTEGER
def __init__(self, mode: ChineseNumberExtractorMode = ChineseNumberExtractorMode.DEFAULT):
self.__regexes = [
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersSpecialsChars),
val='IntegerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersSpecialsCharsWithSuffix),
val='IntegerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DottedNumbersSpecialsChar),
val='IntegerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersWithHalfDozen),
val='IntegerChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersWithDozen),
val='IntegerChs')
]
if mode == ChineseNumberExtractorMode.DEFAULT:
self.__regexes.append(
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersWithAllowListRegex),
val='IntegerChs'
)
)
elif mode == ChineseNumberExtractorMode.EXTRACT_ALL:
self.__regexes.append(
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersAggressiveRegex),
val='IntegerChs'
)
)
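# Editor's note (hedged usage sketch): the mode passed to ChineseNumberExtractor and
# ChineseIntegerExtractor only changes the last regex registered above -- the
# allow-list pattern for DEFAULT versus the aggressive pattern for EXTRACT_ALL.
# The extract() call is assumed from the shared BaseNumberExtractor (not shown here)
# and the sample text is illustrative only.
# extractor = ChineseNumberExtractor(ChineseNumberExtractorMode.EXTRACT_ALL)
# results = extractor.extract('三千五百')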
class ChineseDoubleExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_DOUBLE
def __init__(self):
self.__regexes = [
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleSpecialsChars),
val='DoubleNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleSpecialsCharsWithNegatives),
val='DoubleNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleDoubleSpecialsChars),
val='DoubleNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleWithMultiplierRegex),
val='DoubleNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleWithThousandsRegex),
val='DoubleChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleAllFloatRegex),
val='DoubleChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleExponentialNotationRegex),
val='DoublePow'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.DoubleScientificNotationRegex),
val='DoublePow')
]
class ChineseFractionExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_FRACTION
def __init__(self):
self.__regexes = [
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FractionNotationSpecialsCharsRegex),
val='FracNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FractionNotationRegex),
val='FracNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.AllFractionNumber),
val='FracChs')
]
class ChineseOrdinalExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_ORDINAL
def __init__(self):
self.__regexes = [
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.OrdinalRegex),
val='OrdinalChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.OrdinalNumbersRegex),
val='OrdinalChs')
]
class ChinesePercentageExtractor(BaseNumberExtractor):
@property
def regexes(self) -> List[ReVal]:
return self.__regexes
@property
def _extract_type(self) -> str:
return Constants.SYS_NUM_PERCENTAGE
def __init__(self):
self.__regexes = [
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.PercentagePointRegex),
val='PerChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimplePercentageRegex),
val='PerChs'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersPercentagePointRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersPercentageWithSeparatorRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersPercentageWithMultiplierRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FractionPercentagePointRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FractionPercentageWithSeparatorRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FractionPercentageWithMultiplierRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleNumbersPercentageRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleNumbersPercentageWithMultiplierRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleNumbersPercentagePointRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.IntegerPercentageRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.IntegerPercentageWithMultiplierRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersFractionPercentageRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleIntegerPercentageRegex),
val='PerNum'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersFoldsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.FoldsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleFoldsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SpecialsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.NumbersSpecialsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SimpleSpecialsPercentageRegex),
val='PerSpe'),
ReVal(
re=RegExpUtility.get_safe_reg_exp(ChineseNumeric.SpecialsFoldsPercentageRegex),
val='PerSpe')
]
| 38.165992
| 109
| 0.625756
|
8f8ef4ffd29d64072046a0469444654d1b6cac98
| 5,809
|
py
|
Python
|
docs/source/conf.py
|
akai10tsuki/mkvbatchmultiplex
|
0a45cf915c4817d377c4b717e95fc4ea21758f85
|
[
"MIT"
] | 22
|
2019-01-29T23:12:16.000Z
|
2022-03-26T07:42:19.000Z
|
docs/source/conf.py
|
akai10tsuki/mkvbatchmultiplex
|
0a45cf915c4817d377c4b717e95fc4ea21758f85
|
[
"MIT"
] | 12
|
2020-06-28T16:53:42.000Z
|
2021-12-14T00:33:31.000Z
|
docs/source/conf.py
|
akai10tsuki/mkvbatchmultiplex
|
0a45cf915c4817d377c4b717e95fc4ea21758f85
|
[
"MIT"
] | 1
|
2021-10-18T16:27:47.000Z
|
2021-10-18T16:27:47.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
sys.path.insert(0, os.path.abspath("../.."))
from MKVBatchMultiplex import config
html_show_sourcelink = False
autodoc_mock_imports = ["vsutillib"]
# pylint: skip-file
# -- Project information -----------------------------------------------------
project = "mkvbatchmultiplex"
copyright = "2018-2020, Efrain Vergara"
author = "Efrain Vergara"
# The short X.Y version
version = "2.0.0"
# The full version, including alpha/beta/rc tags
release = config.VERSION
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.ifconfig",
"sphinx.ext.githubpages",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "mkvbatchmultiplexdoc"
# -- Options for LaTeX output ------------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# (master_doc, 'mkvbatchmultiplex.tex', 'mkvbatchmultiplex Documentation',
# 'Efrain Vergara', 'manual'),
# ]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# (master_doc, 'mkvbatchmultiplex', 'mkvbatchmultiplex Documentation',
# [author], 1)
# ]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# (master_doc, 'mkvbatchmultiplex', 'mkvbatchmultiplex Documentation',
# author, 'mkvbatchmultiplex', 'One line description of project.',
# 'Miscellaneous'),
# ]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
# epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
# epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
| 29.789744
| 79
| 0.66001
|
e607528e3a742ab28465920c3cb40caffbcb5103
| 887
|
py
|
Python
|
other/validate_email.py
|
Curiouspaul1/Python
|
b5810c116901160b897aa2a5e3dde46937beec49
|
[
"MIT"
] | 1
|
2021-10-16T07:34:43.000Z
|
2021-10-16T07:34:43.000Z
|
other/validate_email.py
|
Curiouspaul1/Python
|
b5810c116901160b897aa2a5e3dde46937beec49
|
[
"MIT"
] | null | null | null |
other/validate_email.py
|
Curiouspaul1/Python
|
b5810c116901160b897aa2a5e3dde46937beec49
|
[
"MIT"
] | null | null | null |
"""
The code below features a function that can tell
whether a given string argument is a valid email
address or not.
"""
import re
def validate_email(email: str) -> bool:
"""
validate_email uses regular expressions to check if the input is
a valid email address.
-------
:param email: a string that represents the email to be validated.
Examples:
>>> validate_email('joker01@gmail.com')
True
>>> validate_email('joker01@gmail-com')
False
"""
regex_ = re.compile(
r"""
# email prefix
([a-zA-Z0-9_\-+%]+|[a-zA-Z0-9\-_%+]+(.\.))
# @ symbol
[@]
# email domain
[a-zA-Z0-9.-]+
# email suffix
[\.]
([a-zA-Z]{2,4})
""",
re.VERBOSE,
)
return bool(regex_.search(email))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20.627907
| 69
| 0.553551
|
1b38a94d26c7860dc254d5d1e3ef88a6d43179ec
| 46,842
|
py
|
Python
|
scipy/interpolate/_fitpack_impl.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 12
|
2017-06-15T18:17:07.000Z
|
2022-01-26T12:28:19.000Z
|
scipy/interpolate/_fitpack_impl.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 95
|
2015-07-04T08:40:38.000Z
|
2022-03-30T06:04:34.000Z
|
scipy/interpolate/_fitpack_impl.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 20
|
2021-11-07T13:55:56.000Z
|
2021-12-02T10:54:01.000Z
|
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
dfitpack_int = dfitpack.types.intvar.dtype
def _int_overflow(x, msg=None):
"""Cast the value to an dfitpack_int and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(dfitpack_int).max:
if msg is None:
msg = '%r cannot fit into an %r' % (x, dfitpack_int)
raise OverflowError(msg)
return dfitpack_int.type(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-D curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines", Computer Graphics and Image Processing,
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k= %d <=5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError as e:
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
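# Editor's note (hedged usage sketch, not part of the original module): fitting a
# parametric spline through points on a 2-D curve with splprep and resampling it
# with splev. The sample data is illustrative only.
# import numpy as np
# theta = np.linspace(0, 2 * np.pi, 40)
# tck, u = splprep([np.cos(theta), np.sin(theta)], s=0)
# unew = np.linspace(0, 1, 200)
# xnew, ynew = splev(unew, tck)      # a parametric tck yields a list of arrays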
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s, where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
Notes
-----
See splev for evaluation of the spline and its derivatives. Uses the
FORTRAN routine curfit from FITPACK.
The user is responsible for assuring that the values of *x* are unique.
Otherwise, *splrep* will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError as e:
raise TypeError("must call with task=1 only after"
" call with task=0,-1") from e
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError as e:
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-D space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
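# Editor's note (hedged usage sketch): integrating a fitted 1-D spline with splint.
# The sample data is illustrative only; the integral of sin(x) over [0, pi] should
# come out close to 2.
# import numpy as np
# x = np.linspace(0, np.pi, 50)
# tck = splrep(x, np.sin(x))
# area = splint(0, np.pi, tck)       # approximately 2.0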
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError as e:
raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
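# --- Added illustration (not part of the original module) ---------------------
# A minimal, hypothetical sketch of the bisplrep/bisplev round trip: fit a
# smoothing surface to scattered samples of z = x*y and evaluate it on a small
# grid.  It only uses names already available in this module (np, bisplrep,
# bisplev); the sample data are made up for illustration.
def _example_bisplev():
    xg, yg = np.mgrid[0:1:20j, 0:1:20j]
    zg = xg * yg
    # bisplrep expects flattened 1-D coordinate and value arrays
    tck = bisplrep(xg.ravel(), yg.ravel(), zg.ravel())
    xnew = np.linspace(0, 1, 5)
    ynew = np.linspace(0, 1, 5)
    # bisplev returns a len(x) x len(y) array on the cross-product grid
    return bisplev(xnew, ynew, tck)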
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
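# --- Added illustration (not part of the original module) ---------------------
# Hypothetical sketch of dblint: after fitting z = x*y with bisplrep, the
# integral over the unit square should come out close to 1/4.  The data are
# made up for illustration only.
def _example_dblint():
    xg, yg = np.mgrid[0:1:20j, 0:1:20j]
    tck = bisplrep(xg.ravel(), yg.ravel(), (xg * yg).ravel())
    return dblint(0.0, 1.0, 0.0, 1.0, tck)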
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
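# --- Added illustration (not part of the original module) ---------------------
# Hypothetical sketch of knot insertion: build a 1-D spline with splrep (defined
# earlier in this module), insert a knot twice at x = 2.5, and check that both
# representations evaluate to the same curve via splev.  The data are made up.
def _example_insert():
    x = np.linspace(0, 5, 20)
    y = np.sin(x)
    tck = splrep(x, y)
    tck_refined = insert(2.5, tck, m=2)
    # Both values should agree up to rounding error
    return splev(2.5, tck), splev(2.5, tck_refined)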
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
                # (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError as e:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n) from e
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| 35.567198
| 124
| 0.575915
|
c454508430405d8b12d4e7cba3c0c3ca30a5a3ef
| 12,665
|
py
|
Python
|
python/decoder/rv_decoder.py
|
hossein1387/pito_riscv
|
94df6f2201798765984017c82d1fdf0355f68d45
|
[
"MIT"
] | 3
|
2021-05-25T08:40:55.000Z
|
2022-01-05T07:05:39.000Z
|
python/decoder/rv_decoder.py
|
hossein1387/pito_riscv
|
94df6f2201798765984017c82d1fdf0355f68d45
|
[
"MIT"
] | 5
|
2021-07-25T01:12:16.000Z
|
2022-02-17T20:30:38.000Z
|
python/decoder/rv_decoder.py
|
hossein1387/pito_riscv
|
94df6f2201798765984017c82d1fdf0355f68d45
|
[
"MIT"
] | 3
|
2021-08-11T07:59:07.000Z
|
2022-01-05T07:05:43.000Z
|
import json
from collections import defaultdict
from rv_instruction_table import instruction_table
def get_hex(binary_str):
"""
Returns the hexadecimal string literal for the given binary string
literal input
:param str binary_str: Binary string to be converted to hex
"""
return "{0:#0{1}x}".format(int(binary_str, base=2), 4)
def get_int(binary_str):
"""
Returns the integer string literal for the given binary string
literal input
:param str binary_str: Binary string to be converted to int
"""
return str(int(binary_str, base=2))
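# Added illustrative sanity check (not part of the original decoder): the 5-bit
# family '01100' extracted from an R-type instruction maps to the hex key
# '0x0c' and the decimal string '12'.
assert get_hex('01100') == '0x0c'
assert get_int('01100') == '12'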
def get_output(debug=False, instr=None, rs1=None, rs2=None, imm12lo=None, imm12hi=None, rd=None, imm20=None, imm12=None,
shamt=None, shamtw=None, rm=None):
"""
Wraps the non-empty arguments and the instruction name into a dictionary with
arguments as keys and vals as values
:param str instr: Name of the instruction
:param str rs1: Source register 1
:param str rs2: Source register 2
:param str rd: Destination register
:param str rm: Extended register
:param str imm12lo: Lower 6 bits of Immediate 12
:param str imm12hi: Higher 6 bits of Immediate 12
:param str imm12: Immediate 12
:param str imm20: Immediate 20
    :param str shamt: Shift amount
    :param str shamtw: Shift amount (word-instruction variant)
"""
arg_list = [rs1, rs2, imm12lo, imm12hi, rd, imm20, imm12, shamt, shamtw, rm]
arg_keys = ['rs1', 'rs2', 'imm12lo', 'imm12hi', 'rd', 'imm20', 'imm12', 'shamt', 'shamtw', 'rm']
output_dict = defaultdict()
output_dict['instr'] = instr
for i in range(len(arg_list)):
if arg_list[i] is not None:
output_dict[arg_keys[i]] = arg_list[i]
if debug is True:
print_dic(output_dict)
return output_dict
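# Added illustration (hypothetical values, not part of the original decoder):
# only the arguments actually supplied end up as keys next to the instruction name.
_example_output = get_output(instr='add', rs1='00010', rs2='00011', rd='00001')
assert dict(_example_output) == {'instr': 'add', 'rs1': '00010', 'rs2': '00011', 'rd': '00001'}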
def print_dic(dictionary):
"""
Utility function to print the output dictionary for
debug purposes
:param dictionary dictionary: Dictionary object of the decoded instruction
"""
json_dict = json.dumps(dictionary, sort_keys=False, indent=4)
print(json_dict)
def decode(instruction, debug=False):
"""
Decodes the binary instruction string input and returns a
dictionary with the instruction name and arguments as keys and
their vals as values
:param str instruction: Binary string that contains the encoded instruction
:param bool debug: Flag to print decoded dictionary (if true).
"""
# import ipdb as pdb; pdb.set_trace()
family = instruction[-7:-2]
if get_hex(family) == '0x18':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
imm12hi = instruction[0] + instruction[-8] + instruction[-31:-27]
imm12lo = instruction[-27:-25] + instruction[-12:-8]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, imm12lo=imm12lo, imm12hi=imm12hi, debug=debug)
elif get_hex(family) == '0x1b':
instruction_name = instruction_table[get_hex(family)]
rd = instruction[-12:-7]
imm20 = instruction[0] + instruction[-20:-12] + instruction[-21] + instruction[-31:-21]
return get_output(instr=instruction_name, rd=rd, imm20=imm20, debug=debug)
elif get_hex(family) == '0x19':
instruction_name = instruction_table[get_hex(family)]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
imm12 = instruction[:12]
return get_output(instr=instruction_name, rd=rd, imm12=imm12, rs1=rs1, debug=debug)
elif get_hex(family) == '0x0d' or get_hex(family) == '0x05':
instruction_name = instruction_table[get_hex(family)]
imm20 = instruction[:20]
rd = instruction[-12:-7]
return get_output(instr=instruction_name, rd=rd, imm20=imm20, debug=debug)
elif get_hex(family) == '0x04':
funct3 = get_int(instruction[-15:-12])
if funct3 in ['0', '2', '3', '4', '6', '7']:
instruction_name = instruction_table[get_hex(family)][funct3]
rd = instruction[-12:-7]
rs1 = instruction[-20:-15]
imm12 = instruction[:12]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, imm12=imm12, debug=debug)
elif funct3 in ['1', '5']:
if funct3 == '5':
slice_5 = str(get_int(instruction[:7]))
instruction_name = instruction_table[get_hex(family)][funct3][slice_5]
else:
instruction_name = instruction_table[get_hex(family)][funct3]
rd = instruction[-12:-7]
rs1 = instruction[-20:-15]
shamt = instruction[-25:-20]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, shamt=shamt, debug=debug)
elif get_hex(family) == '0x0c':
funct3 = get_int(instruction[-15:-12])
slice_7 = get_int(instruction[:7])
instruction_name = instruction_table[get_hex(family)][funct3][slice_7]
rd = instruction[-12:-7]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, rd=rd, debug=debug)
elif get_hex(family) == '0x06':
funct3 = get_int(instruction[-15:-12])
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
if funct3 == '0':
imm12 = instruction[:12]
instruction_name = instruction_table[get_hex(family)][funct3]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, imm12=imm12, debug=debug)
elif funct3 == '1':
shamtw = instruction[-25:-20]
instruction_name = instruction_table[get_hex(family)][funct3]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, shamtw=shamtw, debug=debug)
else:
shamtw = instruction[-25:-20]
slice_6 = get_int(instruction[:6])
instruction_name = instruction_table[get_hex(family)][funct3][slice_6]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, shamtw=shamtw, debug=debug)
elif get_hex(family) == '0x0e':
funct3 = get_int(instruction[-15:-12])
slice_7 = get_int(instruction[:7])
instruction_name = instruction_table[get_hex(family)][funct3][slice_7]
rd = instruction[-12:-7]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, rd=rd, debug=debug)
elif get_hex(family) == '0x00':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rd = instruction[-12:-7]
rs1 = instruction[-25:-20]
imm12 = instruction[:12]
return get_output(instr=instruction_name, rs1=rs1, imm12=imm12, rd=rd, debug=debug)
elif get_hex(family) == '0x08':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
imm12lo = instruction[6] + instruction[-12:-7]
imm12hi = instruction[:6]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, imm12lo=imm12lo, imm12hi=imm12hi, debug=debug)
elif get_hex(family) == '0x03':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
if funct3 == '0':
return get_output(instr=instruction_name, rs1=rs1, rd=rd, debug=debug)
else:
imm12 = instruction[:12]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, imm12=imm12, debug=debug)
elif get_hex(family) == '0x0c' or get_hex(family) == '0x0e':
funct3 = get_int(instruction[-15:-12])
slice_7 = get_int(instruction[:7])
instruction_name = instruction_table[get_hex(family)][funct3][slice_7]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
rd = instruction[-12:-7]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, rs2=rs2, debug=debug)
elif get_hex(family) == '0x0b':
funct3 = get_int(instruction[-15:-12])
slice_3 = get_int(instruction[:3])
slice_2 = get_int(instruction[-29:-27])
instruction_name = instruction_table[get_hex(family)][funct3][slice_2][slice_3]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
if slice_2 != '2':
rs2 = instruction[-25:-20]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, rs2=rs2, debug=debug)
else:
return get_output(instr=instruction_name, rs1=rs1, rd=rd, debug=debug)
elif get_hex(family) == '0x14':
slice_5 = get_int(instruction[:5])
slice_2 = get_int(instruction[-27:-25])
rs2 = instruction[-25:-20]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
if slice_5 in ['4', '5', '20', '30']:
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][slice_5][slice_2][funct3]
if slice_5 == '30':
return get_output(instr=instruction_name, rs1=rs1, rd=rd, debug=debug)
else:
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, rd=rd, debug=debug)
elif slice_5 == '8':
instruction_name = instruction_table[get_hex(family)][slice_5][get_int(rs2)]
rm = instruction[-15:-12]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, rm=rm, debug=debug)
elif slice_5 == '24' or slice_5 == '26':
instruction_name = instruction_table[get_hex(family)][slice_5][slice_2][get_int(rs2)]
rm = instruction[-15:-12]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, rm=rm, debug=debug)
elif slice_5 == '28':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][slice_5][slice_2][get_int(rs2)][funct3]
return get_output(instr=instruction_name, rs1=rs1, rd=rd, debug=debug)
else:
instruction_name = instruction_table[get_hex(family)][slice_5][slice_2]
rm = instruction[-15:-12]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, rd=rd, rm=rm, debug=debug)
elif get_hex(family) == '0x01':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
imm12 = instruction[:12]
return get_output(instr=instruction_name, rd=rd, imm12=imm12, rs1=rs1, debug=debug)
elif get_hex(family) == '0x09':
funct3 = get_int(instruction[-15:-12])
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
imm12lo = instruction[6] + instruction[-12:-7]
imm12hi = instruction[:6]
return get_output(instr=instruction_name, rs1=rs1, imm12lo=imm12lo, imm12hi=imm12hi, rs2=rs2, debug=debug)
elif get_hex(family) in ['0x10', '0x11', '0x12', '0x13']:
slice_2 = get_int(instruction[-27:-25])
instruction_name = instruction_table[get_hex(family)][slice_2]
rs1 = instruction[-20:-15]
rs2 = instruction[-25:-20]
rs3 = instruction[:5]
rm = instruction[-15:-12]
return get_output(instr=instruction_name, rs1=rs1, rs2=rs2, rm=rm, debug=debug)
elif get_hex(family) == '0x1c':
funct3 = get_int(instruction[-15:-12])
if funct3 == '0':
slice_12 = get_int(instruction[:12])
if slice_12 == '260':
instruction_name = instruction_table[get_hex(family)][funct3][slice_12]
rs1 = instruction[-20:-15]
return get_output(instr=instruction_name, rs1=rs1, debug=debug)
else:
instruction_name = instruction_table[get_hex(family)][funct3][slice_12]
return get_output(instr=instruction_name, debug=debug)
else:
instruction_name = instruction_table[get_hex(family)][funct3]
rs1 = instruction[-20:-15]
rd = instruction[-12:-7]
imm12 = instruction[:12]
return get_output(instr=instruction_name, rd=rd, imm12=imm12, rs1=rs1, debug=debug)
else:
print("Instruction does not match any known instruction")
print("Family :" + family)
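# --- Added illustration (not part of the original decoder) --------------------
# A minimal sketch of driving decode() by hand.  The 32-bit string below encodes
# the R-type instruction `add x1, x2, x3` (funct7|rs2|rs1|funct3|rd|opcode); its
# family bits '01100' select the '0x0c' entry, so the exact name printed depends
# on how instruction_table is populated.
if __name__ == '__main__':
    example_add = '00000000001100010000000010110011'
    print_dic(decode(example_add))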
| 37.359882
| 120
| 0.627161
|
3d37f5d9ffe3695cd873520f94734ecba012f7ac
| 6,636
|
py
|
Python
|
BackLogOperacao.py
|
DanielRoberto72/Automacao_Backlog
|
a15046b81c083330e81feee83ac3bacf9cfc7e9c
|
[
"MIT"
] | null | null | null |
BackLogOperacao.py
|
DanielRoberto72/Automacao_Backlog
|
a15046b81c083330e81feee83ac3bacf9cfc7e9c
|
[
"MIT"
] | null | null | null |
BackLogOperacao.py
|
DanielRoberto72/Automacao_Backlog
|
a15046b81c083330e81feee83ac3bacf9cfc7e9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import selenium, os, time, pandas as pd, csv, warnings, shutil, sys, lxml, re, itertools, openpyxl, glob, mysql.connector, smtplib
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup as BS
from selenium.webdriver import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support import expected_conditions as EC
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import win32com.client
import os
import glob
warnings.filterwarnings("ignore")
log = ''
tempo = datetime.now() - timedelta()
timestamp = tempo.strftime('%Y-%m-%d')
timestamp_envio = tempo.strftime('%d-%m')
#-----------------------------------------------------------------------------------------------
#SETTING FIXED INFORMATION
login = "seu.login"
senha = "suaSenha123"
dirRaiz = 'C:/Prod/Python/BacklogOperacao/'
diretorio = dirRaiz + 'arquivos/'
#-----------------------------------------------------------------------------------------------
#STARTING THE CHROMEDRIVER
chrome_options = webdriver.ChromeOptions()
chromedriver = dirRaiz+"Driver/chromedriver.exe"
prefs = {"download.default_directory": r"C:\Prod\Python\BacklogOperacao\arquivos"}
chrome_options.add_experimental_option('prefs', prefs)
chrome_options.add_argument('ignore-certificate-errors')
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chromedriver)
#-----------------------------------------------------------------------------------------------
def wait_xpath_click(y):
WebDriverWait(driver, 200).until(EC.presence_of_element_located((By.XPATH, y))).click()
#-----------------------------------------------------------------------------------------------
#DOWNLOADING THE REPORT FROM OTRS
try:
driver.get("link.html")
driver.find_element_by_name('Login').send_keys(login)
driver.find_element_by_name ('Senha').send_keys(senha)
print('lOGADO!')
WebDriverWait(driver, 200).until(EC.presence_of_element_located((By.XPATH, '/html/body/div/div[1]/div[2]/form/button'))).click()
driver.get('link2.html')
print('Aguardando para fazer o download')
time.sleep(120)
WebDriverWait(driver, 200).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div/div[1]/div/button[2]'))).click()
WebDriverWait(driver, 200).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div/div[1]/div/ul/li/a'))).click()
print('Aguardando o download do relatório! (2 Minutos)')
time.sleep(120)
print('Download realizado')
driver.close()
except:
print('Falha ao executar o script, script finalizado!')
sys.exit()
#-----------------------------------------------------------------------------------------------
#MANIPULATING THE SPREADSHEET
o = win32com.client.Dispatch("Excel.Application")
o.Visible = False
dirRaiz = 'C:/Prod/Python/BacklogOperacao/'
diretorio = dirRaiz + 'arquivos/'
files = glob.glob(diretorio + "/*.xls")
filename= diretorio +'relatorio_servico_realtime'
file = os.path.basename(filename)
output = diretorio + '/' + file.replace('.xls','.xlsx')
name='relatorio_servico_realtime'
wb = o.Workbooks.Open(filename)
wb.ActiveSheet.SaveAs(name,51)
wb.Close(True)
#Deleting the xls file
os.remove(diretorio+'relatorio_servico_realtime.xls')
#-----------------------------------------------------------------------------------------------
#MATCHING TICKETS BY QUEUE
dirRaiz = 'C:/Prod/Python/BacklogOperacao/'
diretorio = dirRaiz + 'arquivos/'
df = pd.read_excel(diretorio+'relatorio_servico_realtime.xlsx').astype(str)
try:
lista1 = ['NIVEL 2::','NIVEL 2::','NIVEL 2::']
Mvno = ['MVNO']
lista2 = ['NIVEL 2::','NIVEL 2::']
lista3 = ['NIVEL 3::', 'NIVEL 2::']
lista4 = ['NIVEL 2::']
lista5 = ['NIVEL 3::']
df1 = df[df['FILA'].isin(lista1)].reset_index(drop=True)
dfMVNO = df1[df1['MVNO'].isin(Mvno)].reset_index(drop=True)
df2 = df1[~df1['MVNO'].isin(Mvno)].reset_index(drop=True)
df3 = df[df['FILA'].isin(lista2)].reset_index(drop=True)
df4 = df[df['FILA'].isin(lista3)].reset_index(drop=True)
df5 = df[df['FILA'].isin(lista4)].reset_index(drop=True)
df6 = df[df['FILA'].isin(lista5)].reset_index(drop=True)
except:
print('FALHA!!!!')
the_type, the_value, the_traceback = sys.exc_info()
erro = 'Falha nos batimentos das filas'
print(the_type, ',' ,the_value,',', the_traceback)
#-----------------------------------------------------------------------------------------------
#COUNTING TICKETS PER QUEUE AND CONVERTING THE COUNTS TO STRINGS
n1 = int(len(dfMVNO))
n2 = int(len(df3))
n3 = int(len(df2))
n4 = int(len(df4))
n5 = int(len(df5))
n6 = int(len(df6))
n1Str = str(n1)
n2Str = str(n2)
n3Str = str(n3)
n4Str = str(n4)
n5Str = str(n5)
n6Str = str(n6)
print(n1Str)
print(n2Str)
print(n3Str)
print(n4Str)
print(n5Str)
print(n6Str)
#-----------------------------------------------------------------------------------------------
#SENDING THE EMAIL TO THE FIXED RECIPIENTS
tempo = datetime.now() - timedelta()
timestamp_envio = tempo.strftime('%d-%m')
try:
email = 'emaildeenvio@email.com.br'
password = 'suasenha'
send_to_email = ['lista@email','lista@email']
subject = 'BACKLOG OPERAÇÃO '+timestamp_envio
message ='''
Bom dia!
Segue análise das filas
Fila 1: '''+n1Str+'''
Fila 2: '''+n2Str+'''
Fila 3: '''+n3Str+'''
Fila 4: '''+n4Str+'''
Fila 5: '''+n5Str+'''
Fila 6: '''+n6Str+'''
Atenciosamente. '''
msg = MIMEMultipart()
msg['From'] = email
msg['To'] = ", ".join(send_to_email)
msg['Subject'] = subject
msg.attach(MIMEText(message, 'plain'))
server = smtplib.SMTP('SMTP.office365.com',587)
server.starttls()
server.login(email, password)
text = msg.as_string()
server.sendmail(email, send_to_email, text)
server.quit()
print('Email enviado COM SUCESSO PARA OS DESTINATÁRIOS!')
except:
print('Falha ao enviar o Email!')
the_type, the_value, the_traceback = sys.exc_info()
print(the_type, ',' ,the_value,',', the_traceback)
pass
try:
os.remove(diretorio+'relatorio_servico_realtime.xlsx')
print('Processo de remover planilha finalizado!!!')
except:
print('falha ao remover arquivo ou arquivo já foi removido')
| 34.38342
| 159
| 0.627034
|
f50e17944809335dd1237c910a89582ccba3c346
| 1,388
|
py
|
Python
|
setup.py
|
slafs/honcho
|
40c308fba078d32f47aa1e6aa55427c32df32278
|
[
"MIT"
] | null | null | null |
setup.py
|
slafs/honcho
|
40c308fba078d32f47aa1e6aa55427c32df32278
|
[
"MIT"
] | null | null | null |
setup.py
|
slafs/honcho
|
40c308fba078d32f47aa1e6aa55427c32df32278
|
[
"MIT"
] | 1
|
2020-11-21T19:05:42.000Z
|
2020-11-21T19:05:42.000Z
|
import os
import sys
from setuptools import setup, find_packages
from honcho import __version__
requirements = []
export_requirements = []
if sys.version_info[:2] < (2, 7):
requirements.append('argparse')
requirements.append('ordereddict')
if (3, 0) <= sys.version_info[:2] < (3, 3):
export_requirements = ['jinja2>=2.6,<2.7']
else:
export_requirements = ['jinja2>=2.7,<2.8']
HERE = os.path.dirname(__file__)
try:
long_description = open(os.path.join(HERE, 'README.rst')).read()
except:
long_description = None
setup(
name='honcho',
version=__version__,
packages=find_packages(exclude=['honcho.test*']),
include_package_data=True,
# metadata for upload to PyPI
author='Nick Stenning',
author_email='nick@whiteink.com',
url='https://github.com/nickstenning/honcho',
description='Honcho: a python clone of Foreman. For managing Procfile-based applications.',
long_description=long_description,
license='MIT',
keywords='sysadmin process procfile',
install_requires=requirements,
extras_require={
'export': export_requirements,
},
entry_points={
'console_scripts': [
'honcho=honcho.command:main'
],
'honcho_exporters': [
'upstart=honcho.export.upstart:Export',
'supervisord=honcho.export.supervisord:Export',
],
}
)
| 25.703704
| 95
| 0.664986
|
7c83c698d44e2794aa1f7bbfe1deaa4c6a12b75e
| 2,357
|
py
|
Python
|
train_dqn.py
|
wbaik/2048-pytorch-DQN
|
eb27575b30e94e496393f5999d1f567c0d6b40bf
|
[
"MIT"
] | 4
|
2018-12-10T11:31:06.000Z
|
2021-11-08T20:37:23.000Z
|
train_dqn.py
|
wbaik/2048-pytorch-DQN
|
eb27575b30e94e496393f5999d1f567c0d6b40bf
|
[
"MIT"
] | null | null | null |
train_dqn.py
|
wbaik/2048-pytorch-DQN
|
eb27575b30e94e496393f5999d1f567c0d6b40bf
|
[
"MIT"
] | 3
|
2018-12-17T05:11:28.000Z
|
2021-08-02T13:53:57.000Z
|
import argparse
import gym
import gym_2048
import pickle
from play_2048 import Play2048
import torch
import torch.optim as optim
from utils.dqn import DQN
from utils import device, ReplayMemory
parser = argparse.ArgumentParser(description='Hyper-parameters for the DQN training')
parser.add_argument('--epsilon', default=1.0, type=float)
parser.add_argument('--min_epsilon', default=0.05, type=float)
parser.add_argument('--eps_decay_rate', default=1e-6, type=float)
parser.add_argument('--update_every', default=100, type=int)
parser.add_argument('--n_train', default=1000, type=int)
parser.add_argument('--batch_size', default=1024, type=int)
parser.add_argument('--gamma', default=0.999, type=float)
parser.add_argument('--replay_memory_length', default=3000000, type=int)
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--mode', default='train', type=str, choices=['train', 'test'])
parser.add_argument('--replay_memory', default='replay_memory.p', type=str)
args = parser.parse_args()
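# Illustrative invocation (hypothetical values): every hyper-parameter above can
# be overridden on the command line, e.g.
#   python train_dqn.py --mode train --batch_size 512 --learning_rate 1e-4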
if __name__ == '__main__':
policy, target = DQN(4).to(device), DQN(4).to(device)
try:
policy.load_state_dict(torch.load('my_policy.pt'))
target.load_state_dict(torch.load('my_target.pt'))
except FileNotFoundError:
print('--- Exception Raised: Files not found...')
try:
rm = pickle.load(open(args.replay_memory, 'rb'))
except FileNotFoundError:
rm = ReplayMemory(args.replay_memory_length)
optimizer = optim.RMSprop(policy.parameters(), eps=args.learning_rate)
env = gym.make('game-2048-v0')
player = Play2048(env, rm, policy, target, optimizer,
args.batch_size, args.epsilon,
args.eps_decay_rate, args.min_epsilon,
args.n_train, args.update_every, args.gamma)
try:
player.play_2048(args.mode)
except KeyboardInterrupt:
print('\nKeyboard Interrupt!!!')
finally:
try:
if args.mode == 'train':
print('Saving...')
torch.save(policy.state_dict(), 'my_policy.pt')
torch.save(target.state_dict(), 'my_target.pt')
except Exception as e:
print('Error :{}'.format(e))
| 36.261538
| 99
| 0.645312
|
ce03ef1d670c70e01e2c183ee9fff1fd1b1e4de8
| 2,569
|
py
|
Python
|
shop/migrations/0009_auto_20170325_1625.py
|
IlyaDjurin/django-shop
|
0b13a5975ff3e9a601ff13a07bfd3e3beebf28e3
|
[
"MIT"
] | null | null | null |
shop/migrations/0009_auto_20170325_1625.py
|
IlyaDjurin/django-shop
|
0b13a5975ff3e9a601ff13a07bfd3e3beebf28e3
|
[
"MIT"
] | null | null | null |
shop/migrations/0009_auto_20170325_1625.py
|
IlyaDjurin/django-shop
|
0b13a5975ff3e9a601ff13a07bfd3e3beebf28e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-25 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_tovar_inphoto_tovarinphoto_sim'),
]
operations = [
migrations.AddField(
model_name='tovar_img',
name='tovar_image1',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара1'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image10',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара10'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image2',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара2'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image3',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара3'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image4',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара4'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image5',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара5'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image6',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара6'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image7',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара7'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image8',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара8'),
),
migrations.AddField(
model_name='tovar_img',
name='tovar_image9',
field=models.ImageField(blank=True, upload_to='products/%Y/%m/%d/', verbose_name='Изображение товара9'),
),
]
| 38.924242
| 117
| 0.599844
|
1ea3af779701bd128f2fce3091bf1890df421117
| 3,833
|
py
|
Python
|
flexget/components/sites/sites/nyaa.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | null | null | null |
flexget/components/sites/sites/nyaa.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | 1
|
2017-10-09T23:06:44.000Z
|
2017-10-09T23:06:44.000Z
|
flexget/components/sites/sites/nyaa.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
import feedparser
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.components.sites.utils import normalize_unicode, torrent_availability
from flexget.utils.tools import parse_filesize
log = logging.getLogger('nyaa')
CATEGORIES = {
'all': '0_0',
# Anime
'anime': '1_0',
'anime amv': '1_1',
'anime eng': '1_2',
'anime non-eng': '1_3',
'anime raw': '1_4',
# Audio
'audio': '2_0',
'audio lless': '2_1',
'audio lossy': '2_2',
# Literature
'lit': '3_0',
'lit eng': '3_1',
'lit non-eng': '3_2',
'lit raw': '3_3',
# Live Action
'liveact': '4_0',
'liveact eng': '4_1',
'liveact idol': '4_2',
'liveact non-eng': '4_3',
'liveact raw': '4_4',
# Pictures
'pics': '5_0',
'pics graphics': '5_1',
'pics photos': '5_2',
# Software
'software': '6_0',
'software apps': '6_1',
'software games': '6_2',
}
FILTERS = ['all', 'filter remakes', 'trusted only']
class UrlRewriteNyaa(object):
"""Nyaa urlrewriter and search plugin."""
schema = {
'oneOf': [
{'type': 'string', 'enum': list(CATEGORIES)},
{
'type': 'object',
'properties': {
'category': {'type': 'string', 'enum': list(CATEGORIES)},
'filter': {'type': 'string', 'enum': list(FILTERS)},
},
'additionalProperties': False,
},
]
}
def search(self, task, entry, config):
if not isinstance(config, dict):
config = {'category': config}
config.setdefault('category', 'anime eng')
config.setdefault('filter', 'all')
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
name = normalize_unicode(search_string)
url = 'https://www.nyaa.si/?page=rss&q=%s&c=%s&f=%s' % (
quote(name.encode('utf-8')),
CATEGORIES[config['category']],
FILTERS.index(config['filter']),
)
log.debug('requesting: %s' % url)
rss = feedparser.parse(url)
status = rss.get('status', False)
if status != 200:
log.debug('Search result not 200 (OK), received %s' % status)
if status >= 400:
continue
ex = rss.get('bozo_exception', False)
if ex:
log.error('Got bozo_exception (bad feed) on %s' % url)
continue
for item in rss.entries:
entry = Entry()
entry['title'] = item.title
entry['url'] = item.link
entry['torrent_seeds'] = int(item.nyaa_seeders)
entry['torrent_leeches'] = int(item.nyaa_leechers)
entry['torrent_info_hash'] = item.nyaa_infohash
entry['torrent_availability'] = torrent_availability(
entry['torrent_seeds'], entry['torrent_leeches']
)
if item.nyaa_size:
entry['content_size'] = parse_filesize(item.nyaa_size)
entries.add(entry)
return entries
def url_rewritable(self, task, entry):
return entry['url'].startswith('https://www.nyaa.si/view/')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('view', 'download') + ".torrent"
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteNyaa, 'nyaa', interfaces=['search', 'urlrewriter'], api_ver=2)
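# --- Added illustration (not part of the plugin) -------------------------------
# A minimal sketch of how the RSS search URL is assembled from CATEGORIES and
# FILTERS; the search term is made up.
def _example_search_url():
    name = normalize_unicode('some series 01')
    return 'https://www.nyaa.si/?page=rss&q=%s&c=%s&f=%s' % (
        quote(name.encode('utf-8')),
        CATEGORIES['anime eng'],
        FILTERS.index('all'),
    )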
| 30.91129
| 92
| 0.549178
|
6d30b378219db158b440b71dfd80080bd3335090
| 5,519
|
py
|
Python
|
tests/h/models/annotation_test.py
|
julien-cheng/h
|
36c8ec044725720cf36f0986cdf025395aca8929
|
[
"BSD-2-Clause"
] | 2
|
2019-08-04T07:22:11.000Z
|
2020-07-17T05:01:41.000Z
|
tests/h/models/annotation_test.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/models/annotation_test.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from h.models.annotation import Annotation
def test_parent_id_of_direct_reply():
ann = Annotation(references=["parent_id"])
assert ann.parent_id == "parent_id"
def test_parent_id_of_reply_to_reply():
ann = Annotation(references=["reply1", "reply2", "parent_id"])
assert ann.parent_id == "parent_id"
def test_reply_is_reply():
ann = Annotation(references=["parent_id"])
assert ann.is_reply is True
def test_non_reply_is_not_reply():
ann = Annotation()
assert ann.is_reply is False
def test_parent_id_of_annotation():
ann = Annotation()
assert ann.parent_id is None
def test_thread_root_id_returns_id_if_no_references():
annotation = Annotation(id="GBhy1DoHEea6htPothzqZQ")
assert annotation.thread_root_id == "GBhy1DoHEea6htPothzqZQ"
def test_thread_root_id_returns_id_if_references_empty():
annotation = Annotation(id="jANlljoHEea6hsv8FY7ipw", references=[])
assert annotation.thread_root_id == "jANlljoHEea6hsv8FY7ipw"
def test_thread_root_id_returns_reference_if_only_one_reference():
annotation = Annotation(
id="qvJnIjoHEea6hiv0nJK7gw", references=["yiSVIDoHEea6hjcSFuROLw"]
)
assert annotation.thread_root_id == "yiSVIDoHEea6hjcSFuROLw"
def test_thread_root_id_returns_first_reference_if_many_references():
annotation = Annotation(
id="uK9yVjoHEea6hsewWuiKtQ",
references=[
"1Ife3DoHEea6hpv8vWujdQ",
"uVuItjoHEea6hiNgv1wvmg",
"Qe7fpc5ZRgWy0RSHEP9UNg",
],
)
assert annotation.thread_root_id == "1Ife3DoHEea6hpv8vWujdQ"
def test_text_setter_renders_markdown(markdown):
markdown.render.return_value = "<p>foobar</p>"
annotation = Annotation()
annotation.text = "foobar"
markdown.render.assert_called_once_with("foobar")
    assert annotation.text_rendered == markdown.render.return_value
@pytest.mark.parametrize(
"userid,authority",
[
("acct:bmason@hypothes.is", "hypothes.is"),
("acct:kaylawatson@elifesciences.org", "elifesciences.org"),
],
)
def test_authority(factories, userid, authority):
assert factories.Annotation(userid=userid).authority == authority
def test_authority_when_annotation_has_no_userid():
assert Annotation().authority is None
def test_setting_extras_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.extra should be persisted.
Setting an Annotation.extra value in-place:
my_annotation.extra['foo'] = 'bar'
should be persisted to the database.
"""
annotation = factories.Annotation(userid="fred")
annotation.extra["foo"] = "bar"
# We need to commit the db session here so that the in-place change to
# annotation.extra above would be lost if annotation.extra was a normal
# dict. Without this commit() this test would never fail.
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert annotation.extra == {"foo": "bar"}
def test_deleting_extras_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.extra should be persisted.
Deleting an Annotation.extra value in-place should be persisted to the
database.
"""
annotation = factories.Annotation(userid="fred", extra={"foo": "bar"})
del annotation.extra["foo"]
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "foo" not in annotation.extra
def test_appending_tags_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.tags should be persisted.
Changes made by Annotation.tags.append() should be persisted to the
database.
"""
annotation = factories.Annotation(userid="fred", tags=["foo"])
annotation.tags.append("bar")
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "bar" in annotation.tags
def test_deleting_tags_inline_is_persisted(db_session, factories):
"""In-place deletions of annotation tags should be persisted."""
annotation = factories.Annotation(userid="fred", tags=["foo"])
del annotation.tags[0]
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "foo" not in annotation.tags
class TestThread(object):
def test_empty_thread(self, root):
assert root.thread == []
def test_empty_thread_ids(self, root):
assert root.thread_ids == []
def test_thread_with_replies(self, root, reply, subreply):
assert set(root.thread) == set([reply, subreply])
def test_thread_ids_with_replies(self, root, reply, subreply):
assert set(root.thread_ids) == set([reply.id, subreply.id])
@pytest.mark.usefixtures("subreply")
def test_reply_has_no_thread(self, reply):
assert reply.thread == []
@pytest.mark.usefixtures("subreply")
def test_reply_has_no_thread_ids(self, reply):
assert reply.thread_ids == []
@pytest.fixture
def root(self, factories):
return factories.Annotation()
@pytest.fixture
def reply(self, factories, root):
return factories.Annotation(references=[root.id])
@pytest.fixture
def subreply(self, factories, root, reply):
return factories.Annotation(references=[root.id, reply.id])
@pytest.fixture
def markdown(patch):
return patch("h.models.annotation.markdown")
| 26.791262
| 75
| 0.714803
|
d920fe736ba829c886cf351a7284c7cc4ca4319f
| 6,230
|
py
|
Python
|
solfasol/shop/migrations/0001_initial.py
|
rekognize/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | null | null | null |
solfasol/shop/migrations/0001_initial.py
|
rekognize/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | 1
|
2020-06-18T13:08:47.000Z
|
2020-06-18T13:08:47.000Z
|
solfasol/shop/migrations/0001_initial.py
|
Solfasol/solfasol
|
c960c3364c753d75161242eccac4f085d800c843
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-07-03 12:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('sessions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'cart',
'verbose_name_plural': 'carts',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True, verbose_name='name')),
('slug', models.SlugField(editable=False, unique=True)),
('order', models.PositiveSmallIntegerField(blank=True, default=0, null=True, verbose_name='order')),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('order',),
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='name')),
('slug', models.SlugField(editable=False, unique=True)),
('description', models.CharField(max_length=250, verbose_name='description')),
('price', models.PositiveIntegerField()),
('image', models.ImageField(upload_to='shop/', verbose_name='image')),
('available', models.BooleanField(default=True, verbose_name='available')),
('promoted', models.BooleanField(default=False, verbose_name='promoted')),
('added', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category', verbose_name='category')),
],
options={
'verbose_name': 'item',
'verbose_name_plural': 'items',
'ordering': ('-added',),
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='name')),
],
options={
'verbose_name': 'tag',
'verbose_name_plural': 'tags',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Full name')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('gsm_number', models.CharField(blank=True, max_length=20, null=True, verbose_name='Phone number')),
('identity_number', models.CharField(max_length=11, verbose_name='Identity number')),
('address', models.CharField(max_length=200, verbose_name='Address')),
('city', models.CharField(max_length=50, verbose_name='City')),
('country', models.CharField(max_length=50, verbose_name='Country')),
('zipcode', models.CharField(max_length=6, verbose_name='Zip code')),
('notes', models.TextField(blank=True, null=True, verbose_name='notes')),
('time', models.DateTimeField(auto_now_add=True)),
('cart', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.Cart', verbose_name='cart')),
],
),
migrations.CreateModel(
name='ItemAlternative',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250, verbose_name='description')),
('price', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True, verbose_name='price')),
('available', models.BooleanField(default=True, verbose_name='available')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Item')),
],
options={
'verbose_name': 'alternative',
'verbose_name_plural': 'alternatives',
},
),
migrations.AddField(
model_name='item',
name='tags',
field=models.ManyToManyField(blank=True, to='shop.Tag', verbose_name='tags'),
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('paid', models.BooleanField(default=False, verbose_name='paid')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Item', verbose_name='item')),
],
),
migrations.AddField(
model_name='cart',
name='items',
field=models.ManyToManyField(through='shop.CartItem', to='shop.Item'),
),
migrations.AddField(
model_name='cart',
name='session',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sessions.Session', verbose_name='session'),
),
]
| 48.671875
| 161
| 0.568218
|
93df5efc30156d61b8f95a68cce06d27b6235fd1
| 1,254
|
py
|
Python
|
barracuda_waf/komand_barracuda_waf/actions/update_action_policy/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
barracuda_waf/komand_barracuda_waf/actions/update_action_policy/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
barracuda_waf/komand_barracuda_waf/actions/update_action_policy/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
from .schema import UpdateActionPolicyInput, UpdateActionPolicyOutput
class UpdateActionPolicy(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='update_action_policy',
description='Updates the values of given parameters in the given action policy',
input=UpdateActionPolicyInput(),
output=UpdateActionPolicyOutput())
def run(self, params={}):
action = "security_policies"
policy_id = params.get("policy_id")
attack_group_id = params.get("attack_group_id")
action_id = params.get("action_id")
if not policy_id or not attack_group_id or not action_id:
            self.connection.connector.raise_error("Policy ID, attack group ID and action ID can't be empty")
action = action + "/" + policy_id + "/attack_groups/" + attack_group_id + "/actions/" + action_id
r = self.connection.connector.put(action, params.get("action_policy"))
if "error" in r and "status" in r["error"] and r["error"]["status"] == 400:
self.connection.connector.raise_error("Problem with update")
return {"msg": r["msg"]}
def test(self):
return {"msg": ""}
| 38
| 108
| 0.645136
|
b84ad72cc8e3b204f7b8362343869bc7aa190c13
| 16,547
|
py
|
Python
|
gseapy/gsea.py
|
ostrokach/gseapy
|
6d30ec425e59fcadad10a471e312b3f7f24bac21
|
[
"MIT"
] | 7
|
2017-08-13T16:44:41.000Z
|
2021-06-29T11:46:27.000Z
|
gseapy/gsea.py
|
ostrokach/gseapy
|
6d30ec425e59fcadad10a471e312b3f7f24bac21
|
[
"MIT"
] | null | null | null |
gseapy/gsea.py
|
ostrokach/gseapy
|
6d30ec425e59fcadad10a471e312b3f7f24bac21
|
[
"MIT"
] | 3
|
2019-01-03T23:32:17.000Z
|
2021-05-06T12:32:34.000Z
|
#! python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import os,sys,errno, logging
from .parser import gsea_edb_parser, gsea_rank_metric, gsea_gmt_parser, gsea_cls_parser
from .algorithm import enrichment_score, gsea_compute, preprocess, ranking_metric
from .plot import gsea_plot, heatmap
from collections import OrderedDict
from .__main__ import log_init
import pandas as pd
def replot(indir, outdir='gseapy_out', weight=1, figsize=[6.5,6], format='pdf', min_size=3, max_size=5000):
    """The main function to run inside python.
    :param indir: GSEA desktop results directory. Its sub folder must contain the edb file folder.
    :param outdir: Output directory.
    :param weight: weighted score type. choose from {0,1,1.5,2}. Default: 1.
    :param figsize: matplotlib output figure figsize. Default: [6.5,6].
    :param format: matplotlib output figure format. Default: 'pdf'.
    :param min_size: min size of input genes presented in Gene Sets. Default: 3.
    :param max_size: max size of input genes presented in Gene Sets. Default: 5000.
    You are not encouraged to use the min_size or max_size arguments in :func:`replot`,
    because the gmt file has already been filtered.
    :return: Generate new figures with the selected figure format. Default: 'pdf'.
"""
argument = locals()
try:
os.makedirs(outdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
pass
logger = log_init(outdir, module='replot')
#write command to log file
argument = OrderedDict(sorted(argument.items(), key=lambda t:t[0]))
logger.debug("Command: replot, "+str(argument))
import glob
from bs4 import BeautifulSoup
#parsing files.......
try:
results_path = glob.glob(indir+'*/edb/results.edb')[0]
rank_path = glob.glob(indir+'*/edb/*.rnk')[0]
gene_set_path = glob.glob(indir+'*/edb/gene_sets.gmt')[0]
except IndexError as e:
logger.debug(e)
logger.error("Could not locate GSEA files in the given directory!")
sys.exit(1)
#extract sample names from .cls file
cls_path = glob.glob(indir+'*/edb/*.cls')
if cls_path:
phenoPos, phenoNeg, classes = gsea_cls_parser(cls_path[0])
else:
# logic for prerank results
phenoPos, phenoNeg = '',''
#obtain gene sets
gene_set_dict = gsea_gmt_parser(gene_set_path, min_size=min_size, max_size=max_size)
#obtain rank_metrics
rank_metric = gsea_rank_metric(rank_path)
correl_vector = rank_metric['rank'].values
gene_list = rank_metric['gene_name']
#extract each enriment term in the results.edb files and plot.
database = BeautifulSoup(open(results_path), features='xml')
length = len(database.findAll('DTG'))
for idx in range(length):
#extract statistical resutls from results.edb file
enrich_term, hit_ind, nes, pval, fdr= gsea_edb_parser(results_path, index=idx)
gene_set = gene_set_dict.get(enrich_term)
#calculate enrichment score
RES = enrichment_score(gene_list=gene_list, gene_set=gene_set, weighted_score_type=weight,
correl_vector=correl_vector)[2]
#plotting
fig = gsea_plot(rank_metric, enrich_term,hit_ind, nes, pval,
fdr, RES, phenoPos, phenoNeg, figsize=figsize)
fig.savefig('{a}/.gsea.replot.{b}.{c}'.format(a=outdir, b=enrich_term, c=format), dpi=300,)
logger.info("Congratulations! Your plots have been reproduced successfully!")
if hasattr(sys, 'ps1'):
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
return
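# Added illustration: a hypothetical call of replot() on an existing GSEA desktop
# results directory (paths are made up), regenerating the enrichment plots as PNG.
# replot(indir='./gsea_desktop_results/', outdir='gseapy_replot', format='png')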
def call(data, gene_sets, cls, outdir='gseapy_out', min_size=15, max_size=500, permutation_n=1000,
weighted_score_type=1,permutation_type='gene_set', method='log2_ratio_of_classes',
ascending=False, figsize=[6.5,6], format='pdf', graph_num=20, seed=None):
""" Run Gene Set Enrichment Analysis.
:param data: Gene expression data table.
:param gene_sets: Gene sets file. e.g. gmt files. Same input with GSEA.
:param permutation_n: Number of permutations for significance computation. Default: 1000.
:param permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
    :param int min_size: Minimum allowed number of genes from the gene set that are also in the data set. Default: 15.
    :param int max_size: Maximum allowed number of genes from the gene set that are also in the data set. Default: 500.
    :param weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for log scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for natural scale data.
:param ascending: Sorting order of rankings. Default: False.
:param outdir: Results output directory.
:param figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param format: Matplotlib figure format. Default: 'pdf'.
:param graph_num: Plot graphs for top sets of each phenotype
    :param seed: Random seed. Expects an integer. Default: None.
:return: Return a DataFrame when inside python console.
Generate ``GSEA`` plots and store a dictionary into csv file,
where dictionary key is a gene set and values are::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set}
"""
argument = locals()
assert permutation_type in ["phenotype", "gene_set"]
assert min_size <= max_size
try:
os.makedirs(outdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
pass
logger = log_init(outdir, module='call')
if isinstance(data, pd.DataFrame) :
df = data.copy()
argument['data'] = 'DataFrame'
elif isinstance(data, str) :
df = pd.read_table(data)
else:
raise Exception('Error parsing gene expression dataframe!')
sys.exit(1)
assert len(df) > 1
#write command to log file
argument = OrderedDict(sorted(argument.items(), key=lambda t:t[0]))
logger.debug("Command: call, "+str(argument))
#Start Analysis
logger.info("Parsing data files for GSEA.............................")
#select correct expression genes and values.
dat = preprocess(df)
# phenotype labels parsing
phenoPos, phenoNeg, classes = gsea_cls_parser(cls)
#ranking metrics calculation.
dat2 = ranking_metric(df=dat, method=method, phenoPos=phenoPos, phenoNeg=phenoNeg, classes=classes, ascending=ascending)
#filtering out gene sets and build gene sets dictionary
gmt = gsea_gmt_parser(gene_sets, min_size=min_size, max_size=max_size,gene_list=dat2['gene_name'].values)
logger.info("%s gene_sets used for further statistical testing....."% len(gmt))
logger.info("Start to run GSEA...Might take a while..................")
#compute ES, NES, pval, FDR, RES
results,hit_ind,rank_ES, subsets = gsea_compute(data=dat, n=permutation_n,gmt=gmt, weighted_score_type=weighted_score_type,
permutation_type=permutation_type, method=method,
phenoPos=phenoPos, phenoNeg=phenoNeg, classes=classes, ascending=ascending,
seed=seed)
logger.info("Start to generate gseapy reports, and produce figures...")
res = OrderedDict()
for gs,gseale,ind,RES in zip(subsets, list(results), hit_ind, rank_ES):
rdict = OrderedDict()
rdict['es'] = gseale[0]
rdict['nes'] = gseale[1]
rdict['pval'] = gseale[2]
rdict['fdr'] = gseale[3]
rdict['gene_set_size'] = len(gmt[gs])
rdict['matched_size'] = len(ind)
rdict['rank_ES'] = RES
rdict['genes'] = dat2.ix[ind,'gene_name'].tolist()
rdict['hit_index'] = ind
res[gs] = rdict
res_df = pd.DataFrame.from_dict(res,orient='index')
res_df.index.name = 'Term'
res_df.sort_values(by='fdr', inplace=True)
res_df.drop(['rank_ES','hit_index'], axis=1, inplace=True)
res_df.to_csv('{a}/{b}.{c}.gsea.reports.csv'.format(a=outdir, b='gseapy', c=permutation_type), float_format ='%.7f')
#Plotting
top_term = res_df.head(graph_num).index
width = len(classes) if len(classes) >= 6 else 5
for gs in top_term:
hit = res.get(gs)['hit_index']
gene_symbol = res.get(gs)['genes']
fig = gsea_plot(rank_metric=dat2, enrich_term=gs, hit_ind=hit,
nes=res.get(gs)['nes'], pval=res.get(gs)['pval'], fdr=res.get(gs)['fdr'],
RES=res.get(gs)['rank_ES'], phenoPos=phenoPos, phenoNeg=phenoNeg, figsize=figsize)
gs = gs.replace('/','_')
fig.savefig('{a}/{b}.gsea.{c}'.format(a=outdir, b=gs, c=format), dpi=300,)
heatmap(df=dat.loc[gene_symbol], term=gs, outdir=outdir,
figsize=(width, len(gene_symbol)/2), format=format)
logger.info("Congratulations. GSEAPY run successfully................")
# return dataframe if run gsea inside python console
#if isinstance(data, pd.DataFrame) or isinstance(cls, list):
if hasattr(sys, 'ps1'):
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
return res_df
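# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The keyword names follow the parameters referenced in ``call`` above; the
# file paths and the helper name ``_example_call_usage`` are hypothetical, and
# parameters not passed are assumed to keep the function's defaults.
def _example_call_usage():
    import pandas as pd
    expr = pd.read_table('expression.txt')  # genes x samples expression table (placeholder path)
    # 'phenotype.cls' is a GSEA-style class label file, 'KEGG_2016.gmt' a gene
    # set file; both paths are placeholders.
    return call(data=expr, gene_sets='KEGG_2016.gmt', cls='phenotype.cls',
                outdir='gsea_example_out', permutation_type='phenotype',
                permutation_n=100, seed=2017)
# ---------------------------------------------------------------------------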
def prerank(rnk, gene_sets, outdir='gseapy_out', pheno_pos='Pos', pheno_neg='Neg',
min_size=15, max_size=500, permutation_n=1000, weighted_score_type=1,
ascending=False, figsize=[6.5,6], format='pdf', graph_num=20, seed=None):
""" Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.
:param rnk: pre-ranked correlation table, Same input with ``GSEA`` .rnk file.
:param gene_sets: Gene sets file. e.g. gmt files. Same input with GSEA.
:param outdir: results output directory.
:param permutation_n: Number of permutations for significance computation. Default: 1000.
:param int min_size: Minimum allowed number of genes from the gene set that are also in the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from the gene set that are also in the data set. Default: 500.
:param weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param ascending: Sorting order of rankings. Default: False.
:param figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param format: Matplotlib figure format. Default: 'pdf'.
:param graph_num: Plot graphs for the top sets of each phenotype.
:param seed: Random seed. Expects an integer. Default: None.
:return: Return a DataFrame when run inside a Python console.
Generate ``GSEA`` plots and store a dictionary in a csv file,
where the dictionary key is a gene set and the values are::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set}
"""
argument = locals()
assert min_size <= max_size
try:
os.makedirs(outdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
pass
logger = log_init(outdir, module='prerank')
if isinstance(rnk, pd.DataFrame) :
argument['rnk'] = 'DataFrame'
#write command to log file
argument = OrderedDict(sorted(argument.items(), key=lambda t:t[0]))
logger.debug("Command: prerank, "+str(argument))
#Start Analysis
logger.info("Parsing data files for GSEA.............................")
dat2 = gsea_rank_metric(rnk)
assert len(dat2) > 1
#drop duplicates in ranking metrics.
dat2.drop_duplicates(subset='gene_name',inplace=True, keep='first')
#filtering out gene sets and build gene sets dictionary
gmt = gsea_gmt_parser(gene_sets, min_size=min_size, max_size=max_size, gene_list=dat2['gene_name'].values)
logger.info("%s gene_sets used for further statistical testing....."% len(gmt))
logger.info("Start to run GSEA...Might take a while..................")
#compute ES, NES, pval, FDR, RES
results,hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=permutation_n, gmt=gmt, weighted_score_type=weighted_score_type,
permutation_type='gene_set', method=None, phenoPos=pheno_pos, phenoNeg=pheno_neg,
classes=None, ascending=ascending, seed=seed, prerank=True)
logger.info("Start to generate gseapy reports, and produce figures...")
res = OrderedDict()
for gs,gseale,ind,RES in zip(subsets, list(results), hit_ind, rank_ES):
rdict = OrderedDict()
rdict['es'] = gseale[0]
rdict['nes'] = gseale[1]
rdict['pval'] = gseale[2]
rdict['fdr'] = gseale[3]
rdict['gene_set_size'] = len(gmt[gs])
rdict['matched_size'] = len(ind)
rdict['rank_ES'] = RES
rdict['genes'] = dat2['gene_name'].iloc[ind].tolist()  # positional lookup; .ix was removed in modern pandas
rdict['hit_index'] = ind
res[gs] = rdict
res_df = pd.DataFrame.from_dict(res, orient='index')
res_df.index.name = 'Term'
res_df.sort_values(by='fdr', inplace=True)
res_df.drop(['rank_ES','hit_index'], axis=1, inplace=True)
res_df.to_csv('{a}/{b}.prerank.reports.csv'.format(a=outdir, b='gseapy'), float_format ='%.7f')
#Plotting
top_term = res_df.head(graph_num).index
for gs in top_term:
fig = gsea_plot(rank_metric=dat2, enrich_term=gs, hit_ind=res.get(gs)['hit_index'],
nes=res.get(gs)['nes'], pval=res.get(gs)['pval'], fdr=res.get(gs)['fdr'],
RES=res.get(gs)['rank_ES'], phenoPos=pheno_pos, phenoNeg=pheno_neg, figsize=figsize)
gs = gs.replace('/','_')
fig.savefig('{a}/{b}.gsea.{c}'.format(a=outdir, b=gs, c=format), dpi=300,)
logger.info("Congratulations...GSEAPY run successfully...............")
# return dataframe if run gsea inside python console
#if isinstance(rnk, pd.DataFrame):
if hasattr(sys, 'ps1'):
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
return res_df
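# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# ``prerank``'s signature is shown above; the .rnk and .gmt paths and the
# helper name ``_example_prerank_usage`` are placeholders.
def _example_prerank_usage():
    # A .rnk file is a two-column, tab-separated table: gene name and the
    # user-supplied ranking metric (e.g. a correlation or fold change).
    return prerank(rnk='ranked_genes.rnk', gene_sets='KEGG_2016.gmt',
                   outdir='prerank_example_out', permutation_n=100,
                   graph_num=10, seed=2017)
# ---------------------------------------------------------------------------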
| 46.611268
| 138
| 0.62392
|
7ba7949125e8e17f56e523c768036d1c8d6a8bde
| 71
|
py
|
Python
|
tests/bytecode/mp-tests/try5.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
tests/bytecode/mp-tests/try5.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 13
|
2016-05-12T16:51:22.000Z
|
2018-01-10T22:33:25.000Z
|
tests/bytecode/mp-tests/try5.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
try:
f()
except A:
g()
except B as b:
h()
finally:
i()
| 7.888889
| 14
| 0.43662
|
1677b4e34cd3093aa6d564c0f4c083395c4d968e
| 2,787
|
py
|
Python
|
aiohttp/base_protocol.py
|
vananabu/aiohttp
|
44fe5c74d5d1d1043cfb57bb5dd04a9f2b5e228e
|
[
"Apache-2.0"
] | null | null | null |
aiohttp/base_protocol.py
|
vananabu/aiohttp
|
44fe5c74d5d1d1043cfb57bb5dd04a9f2b5e228e
|
[
"Apache-2.0"
] | null | null | null |
aiohttp/base_protocol.py
|
vananabu/aiohttp
|
44fe5c74d5d1d1043cfb57bb5dd04a9f2b5e228e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from typing import Optional, cast
from .log import internal_logger
class BaseProtocol(asyncio.Protocol):
__slots__ = ('_loop', '_paused', '_drain_waiter',
'_connection_lost', 'transport')
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop # type: asyncio.AbstractEventLoop
self._paused = False
self._drain_waiter = None # type: Optional[asyncio.Future[None]]
self._connection_lost = False
self._reading_paused = False
self.transport = None # type: Optional[asyncio.Transport]
def pause_writing(self) -> None:
assert not self._paused
self._paused = True
if self._loop.get_debug():
internal_logger.debug("%r pauses writing", self)
def resume_writing(self) -> None:
assert self._paused
self._paused = False
if self._loop.get_debug():
internal_logger.debug("%r resumes writing", self)
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def pause_reading(self) -> None:
if not self._reading_paused and self.transport is not None:
try:
self.transport.pause_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = True
def resume_reading(self) -> None:
if self._reading_paused and self.transport is not None:
try:
self.transport.resume_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = False
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.transport = cast(asyncio.Transport, transport)
def connection_lost(self, exc: Optional[BaseException]) -> None:
self._connection_lost = True
# Wake up the writer if currently paused.
self.transport = None
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
async def _drain_helper(self) -> None:
if self._connection_lost:
raise ConnectionResetError('Connection lost')
if not self._paused:
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
waiter = self._loop.create_future()
self._drain_waiter = waiter
await waiter
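# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of aiohttp). It shows the intended
# flow-control pattern: a subclass writes to the transport and then awaits
# ``_drain_helper`` so that writes back-pressure when the event loop has
# called ``pause_writing``. The class name is hypothetical.
class _ExampleWriterProtocol(BaseProtocol):
    async def write_and_drain(self, data: bytes) -> None:
        # ``transport`` is set in ``connection_made`` and cleared in
        # ``connection_lost``; guard against writing to a closed connection.
        if self.transport is None:
            raise ConnectionResetError('Connection lost')
        self.transport.write(data)
        # Returns immediately unless the loop has paused writing.
        await self._drain_helper()
# ---------------------------------------------------------------------------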
| 33.178571
| 73
| 0.613922
|
42a5ec1bdfcf097b5f762110cf01fe2d3ac8f404
| 62,585
|
py
|
Python
|
src/twisted/conch/test/test_keys.py
|
apjanke/twisted
|
22f949f7ce187513f0c218b73186c8a73baa00b4
|
[
"Unlicense",
"MIT"
] | 1
|
2021-01-03T01:54:14.000Z
|
2021-01-03T01:54:14.000Z
|
src/twisted/conch/test/test_keys.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/conch/test/test_keys.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
import base64
import os
from textwrap import dedent
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.python.filepath import FilePath
from twisted.python.reflect import requireModule
from twisted.trial import unittest
cryptography = requireModule("cryptography")
if cryptography is None:
skipCryptography = "Cannot run without cryptography."
pyasn1 = requireModule("pyasn1")
if cryptography and pyasn1:
from cryptography.hazmat.backends import default_backend
from twisted.conch.ssh import keys, common, sexpy
ED25519_SUPPORTED = default_backend().ed25519_supported()
else:
ED25519_SUPPORTED = False
def skipWithoutEd25519(f):
if not ED25519_SUPPORTED:
f.skip = "ed25519 not supported on this system"
return f
class KeyTests(unittest.TestCase):
if cryptography is None:
skip = skipCryptography
if pyasn1 is None:
skip = "Cannot run without PyASN1"
def setUp(self):
self.rsaObj = keys.Key._fromRSAComponents(
n=keydata.RSAData["n"],
e=keydata.RSAData["e"],
d=keydata.RSAData["d"],
p=keydata.RSAData["p"],
q=keydata.RSAData["q"],
u=keydata.RSAData["u"],
)._keyObject
self.dsaObj = keys.Key._fromDSAComponents(
y=keydata.DSAData["y"],
p=keydata.DSAData["p"],
q=keydata.DSAData["q"],
g=keydata.DSAData["g"],
x=keydata.DSAData["x"],
)._keyObject
self.ecObj = keys.Key._fromECComponents(
x=keydata.ECDatanistp256["x"],
y=keydata.ECDatanistp256["y"],
privateValue=keydata.ECDatanistp256["privateValue"],
curve=keydata.ECDatanistp256["curve"],
)._keyObject
self.ecObj384 = keys.Key._fromECComponents(
x=keydata.ECDatanistp384["x"],
y=keydata.ECDatanistp384["y"],
privateValue=keydata.ECDatanistp384["privateValue"],
curve=keydata.ECDatanistp384["curve"],
)._keyObject
self.ecObj521 = keys.Key._fromECComponents(
x=keydata.ECDatanistp521["x"],
y=keydata.ECDatanistp521["y"],
privateValue=keydata.ECDatanistp521["privateValue"],
curve=keydata.ECDatanistp521["curve"],
)._keyObject
if ED25519_SUPPORTED:
self.ed25519Obj = keys.Key._fromEd25519Components(
a=keydata.Ed25519Data["a"], k=keydata.Ed25519Data["k"]
)._keyObject
self.rsaSignature = (
b"\x00\x00\x00\x07ssh-rsa\x00\x00\x01\x00~Y\xa3\xd7\xfdW\xc6pu@"
b"\xd81\xa1S\xf3O\xdaE\xf4/\x1ex\x1d\xf1\x9a\xe1G3\xd9\xd6U\x1f"
b"\x8c\xd9\x1b\x8b\x90\x0e\x8a\xc1\x91\xd8\x0cd\xc9\x0c\xe7\xb2"
b"\xc9,'=\x15\x1cQg\xe7x\xb5j\xdbI\xc0\xde\xafb\xd7@\xcar\x0b"
b"\xce\xa3zM\x151q5\xde\xfa\x0c{wjKN\x88\xcbC\xe5\x89\xc3\xf9i"
b"\x96\x91\xdb\xca}\xdbR\x1a\x13T\xf9\x0cDJH\x0b\x06\xcfl\xf3"
b"\x13[\x82\xa2\x9d\x93\xfd\x8e\xce|\xfb^n\xd4\xed\xe2\xd1\x8a"
b"\xb7aY\x9bB\x8f\xa4\xc7\xbe7\xb5\x0b9j\xa4.\x87\x13\xf7\xf0"
b"\xda\xd7\xd2\xf9\x1f9p\xfd?\x18\x0f\xf2N\x9b\xcf/\x1e)\n>A\x19"
b"\xc2\xb5j\xf9UW\xd4\xae\x87B\xe6\x99t\xa2y\x90\x98\xa2\xaaf\xcb"
b"\x86\xe5k\xe3\xce\xe0u\x1c\xeb\x93\x1aN\x88\xc9\x93Y\xc3.V\xb1L"
b"44`C\xc7\xa66\xaf\xfa\x7f\x04Y\x92\xfa\xa4\x1a\x18%\x19\xd5 4^"
b"\xb9rY\xba \x01\xf9.\x89%H\xbe\x1c\x83A\x96"
)
self.dsaSignature = (
b"\x00\x00\x00\x07ssh-dss\x00\x00\x00(?\xc7\xeb\x86;\xd5TFA\xb4"
b"\xdf\x0c\xc4E@4,d\xbc\t\xd9\xae\xdd[\xed-\x82nQ\x8cf\x9b\xe8\xe1"
b"jrg\x84p<"
)
self.patch(randbytes, "secureRandom", lambda x: b"\xff" * x)
self.keyFile = self.mktemp()
with open(self.keyFile, "wb") as f:
f.write(keydata.privateRSA_lsh)
def tearDown(self):
os.unlink(self.keyFile)
def test_size(self):
"""
The L{keys.Key.size} method returns the size of the key object in bits.
"""
self.assertEqual(keys.Key(self.rsaObj).size(), 2048)
self.assertEqual(keys.Key(self.dsaObj).size(), 1024)
self.assertEqual(keys.Key(self.ecObj).size(), 256)
self.assertEqual(keys.Key(self.ecObj384).size(), 384)
self.assertEqual(keys.Key(self.ecObj521).size(), 521)
if ED25519_SUPPORTED:
self.assertEqual(keys.Key(self.ed25519Obj).size(), 256)
def test__guessStringType(self):
"""
Test that the _guessStringType method guesses string types
correctly.
"""
self.assertEqual(
keys.Key._guessStringType(keydata.publicRSA_openssh), "public_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.publicDSA_openssh), "public_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.publicECDSA_openssh), "public_openssh"
)
if ED25519_SUPPORTED:
self.assertEqual(
keys.Key._guessStringType(keydata.publicEd25519_openssh),
"public_openssh",
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_openssh), "private_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_openssh_new), "private_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_openssh), "private_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_openssh_new), "private_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateECDSA_openssh), "private_openssh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateECDSA_openssh_new),
"private_openssh",
)
if ED25519_SUPPORTED:
self.assertEqual(
keys.Key._guessStringType(keydata.privateEd25519_openssh_new),
"private_openssh",
)
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_lsh), "public_lsh")
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_lsh), "public_lsh")
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_lsh), "private_lsh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_lsh), "private_lsh"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateRSA_agentv3), "agentv3"
)
self.assertEqual(
keys.Key._guessStringType(keydata.privateDSA_agentv3), "agentv3"
)
self.assertEqual(
keys.Key._guessStringType(b"\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01"),
"blob",
)
self.assertEqual(
keys.Key._guessStringType(b"\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01"),
"blob",
)
self.assertEqual(keys.Key._guessStringType(b"not a key"), None)
def test_public(self):
"""
The L{keys.Key.public} method returns a public key for both
public and private keys.
"""
# NB: This assumes that the private and public keys correspond
# to each other.
privateRSAKey = keys.Key.fromString(keydata.privateRSA_openssh)
publicRSAKey = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertEqual(privateRSAKey.public(), publicRSAKey.public())
privateDSAKey = keys.Key.fromString(keydata.privateDSA_openssh)
publicDSAKey = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertEqual(privateDSAKey.public(), publicDSAKey.public())
privateECDSAKey = keys.Key.fromString(keydata.privateECDSA_openssh)
publicECDSAKey = keys.Key.fromString(keydata.publicECDSA_openssh)
self.assertEqual(privateECDSAKey.public(), publicECDSAKey.public())
if ED25519_SUPPORTED:
privateEd25519Key = keys.Key.fromString(keydata.privateEd25519_openssh_new)
publicEd25519Key = keys.Key.fromString(keydata.publicEd25519_openssh)
self.assertEqual(privateEd25519Key.public(), publicEd25519Key.public())
def test_isPublic(self):
"""
The L{keys.Key.isPublic} method returns True for public keys and
False otherwise.
"""
rsaKey = keys.Key.fromString(keydata.privateRSA_openssh)
dsaKey = keys.Key.fromString(keydata.privateDSA_openssh)
ecdsaKey = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertTrue(rsaKey.public().isPublic())
self.assertFalse(rsaKey.isPublic())
self.assertTrue(dsaKey.public().isPublic())
self.assertFalse(dsaKey.isPublic())
self.assertTrue(ecdsaKey.public().isPublic())
self.assertFalse(ecdsaKey.isPublic())
if ED25519_SUPPORTED:
ed25519Key = keys.Key.fromString(keydata.privateEd25519_openssh_new)
self.assertTrue(ed25519Key.public().isPublic())
self.assertFalse(ed25519Key.isPublic())
def _testPublicPrivateFromString(self, public, private, type, data):
self._testPublicFromString(public, type, data)
self._testPrivateFromString(private, type, data)
def _testPublicFromString(self, public, type, data):
publicKey = keys.Key.fromString(public)
self.assertTrue(publicKey.isPublic())
self.assertEqual(publicKey.type(), type)
for k, v in publicKey.data().items():
self.assertEqual(data[k], v)
def _testPrivateFromString(self, private, type, data):
privateKey = keys.Key.fromString(private)
self.assertFalse(privateKey.isPublic())
self.assertEqual(privateKey.type(), type)
for k, v in data.items():
self.assertEqual(privateKey.data()[k], v)
def test_fromOpenSSH(self):
"""
Test that keys are correctly generated from OpenSSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicECDSA_openssh,
keydata.privateECDSA_openssh,
"EC",
keydata.ECDatanistp256,
)
self._testPublicPrivateFromString(
keydata.publicRSA_openssh,
keydata.privateRSA_openssh,
"RSA",
keydata.RSAData,
)
self.assertEqual(
keys.Key.fromString(
keydata.privateRSA_openssh_encrypted, passphrase=b"encrypted"
),
keys.Key.fromString(keydata.privateRSA_openssh),
)
self.assertEqual(
keys.Key.fromString(keydata.privateRSA_openssh_alternate),
keys.Key.fromString(keydata.privateRSA_openssh),
)
self._testPublicPrivateFromString(
keydata.publicDSA_openssh,
keydata.privateDSA_openssh,
"DSA",
keydata.DSAData,
)
if ED25519_SUPPORTED:
self._testPublicPrivateFromString(
keydata.publicEd25519_openssh,
keydata.privateEd25519_openssh_new,
"Ed25519",
keydata.Ed25519Data,
)
def test_fromOpenSSHErrors(self):
"""
Tests for invalid key types.
"""
badKey = b"""-----BEGIN FOO PRIVATE KEY-----
MIGkAgEBBDAtAi7I8j73WCX20qUM5hhHwHuFzYWYYILs2Sh8UZ+awNkARZ/Fu2LU
LLl5RtOQpbWgBwYFK4EEACKhZANiAATU17sA9P5FRwSknKcFsjjsk0+E3CeXPYX0
Tk/M0HK3PpWQWgrO8JdRHP9eFE9O/23P8BumwFt7F/AvPlCzVd35VfraFT0o4cCW
G0RqpQ+np31aKmeJshkcYALEchnU+tQ=
-----END EC PRIVATE KEY-----"""
self.assertRaises(
keys.BadKeyError, keys.Key._fromString_PRIVATE_OPENSSH, badKey, None
)
def test_fromOpenSSH_with_whitespace(self):
"""
If key strings have trailing whitespace, it should be ignored.
"""
# from bug #3391, since our test key data doesn't have
# an issue with appended newlines
privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
self.assertEqual(
keys.Key.fromString(privateDSAData),
keys.Key.fromString(privateDSAData + b"\n"),
)
def test_fromNewerOpenSSH(self):
"""
Newer versions of OpenSSH generate encrypted keys which have a longer
IV than the older versions. These newer keys are also loaded.
"""
key = keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_aes, passphrase=b"testxp"
)
self.assertEqual(key.type(), "RSA")
key2 = keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_aes + b"\n", passphrase=b"testxp"
)
self.assertEqual(key, key2)
def test_fromOpenSSH_v1_format(self):
"""
OpenSSH 6.5 introduced a newer "openssh-key-v1" private key format
(made the default in OpenSSH 7.8). Loading keys in this format
produces identical results to loading the same keys in the old
PEM-based format.
"""
for old, new in (
(keydata.privateRSA_openssh, keydata.privateRSA_openssh_new),
(keydata.privateDSA_openssh, keydata.privateDSA_openssh_new),
(keydata.privateECDSA_openssh, keydata.privateECDSA_openssh_new),
(keydata.privateECDSA_openssh384, keydata.privateECDSA_openssh384_new),
(keydata.privateECDSA_openssh521, keydata.privateECDSA_openssh521_new),
):
self.assertEqual(keys.Key.fromString(new), keys.Key.fromString(old))
self.assertEqual(
keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_new, passphrase=b"encrypted"
),
keys.Key.fromString(
keydata.privateRSA_openssh_encrypted, passphrase=b"encrypted"
),
)
def test_fromOpenSSH_windows_line_endings(self):
"""
Test that keys are correctly generated from OpenSSH strings with
Windows line endings.
"""
privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
self.assertEqual(
keys.Key.fromString(privateDSAData),
keys.Key.fromString(privateDSAData.replace(b"\n", b"\r\n")),
)
def test_fromLSHPublicUnsupportedType(self):
"""
C{BadKeyError} exception is raised when public key has an unknown
type.
"""
sexp = sexpy.pack([[b"public-key", [b"bad-key", [b"p", b"2"]]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
data=b"{" + base64.b64encode(sexp) + b"}",
)
def test_fromLSHPrivateUnsupportedType(self):
"""
C{BadKeyError} exception is raised when private key has an unknown
type.
"""
sexp = sexpy.pack([[b"private-key", [b"bad-key", [b"p", b"2"]]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
sexp,
)
def test_fromLSHRSA(self):
"""
RSA public and private keys can be generated from LSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicRSA_lsh,
keydata.privateRSA_lsh,
"RSA",
keydata.RSAData,
)
def test_fromLSHDSA(self):
"""
DSA public and private keys can be generated from LSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicDSA_lsh,
keydata.privateDSA_lsh,
"DSA",
keydata.DSAData,
)
def test_fromAgentv3(self):
"""
Test that keys are correctly generated from Agent v3 strings.
"""
self._testPrivateFromString(keydata.privateRSA_agentv3, "RSA", keydata.RSAData)
self._testPrivateFromString(keydata.privateDSA_agentv3, "DSA", keydata.DSAData)
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
b"\x00\x00\x00\x07ssh-foo" + b"\x00\x00\x00\x01\x01" * 5,
)
def test_fromStringNormalizesUnicodePassphrase(self):
"""
L{keys.Key.fromString} applies Normalization Form KC to Unicode
passphrases.
"""
key = keys.Key(self.rsaObj)
key_data = key.toString(
"openssh", passphrase="verschl\u00FCsselt".encode("UTF-8")
)
self.assertEqual(
keys.Key.fromString(key_data, passphrase="verschlu\u0308sselt"), key
)
# U+FFFF is a "noncharacter" and guaranteed to have General_Category
# Cn (Unassigned).
self.assertRaises(
keys.PassphraseNormalizationError,
keys.Key.fromString,
key_data,
passphrase="unassigned \uFFFF",
)
def test_fromStringErrors(self):
"""
keys.Key.fromString should raise BadKeyError when the key is invalid.
"""
self.assertRaises(keys.BadKeyError, keys.Key.fromString, b"")
# no key data with a bad key type
self.assertRaises(keys.BadKeyError, keys.Key.fromString, b"", "bad_type")
# trying to decrypt a key which doesn't support encryption
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
keydata.publicRSA_lsh,
passphrase=b"unencrypted",
)
# trying to decrypt a key with the wrong passphrase
self.assertRaises(
keys.EncryptedKeyError,
keys.Key.fromString,
keys.Key(self.rsaObj).toString("openssh", passphrase=b"encrypted"),
)
# key with no key data
self.assertRaises(
keys.BadKeyError, keys.Key.fromString, b"-----BEGIN RSA KEY-----\nwA==\n"
)
# key with invalid DEK Info
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: weird type
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""",
passphrase="encrypted",
)
# key with invalid encryption type
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FOO-123-BAR,01234567
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""",
passphrase="encrypted",
)
# key with bad IV (AES)
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""",
passphrase="encrypted",
)
# key with bad IV (DES3)
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString,
b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""",
passphrase="encrypted",
)
def test_fromFile(self):
"""
Test that fromFile works correctly.
"""
self.assertEqual(
keys.Key.fromFile(self.keyFile), keys.Key.fromString(keydata.privateRSA_lsh)
)
self.assertRaises(keys.BadKeyError, keys.Key.fromFile, self.keyFile, "bad_type")
self.assertRaises(
keys.BadKeyError, keys.Key.fromFile, self.keyFile, passphrase="unencrypted"
)
def test_init(self):
"""
Test that the PublicKey object is initialized correctly.
"""
obj = keys.Key._fromRSAComponents(n=5, e=3)._keyObject
key = keys.Key(obj)
self.assertEqual(key._keyObject, obj)
def test_equal(self):
"""
Test that Key objects are compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(keys.Key._fromRSAComponents(n=5, e=3)._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertTrue(rsa1 == rsa2)
self.assertFalse(rsa1 == rsa3)
self.assertFalse(rsa1 == dsa)
self.assertFalse(rsa1 == object)
self.assertFalse(rsa1 == None)
def test_notEqual(self):
"""
Test that Key objects compare as unequal correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(keys.Key._fromRSAComponents(n=5, e=3)._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertFalse(rsa1 != rsa2)
self.assertTrue(rsa1 != rsa3)
self.assertTrue(rsa1 != dsa)
self.assertTrue(rsa1 != object)
self.assertTrue(rsa1 != None)
def test_dataError(self):
"""
The L{keys.Key.data} method raises RuntimeError for bad keys.
"""
badKey = keys.Key(b"")
self.assertRaises(RuntimeError, badKey.data)
def test_fingerprintdefault(self):
"""
Test that the fingerprint method returns the fingerprint in
L{FingerprintFormats.MD5-HEX} format by default.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(),
"85:25:04:32:58:55:96:9f:57:ee:fb:a8:1a:ea:69:da",
)
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(),
"63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1",
)
def test_fingerprint_md5_hex(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.MD5-HEX} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(keys.FingerprintFormats.MD5_HEX),
"85:25:04:32:58:55:96:9f:57:ee:fb:a8:1a:ea:69:da",
)
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(keys.FingerprintFormats.MD5_HEX),
"63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1",
)
def test_fingerprintsha256(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.SHA256-BASE64} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(keys.FingerprintFormats.SHA256_BASE64),
"FBTCOoknq0mHy+kpfnY9tDdcAJuWtCpuQMaV3EsvbUI=",
)
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(keys.FingerprintFormats.SHA256_BASE64),
"Wz5o2YbKyxOEcJn1au/UaALSVruUzfz0vaLI1xiIGyY=",
)
def test_fingerprintBadFormat(self):
"""
A C{BadFingerPrintFormat} error is raised when unsupported
formats are requested.
"""
with self.assertRaises(keys.BadFingerPrintFormat) as em:
keys.Key(self.rsaObj).fingerprint("sha256-base")
self.assertEqual(
"Unsupported fingerprint format: sha256-base", em.exception.args[0]
)
def test_type(self):
"""
Test that the type method returns the correct type for an object.
"""
self.assertEqual(keys.Key(self.rsaObj).type(), "RSA")
self.assertEqual(keys.Key(self.rsaObj).sshType(), b"ssh-rsa")
self.assertEqual(keys.Key(self.dsaObj).type(), "DSA")
self.assertEqual(keys.Key(self.dsaObj).sshType(), b"ssh-dss")
self.assertEqual(keys.Key(self.ecObj).type(), "EC")
self.assertEqual(
keys.Key(self.ecObj).sshType(), keydata.ECDatanistp256["curve"]
)
if ED25519_SUPPORTED:
self.assertEqual(keys.Key(self.ed25519Obj).type(), "Ed25519")
self.assertEqual(keys.Key(self.ed25519Obj).sshType(), b"ssh-ed25519")
self.assertRaises(RuntimeError, keys.Key(None).type)
self.assertRaises(RuntimeError, keys.Key(None).sshType)
self.assertRaises(RuntimeError, keys.Key(self).type)
self.assertRaises(RuntimeError, keys.Key(self).sshType)
def test_fromBlobUnsupportedType(self):
"""
A C{BadKeyError} error is raised when the blob has an unsupported
key type.
"""
badBlob = common.NS(b"ssh-bad")
self.assertRaises(keys.BadKeyError, keys.Key.fromString, badBlob)
def test_fromBlobRSA(self):
"""
A public RSA key is correctly generated from a public key blob.
"""
rsaPublicData = {
"n": keydata.RSAData["n"],
"e": keydata.RSAData["e"],
}
rsaBlob = (
common.NS(b"ssh-rsa")
+ common.MP(rsaPublicData["e"])
+ common.MP(rsaPublicData["n"])
)
rsaKey = keys.Key.fromString(rsaBlob)
self.assertTrue(rsaKey.isPublic())
self.assertEqual(rsaPublicData, rsaKey.data())
def test_fromBlobDSA(self):
"""
A public DSA key is correctly generated from a public key blob.
"""
dsaPublicData = {
"p": keydata.DSAData["p"],
"q": keydata.DSAData["q"],
"g": keydata.DSAData["g"],
"y": keydata.DSAData["y"],
}
dsaBlob = (
common.NS(b"ssh-dss")
+ common.MP(dsaPublicData["p"])
+ common.MP(dsaPublicData["q"])
+ common.MP(dsaPublicData["g"])
+ common.MP(dsaPublicData["y"])
)
dsaKey = keys.Key.fromString(dsaBlob)
self.assertTrue(dsaKey.isPublic())
self.assertEqual(dsaPublicData, dsaKey.data())
def test_fromBlobECDSA(self):
"""
Key.fromString generates ECDSA keys from blobs.
"""
from cryptography import utils
ecPublicData = {
"x": keydata.ECDatanistp256["x"],
"y": keydata.ECDatanistp256["y"],
"curve": keydata.ECDatanistp256["curve"],
}
ecblob = (
common.NS(ecPublicData["curve"])
+ common.NS(ecPublicData["curve"][-8:])
+ common.NS(
b"\x04"
+ utils.int_to_bytes(ecPublicData["x"], 32)
+ utils.int_to_bytes(ecPublicData["y"], 32)
)
)
eckey = keys.Key.fromString(ecblob)
self.assertTrue(eckey.isPublic())
self.assertEqual(ecPublicData, eckey.data())
@skipWithoutEd25519
def test_fromBlobEd25519(self):
"""
A public Ed25519 key is correctly generated from a public key blob.
"""
ed25519PublicData = {
"a": keydata.Ed25519Data["a"],
}
ed25519Blob = common.NS(b"ssh-ed25519") + common.NS(ed25519PublicData["a"])
ed25519Key = keys.Key.fromString(ed25519Blob)
self.assertTrue(ed25519Key.isPublic())
self.assertEqual(ed25519PublicData, ed25519Key.data())
def test_fromPrivateBlobUnsupportedType(self):
"""
C{BadKeyError} is raised when loading a private blob with an
unsupported type.
"""
badBlob = common.NS(b"ssh-bad")
self.assertRaises(keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_fromPrivateBlobRSA(self):
"""
A private RSA key is correctly generated from a private key blob.
"""
rsaBlob = (
common.NS(b"ssh-rsa")
+ common.MP(keydata.RSAData["n"])
+ common.MP(keydata.RSAData["e"])
+ common.MP(keydata.RSAData["d"])
+ common.MP(keydata.RSAData["u"])
+ common.MP(keydata.RSAData["p"])
+ common.MP(keydata.RSAData["q"])
)
rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
self.assertFalse(rsaKey.isPublic())
self.assertEqual(keydata.RSAData, rsaKey.data())
self.assertEqual(
rsaKey, keys.Key._fromString_PRIVATE_BLOB(rsaKey.privateBlob())
)
def test_fromPrivateBlobDSA(self):
"""
A private DSA key is correctly generated from a private key blob.
"""
dsaBlob = (
common.NS(b"ssh-dss")
+ common.MP(keydata.DSAData["p"])
+ common.MP(keydata.DSAData["q"])
+ common.MP(keydata.DSAData["g"])
+ common.MP(keydata.DSAData["y"])
+ common.MP(keydata.DSAData["x"])
)
dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
self.assertFalse(dsaKey.isPublic())
self.assertEqual(keydata.DSAData, dsaKey.data())
self.assertEqual(
dsaKey, keys.Key._fromString_PRIVATE_BLOB(dsaKey.privateBlob())
)
def test_fromPrivateBlobECDSA(self):
"""
A private EC key is correctly generated from a private key blob.
"""
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
publicNumbers = ec.EllipticCurvePublicNumbers(
x=keydata.ECDatanistp256["x"],
y=keydata.ECDatanistp256["y"],
curve=ec.SECP256R1(),
)
ecblob = (
common.NS(keydata.ECDatanistp256["curve"])
+ common.NS(keydata.ECDatanistp256["curve"][-8:])
+ common.NS(
publicNumbers.public_key(default_backend()).public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
+ common.MP(keydata.ECDatanistp256["privateValue"])
)
eckey = keys.Key._fromString_PRIVATE_BLOB(ecblob)
self.assertFalse(eckey.isPublic())
self.assertEqual(keydata.ECDatanistp256, eckey.data())
self.assertEqual(eckey, keys.Key._fromString_PRIVATE_BLOB(eckey.privateBlob()))
@skipWithoutEd25519
def test_fromPrivateBlobEd25519(self):
"""
A private Ed25519 key is correctly generated from a private key blob.
"""
ed25519Blob = (
common.NS(b"ssh-ed25519")
+ common.NS(keydata.Ed25519Data["a"])
+ common.NS(keydata.Ed25519Data["k"] + keydata.Ed25519Data["a"])
)
ed25519Key = keys.Key._fromString_PRIVATE_BLOB(ed25519Blob)
self.assertFalse(ed25519Key.isPublic())
self.assertEqual(keydata.Ed25519Data, ed25519Key.data())
self.assertEqual(
ed25519Key, keys.Key._fromString_PRIVATE_BLOB(ed25519Key.privateBlob())
)
def test_blobRSA(self):
"""
Return the over-the-wire SSH format of the RSA public key.
"""
self.assertEqual(
keys.Key(self.rsaObj).blob(),
common.NS(b"ssh-rsa")
+ common.MP(self.rsaObj.private_numbers().public_numbers.e)
+ common.MP(self.rsaObj.private_numbers().public_numbers.n),
)
def test_blobDSA(self):
"""
Return the over-the-wire SSH format of the DSA public key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).blob(),
common.NS(b"ssh-dss")
+ common.MP(publicNumbers.parameter_numbers.p)
+ common.MP(publicNumbers.parameter_numbers.q)
+ common.MP(publicNumbers.parameter_numbers.g)
+ common.MP(publicNumbers.y),
)
def test_blobEC(self):
"""
Return the over-the-wire SSH format of the EC public key.
"""
from cryptography import utils
byteLength = (self.ecObj.curve.key_size + 7) // 8
self.assertEqual(
keys.Key(self.ecObj).blob(),
common.NS(keydata.ECDatanistp256["curve"])
+ common.NS(keydata.ECDatanistp256["curve"][-8:])
+ common.NS(
b"\x04"
+ utils.int_to_bytes(
self.ecObj.private_numbers().public_numbers.x, byteLength
)
+ utils.int_to_bytes(
self.ecObj.private_numbers().public_numbers.y, byteLength
)
),
)
@skipWithoutEd25519
def test_blobEd25519(self):
"""
Return the over-the-wire SSH format of the Ed25519 public key.
"""
from cryptography.hazmat.primitives import serialization
publicBytes = self.ed25519Obj.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
self.assertEqual(
keys.Key(self.ed25519Obj).blob(),
common.NS(b"ssh-ed25519") + common.NS(publicBytes),
)
def test_blobNoKey(self):
"""
C{RuntimeError} is raised when the blob is requested for a Key
which is not wrapping anything.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.blob)
def test_privateBlobRSA(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of an
RSA private key.
"""
numbers = self.rsaObj.private_numbers()
self.assertEqual(
keys.Key(self.rsaObj).privateBlob(),
common.NS(b"ssh-rsa")
+ common.MP(numbers.public_numbers.n)
+ common.MP(numbers.public_numbers.e)
+ common.MP(numbers.d)
+ common.MP(numbers.iqmp)
+ common.MP(numbers.p)
+ common.MP(numbers.q),
)
def test_privateBlobDSA(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of a DSA
private key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).privateBlob(),
common.NS(b"ssh-dss")
+ common.MP(publicNumbers.parameter_numbers.p)
+ common.MP(publicNumbers.parameter_numbers.q)
+ common.MP(publicNumbers.parameter_numbers.g)
+ common.MP(publicNumbers.y)
+ common.MP(self.dsaObj.private_numbers().x),
)
def test_privateBlobEC(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of an EC
private key.
"""
from cryptography.hazmat.primitives import serialization
self.assertEqual(
keys.Key(self.ecObj).privateBlob(),
common.NS(keydata.ECDatanistp256["curve"])
+ common.NS(keydata.ECDatanistp256["curve"][-8:])
+ common.NS(
self.ecObj.public_key().public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
+ common.MP(self.ecObj.private_numbers().private_value),
)
@skipWithoutEd25519
def test_privateBlobEd25519(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of an
Ed25519 private key.
"""
from cryptography.hazmat.primitives import serialization
publicBytes = self.ed25519Obj.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
privateBytes = self.ed25519Obj.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
self.assertEqual(
keys.Key(self.ed25519Obj).privateBlob(),
common.NS(b"ssh-ed25519")
+ common.NS(publicBytes)
+ common.NS(privateBytes + publicBytes),
)
def test_privateBlobNoKeyObject(self):
"""
Raises L{RuntimeError} if the underlying key object does not exist.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_agentv3)
self.assertEqual(key.toString("openssh"), keydata.privateRSA_openssh)
self.assertEqual(
key.toString("openssh", passphrase=b"encrypted"),
keydata.privateRSA_openssh_encrypted,
)
self.assertEqual(
key.public().toString("openssh"), keydata.publicRSA_openssh[:-8]
) # no comment
self.assertEqual(
key.public().toString("openssh", comment=b"comment"),
keydata.publicRSA_openssh,
)
def test_toOpenSSHRSA_v1_format(self):
"""
L{keys.Key.toString} serializes an RSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
new_key_data = key.toString("openssh", subtype="v1")
new_enc_key_data = key.toString("openssh", subtype="v1", passphrase="encrypted")
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_key_data.splitlines()[0]
)
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_enc_key_data.splitlines()[0]
)
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase="encrypted")
)
def test_toOpenSSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_lsh)
self.assertEqual(key.toString("openssh"), keydata.privateDSA_openssh)
self.assertEqual(
key.public().toString("openssh", comment=b"comment"),
keydata.publicDSA_openssh,
)
self.assertEqual(
key.public().toString("openssh"), keydata.publicDSA_openssh[:-8]
) # no comment
def test_toOpenSSHDSA_v1_format(self):
"""
L{keys.Key.toString} serializes a DSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
new_key_data = key.toString("openssh", subtype="v1")
new_enc_key_data = key.toString("openssh", subtype="v1", passphrase="encrypted")
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_key_data.splitlines()[0]
)
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_enc_key_data.splitlines()[0]
)
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase="encrypted")
)
def test_toOpenSSHECDSA(self):
"""
L{keys.Key.toString} serializes an ECDSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertEqual(
key.public().toString("openssh", comment=b"comment"),
keydata.publicECDSA_openssh,
)
self.assertEqual(
key.public().toString("openssh"), keydata.publicECDSA_openssh[:-8]
) # no comment
def test_toOpenSSHECDSA_v1_format(self):
"""
L{keys.Key.toString} serializes an ECDSA key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateECDSA_openssh)
new_key_data = key.toString("openssh", subtype="v1")
new_enc_key_data = key.toString("openssh", subtype="v1", passphrase="encrypted")
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_key_data.splitlines()[0]
)
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_enc_key_data.splitlines()[0]
)
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase="encrypted")
)
@skipWithoutEd25519
def test_toOpenSSHEd25519(self):
"""
L{keys.Key.toString} serializes an Ed25519 key in OpenSSH's v1 format.
"""
key = keys.Key.fromString(keydata.privateEd25519_openssh_new)
new_key_data = key.toString("openssh")
new_enc_key_data = key.toString("openssh", passphrase="encrypted")
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_key_data.splitlines()[0]
)
self.assertEqual(
b"-----BEGIN OPENSSH PRIVATE KEY-----", new_enc_key_data.splitlines()[0]
)
self.assertEqual(key, keys.Key.fromString(new_key_data))
self.assertEqual(
key, keys.Key.fromString(new_enc_key_data, passphrase="encrypted")
)
self.assertEqual(new_key_data, key.toString("openssh", subtype="v1"))
@skipWithoutEd25519
def test_toOpenSSHEd25519_PEM_format(self):
"""
L{keys.Key.toString} refuses to serialize an Ed25519 key in
OpenSSH's old PEM format, as no encoding of Ed25519 is defined for
that format.
"""
key = keys.Key.fromString(keydata.privateEd25519_openssh_new)
self.assertRaises(ValueError, key.toString, "openssh", subtype="PEM")
def test_toLSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString("lsh"), keydata.privateRSA_lsh)
self.assertEqual(key.public().toString("lsh"), keydata.publicRSA_lsh)
def test_toLSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString("lsh"), keydata.privateDSA_lsh)
self.assertEqual(key.public().toString("lsh"), keydata.publicDSA_lsh)
def test_toAgentv3RSA(self):
"""
L{keys.Key.toString} serializes an RSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString("agentv3"), keydata.privateRSA_agentv3)
def test_toAgentv3DSA(self):
"""
L{keys.Key.toString} serializes a DSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString("agentv3"), keydata.privateDSA_agentv3)
def test_toStringNormalizesUnicodePassphrase(self):
"""
L{keys.Key.toString} applies Normalization Form KC to Unicode
passphrases.
"""
key = keys.Key(self.rsaObj)
key_data = key.toString("openssh", passphrase="verschlu\u0308sselt")
self.assertEqual(
keys.Key.fromString(
key_data, passphrase="verschl\u00FCsselt".encode("UTF-8")
),
key,
)
# U+FFFF is a "noncharacter" and guaranteed to have General_Category
# Cn (Unassigned).
self.assertRaises(
keys.PassphraseNormalizationError,
key.toString,
"openssh",
passphrase="unassigned \uFFFF",
)
def test_toStringErrors(self):
"""
L{keys.Key.toString} raises L{keys.BadKeyError} when passed an invalid
format type.
"""
self.assertRaises(keys.BadKeyError, keys.Key(self.rsaObj).toString, "bad_type")
def test_signAndVerifyRSA(self):
"""
Signed data can be verified using RSA.
"""
data = b"some-data"
key = keys.Key.fromString(keydata.privateRSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyDSA(self):
"""
Signed data can be verified using DSA.
"""
data = b"some-data"
key = keys.Key.fromString(keydata.privateDSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyEC(self):
"""
Signed data can be verified using EC.
"""
data = b"some-data"
key = keys.Key.fromString(keydata.privateECDSA_openssh)
signature = key.sign(data)
key384 = keys.Key.fromString(keydata.privateECDSA_openssh384)
signature384 = key384.sign(data)
key521 = keys.Key.fromString(keydata.privateECDSA_openssh521)
signature521 = key521.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
self.assertTrue(key384.public().verify(signature384, data))
self.assertTrue(key384.verify(signature384, data))
self.assertTrue(key521.public().verify(signature521, data))
self.assertTrue(key521.verify(signature521, data))
@skipWithoutEd25519
def test_signAndVerifyEd25519(self):
"""
Signed data can be verified using Ed25519.
"""
data = b"some-data"
key = keys.Key.fromString(keydata.privateEd25519_openssh_new)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_verifyRSA(self):
"""
A known-good RSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertTrue(key.verify(self.rsaSignature, b""))
self.assertFalse(key.verify(self.rsaSignature, b"a"))
self.assertFalse(key.verify(self.dsaSignature, b""))
def test_verifyDSA(self):
"""
A known-good DSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature, b""))
self.assertFalse(key.verify(self.dsaSignature, b"a"))
self.assertFalse(key.verify(self.rsaSignature, b""))
def test_verifyDSANoPrefix(self):
"""
Some commercial SSH servers send DSA keys as two 20-byte numbers;
they are still verified as valid keys.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature[-40:], b""))
def test_reprPrivateRSA(self):
"""
The repr of a L{keys.Key} contains all of the RSA components for an RSA
private key.
"""
self.assertEqual(
repr(keys.Key(self.rsaObj)),
"""<RSA Private Key (2048 bits)
attr d:
\t21:4c:08:66:a2:28:d5:b4:fb:8e:0f:72:1b:85:09:
\t00:b9:f2:4e:37:f0:1c:57:4b:e3:51:7f:9e:23:a7:
\te4:3a:98:55:1b:ea:8b:7a:98:1e:bc:d8:ba:b1:f9:
\t89:12:18:60:ac:e8:cc:0b:4e:09:5a:40:6a:ba:2f:
\t99:f8:b3:24:60:84:b9:ce:69:95:9a:f9:e2:fc:1f:
\t51:4d:27:15:db:2b:27:ad:ef:b4:69:ac:be:7d:10:
\teb:86:47:70:73:b4:00:87:95:15:3b:37:f9:e7:14:
\te7:80:bb:68:1e:1b:e6:dd:bb:73:63:b9:67:e6:b2:
\t27:7f:cf:cf:30:9b:c2:98:fd:d9:18:36:2f:36:2e:
\tf1:3d:81:7a:9f:e1:03:2d:47:db:34:51:62:39:dd:
\t4f:e9:ac:a8:8b:d9:d6:f3:84:c4:17:b9:71:9d:06:
\t08:42:78:4d:bb:c5:2a:f4:c3:58:cd:55:2b:ed:be:
\t33:5f:04:ea:7b:e6:04:24:63:f2:2d:d7:3d:1b:6c:
\td5:9c:63:43:2f:92:88:8d:3e:6e:da:18:37:d8:0f:
\t25:67:89:1d:b9:46:34:5e:c9:ce:c4:8b:ed:92:5a:
\t33:07:0f:df:86:08:f9:92:e9:db:eb:38:08:36:c9:
\tcd:cd:0a:01:48:5b:39:3e:7a:ca:c6:80:a9:dc:d4:
\t39
attr e:
\t01:00:01
attr n:
\t00:d5:6a:ac:78:23:d6:d6:1b:ec:25:a1:50:c4:77:
\t63:50:84:45:01:55:42:14:2a:2a:e0:d0:60:ee:d4:
\te9:a3:ad:4a:fa:39:06:5e:84:55:75:5f:00:36:bf:
\t6f:aa:2a:3f:83:26:37:c1:69:2e:5b:fd:f0:f3:d2:
\t7d:d6:98:cd:3a:40:78:d5:ca:a8:18:c0:11:93:24:
\t09:0c:81:4c:8f:f7:9c:ed:13:16:6a:a4:04:e9:49:
\t77:c3:e4:55:64:b3:79:68:9e:2c:08:eb:ac:e8:04:
\t2d:21:77:05:a7:8e:ef:53:30:0d:a5:e5:bb:3d:6a:
\te2:09:36:6f:fd:34:d3:7d:6f:46:ff:87:da:a9:29:
\t27:aa:ff:ad:f5:85:e6:3e:1a:b8:7a:1d:4a:b1:ea:
\tc0:5a:f7:30:df:1f:c2:a4:e4:ef:3f:91:49:96:40:
\td5:19:77:2d:37:c3:5e:ec:9d:a6:3a:44:a5:c2:a4:
\t29:dd:d5:ba:9c:3d:45:b3:c6:2c:18:64:d5:ba:3d:
\tdf:ab:7f:cd:42:ac:a7:f1:18:0b:a0:58:15:62:0b:
\ta4:2a:6e:43:c3:e4:04:9f:35:a3:47:8e:46:ed:33:
\ta5:65:bd:bc:3b:29:6e:02:0b:57:df:74:e8:13:b4:
\t37:35:7e:83:5f:20:26:60:a6:dc:ad:8b:c6:6c:79:
\t98:f7
attr p:
\t00:d9:70:06:d8:e2:bc:d4:78:91:50:94:d4:c1:1b:
\t89:38:6c:46:64:5a:51:a0:9a:07:3d:48:8f:03:51:
\tcc:6b:12:8e:7d:1a:b1:65:e7:71:75:39:e0:32:05:
\t75:8d:18:4c:af:93:b1:49:b1:66:5f:78:62:7a:d1:
\t0c:ca:e6:4d:43:b3:9c:f4:6b:7d:e6:0c:98:dc:cf:
\t21:62:8e:d5:2e:12:de:04:ae:d7:24:6e:83:31:a2:
\t15:a2:44:3d:22:a9:62:26:22:b9:b2:ed:54:0a:9d:
\t08:83:a7:07:0d:ff:19:18:8e:d8:ab:1d:da:48:9c:
\t31:68:11:a1:66:6d:e3:d8:1d
attr q:
\t00:fb:44:17:8b:a4:36:be:1e:37:1d:a7:f6:61:6c:
\t04:c4:aa:dd:78:3e:07:8c:1e:33:02:ae:03:14:87:
\t83:7a:e5:9e:7d:08:67:a8:f2:aa:bf:12:70:cf:72:
\ta9:a7:c7:0b:1d:88:d5:20:fd:9c:63:ca:47:30:55:
\t4e:8b:c4:cf:f4:7f:16:a4:92:12:74:a1:09:c2:c4:
\t6e:9c:8c:33:ef:a5:e5:f7:e0:2b:ad:4f:5c:11:aa:
\t1a:84:37:5b:fd:7a:ea:c3:cd:7c:b0:c8:e4:1f:54:
\t63:b5:c7:af:df:f4:09:a7:fc:c7:25:fc:5c:e9:91:
\td7:92:c5:98:1e:56:d3:b1:23
attr u:
\t00:85:4b:1b:7a:9b:12:10:37:9e:1f:ad:5e:da:fe:
\tc6:96:fe:df:35:6b:b9:34:e2:16:97:92:26:09:bd:
\tbd:70:20:03:a7:35:bd:2d:1b:a0:d2:07:47:2b:d4:
\tde:a8:a8:07:07:1b:b8:04:20:a7:27:41:3c:6c:39:
\t39:e9:41:ce:e7:17:1d:d1:4c:5c:bc:3d:d2:26:26:
\tfe:6a:d6:fd:48:72:ae:46:fa:7b:c3:d3:19:60:44:
\t1d:a5:13:a7:80:f5:63:29:d4:7a:5d:06:07:16:5d:
\tf6:8b:3d:cb:64:3a:e2:84:5a:4d:8c:06:2d:2d:9d:
\t1c:eb:83:4c:78:3d:79:54:ce>""",
)
def test_reprPublicRSA(self):
"""
The repr of a L{keys.Key} contains all of the RSA components for an RSA
public key.
"""
self.assertEqual(
repr(keys.Key(self.rsaObj).public()),
"""<RSA Public Key (2048 bits)
attr e:
\t01:00:01
attr n:
\t00:d5:6a:ac:78:23:d6:d6:1b:ec:25:a1:50:c4:77:
\t63:50:84:45:01:55:42:14:2a:2a:e0:d0:60:ee:d4:
\te9:a3:ad:4a:fa:39:06:5e:84:55:75:5f:00:36:bf:
\t6f:aa:2a:3f:83:26:37:c1:69:2e:5b:fd:f0:f3:d2:
\t7d:d6:98:cd:3a:40:78:d5:ca:a8:18:c0:11:93:24:
\t09:0c:81:4c:8f:f7:9c:ed:13:16:6a:a4:04:e9:49:
\t77:c3:e4:55:64:b3:79:68:9e:2c:08:eb:ac:e8:04:
\t2d:21:77:05:a7:8e:ef:53:30:0d:a5:e5:bb:3d:6a:
\te2:09:36:6f:fd:34:d3:7d:6f:46:ff:87:da:a9:29:
\t27:aa:ff:ad:f5:85:e6:3e:1a:b8:7a:1d:4a:b1:ea:
\tc0:5a:f7:30:df:1f:c2:a4:e4:ef:3f:91:49:96:40:
\td5:19:77:2d:37:c3:5e:ec:9d:a6:3a:44:a5:c2:a4:
\t29:dd:d5:ba:9c:3d:45:b3:c6:2c:18:64:d5:ba:3d:
\tdf:ab:7f:cd:42:ac:a7:f1:18:0b:a0:58:15:62:0b:
\ta4:2a:6e:43:c3:e4:04:9f:35:a3:47:8e:46:ed:33:
\ta5:65:bd:bc:3b:29:6e:02:0b:57:df:74:e8:13:b4:
\t37:35:7e:83:5f:20:26:60:a6:dc:ad:8b:c6:6c:79:
\t98:f7>""",
)
def test_reprPublicECDSA(self):
"""
The repr of a L{keys.Key} contains all of the components of an ECDSA
public key.
"""
self.assertEqual(
repr(keys.Key(self.ecObj).public()),
dedent(
"""\
<Elliptic Curve Public Key (256 bits)
curve:
\tecdsa-sha2-nistp256
x:
\t{x}
y:
\t{y}>
"""
).format(**keydata.ECDatanistp256),
)
def test_reprPrivateECDSA(self):
"""
        The repr of a L{keys.Key} contains all of the components of an ECDSA
        private key.
"""
self.assertEqual(
repr(keys.Key(self.ecObj)),
dedent(
"""\
<Elliptic Curve Private Key (256 bits)
curve:
\tecdsa-sha2-nistp256
privateValue:
\t{privateValue}
x:
\t{x}
y:
\t{y}>
"""
).format(**keydata.ECDatanistp256),
)
@skipWithoutEd25519
def test_reprPublicEd25519(self):
"""
        The repr of a L{keys.Key} contains all of the components of an
        Ed25519 public key.
"""
self.assertEqual(
repr(keys.Key(self.ed25519Obj).public()),
dedent(
"""\
<Ed25519 Public Key (256 bits)
attr a:
\tf1:16:d1:15:4a:1e:15:0e:19:5e:19:46:b5:f2:44:
\t0d:b2:52:a0:ae:2a:6b:23:13:73:45:fd:40:d9:57:
\t7b:8b>"""
),
)
@skipWithoutEd25519
def test_reprPrivateEd25519(self):
"""
        The repr of a L{keys.Key} contains all of the components of an
        Ed25519 private key.
"""
self.assertEqual(
repr(keys.Key(self.ed25519Obj)),
dedent(
"""\
<Ed25519 Private Key (256 bits)
attr a:
\tf1:16:d1:15:4a:1e:15:0e:19:5e:19:46:b5:f2:44:
\t0d:b2:52:a0:ae:2a:6b:23:13:73:45:fd:40:d9:57:
\t7b:8b
attr k:
\t37:2f:25:da:8d:d4:a8:9a:78:7c:61:f0:98:01:c6:
\tf4:5e:6d:67:05:69:31:37:4c:69:0d:05:55:bb:c9:
\t44:58>"""
),
)
class PersistentRSAKeyTests(unittest.TestCase):
"""
Tests for L{keys._getPersistentRSAKey}.
"""
if cryptography is None:
skip = skipCryptography
def test_providedArguments(self):
"""
L{keys._getPersistentRSAKey} will put the key in
C{directory}/C{filename}, with the key length of C{keySize}.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
self.assertEqual(key.size(), 512)
self.assertTrue(keyFile.exists())
def test_noRegeneration(self):
"""
L{keys._getPersistentRSAKey} will not regenerate the key if the key
already exists.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
self.assertEqual(key.size(), 512)
self.assertTrue(keyFile.exists())
keyContent = keyFile.getContent()
# Set the key size to 1024 bits. Since it exists already, it will find
# the 512 bit key, and not generate a 1024 bit key.
key = keys._getPersistentRSAKey(keyFile, keySize=1024)
self.assertEqual(key.size(), 512)
self.assertEqual(keyFile.getContent(), keyContent)
def test_keySizeZero(self):
"""
        If the internal key object of a key returned by
        L{keys._getPersistentRSAKey} is set to None, the reported key size is 0.
"""
tempDir = FilePath(self.mktemp())
keyFile = tempDir.child("mykey.pem")
key = keys._getPersistentRSAKey(keyFile, keySize=512)
key._keyObject = None
self.assertEqual(key.size(), 0)
| 38.278287
| 88
| 0.644691
|
53b6b877edaa0302cbb38a7e461542f8813e7855
| 24,757
|
py
|
Python
|
tests/conftest.py
|
sti0/homeassistant-core
|
e4a613a4b33a50ecab7d0ac2c0577aecc94e75da
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
sti0/homeassistant-core
|
e4a613a4b33a50ecab7d0ac2c0577aecc94e75da
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
sti0/homeassistant-core
|
e4a613a4b33a50ecab7d0ac2c0577aecc94e75da
|
[
"Apache-2.0"
] | null | null | null |
"""Set up some common test helper things."""
from __future__ import annotations
import asyncio
from collections.abc import AsyncGenerator
import functools
import logging
import socket
import ssl
import threading
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from aiohttp.test_utils import make_mocked_request
import freezegun
import multidict
import pytest
import pytest_socket
import requests_mock as _requests_mock
from homeassistant import core as ha, loader, runner, util
from homeassistant.auth.const import GROUP_ID_ADMIN, GROUP_ID_READ_ONLY
from homeassistant.auth.models import Credentials
from homeassistant.auth.providers import homeassistant, legacy_api_password
from homeassistant.components import mqtt, recorder
from homeassistant.components.websocket_api.auth import (
TYPE_AUTH,
TYPE_AUTH_OK,
TYPE_AUTH_REQUIRED,
)
from homeassistant.components.websocket_api.http import URL
from homeassistant.const import HASSIO_USER_NAME
from homeassistant.core import CoreState, HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util, location
from tests.ignore_uncaught_exceptions import IGNORE_UNCAUGHT_EXCEPTIONS
pytest.register_assert_rewrite("tests.common")
from tests.common import ( # noqa: E402, isort:skip
CLIENT_ID,
INSTANCES,
MockConfigEntry,
MockUser,
SetupRecorderInstanceT,
async_fire_mqtt_message,
async_init_recorder_component,
async_test_home_assistant,
get_test_home_assistant,
init_recorder_component,
mock_storage as mock_storage,
)
from tests.test_util.aiohttp import mock_aiohttp_client # noqa: E402, isort:skip
from tests.components.recorder.common import ( # noqa: E402, isort:skip
async_recorder_block_till_done,
)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))
# Disable fixtures overriding our beautiful policy
asyncio.set_event_loop_policy = lambda policy: None
def pytest_configure(config):
"""Register marker for tests that log exceptions."""
config.addinivalue_line(
"markers", "no_fail_on_log_exception: mark test to not fail on logged exception"
)
def pytest_runtest_setup():
"""Prepare pytest_socket and freezegun.
pytest_socket:
Throw if tests attempt to open sockets.
allow_unix_socket is set to True because it's needed by asyncio.
Important: socket_allow_hosts must be called before disable_socket, otherwise all
destinations will be allowed.
freezegun:
Modified to include https://github.com/spulec/freezegun/pull/424
"""
pytest_socket.socket_allow_hosts(["127.0.0.1"])
disable_socket(allow_unix_socket=True)
freezegun.api.datetime_to_fakedatetime = ha_datetime_to_fakedatetime
freezegun.api.FakeDatetime = HAFakeDatetime
@pytest.fixture
def socket_disabled(pytestconfig):
"""Disable socket.socket for duration of this test function.
This incorporates changes from https://github.com/miketheman/pytest-socket/pull/76
and hardcodes allow_unix_socket to True because it's not passed on the command line.
"""
socket_was_enabled = socket.socket == pytest_socket._true_socket
disable_socket(allow_unix_socket=True)
yield
if socket_was_enabled:
pytest_socket.enable_socket()
@pytest.fixture
def socket_enabled(pytestconfig):
"""Enable socket.socket for duration of this test function.
This incorporates changes from https://github.com/miketheman/pytest-socket/pull/76
and hardcodes allow_unix_socket to True because it's not passed on the command line.
"""
socket_was_disabled = socket.socket != pytest_socket._true_socket
pytest_socket.enable_socket()
yield
if socket_was_disabled:
disable_socket(allow_unix_socket=True)
def disable_socket(allow_unix_socket=False):
"""Disable socket.socket to disable the Internet. useful in testing.
This incorporates changes from https://github.com/miketheman/pytest-socket/pull/75
"""
class GuardedSocket(socket.socket):
"""socket guard to disable socket creation (from pytest-socket)."""
def __new__(cls, *args, **kwargs):
try:
if len(args) > 0:
is_unix_socket = args[0] == socket.AF_UNIX
else:
is_unix_socket = kwargs.get("family") == socket.AF_UNIX
except AttributeError:
# AF_UNIX not supported on Windows https://bugs.python.org/issue33408
is_unix_socket = False
if is_unix_socket and allow_unix_socket:
return super().__new__(cls, *args, **kwargs)
raise pytest_socket.SocketBlockedError()
socket.socket = GuardedSocket
def ha_datetime_to_fakedatetime(datetime):
"""Convert datetime to FakeDatetime.
Modified to include https://github.com/spulec/freezegun/pull/424.
"""
return freezegun.api.FakeDatetime(
datetime.year,
datetime.month,
datetime.day,
datetime.hour,
datetime.minute,
datetime.second,
datetime.microsecond,
datetime.tzinfo,
fold=datetime.fold,
)
class HAFakeDatetime(freezegun.api.FakeDatetime):
"""Modified to include https://github.com/spulec/freezegun/pull/424."""
@classmethod
def now(cls, tz=None):
"""Return frozen now."""
now = cls._time_to_freeze() or freezegun.api.real_datetime.now()
if tz:
result = tz.fromutc(now.replace(tzinfo=tz))
else:
result = now
# Add the _tz_offset only if it's non-zero to preserve fold
if cls._tz_offset():
result += cls._tz_offset()
return ha_datetime_to_fakedatetime(result)
def check_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
async def guard_func(*args, **kwargs):
real = kwargs.pop("_test_real", None)
if not real:
raise Exception(
                'Forgot to mock or pass "_test_real=True" to %s' % func.__name__
)
return await func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections
location.async_detect_location_info = check_real(location.async_detect_location_info)
util.get_local_ip = lambda: "127.0.0.1"
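# Illustrative note (an addition, not from the original fixtures): a test that
# genuinely needs one of the guarded network calls must opt in explicitly, e.g.
#   info = await location.async_detect_location_info(session, _test_real=True)
# Without the keyword, guard_func raises instead of reaching the network.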
@pytest.fixture(autouse=True)
def verify_cleanup():
"""Verify that the test has cleaned up resources correctly."""
threads_before = frozenset(threading.enumerate())
yield
if len(INSTANCES) >= 2:
count = len(INSTANCES)
for inst in INSTANCES:
inst.stop()
pytest.exit(f"Detected non stopped instances ({count}), aborting test run")
threads = frozenset(threading.enumerate()) - threads_before
assert not threads
@pytest.fixture(autouse=True)
def bcrypt_cost():
"""Run with reduced rounds during tests, to speed up uses."""
import bcrypt
gensalt_orig = bcrypt.gensalt
def gensalt_mock(rounds=12, prefix=b"2b"):
return gensalt_orig(4, prefix)
bcrypt.gensalt = gensalt_mock
yield
bcrypt.gensalt = gensalt_orig
@pytest.fixture
def hass_storage():
"""Fixture to mock storage."""
with mock_storage() as stored_data:
yield stored_data
@pytest.fixture
def load_registries():
"""Fixture to control the loading of registries when setting up the hass fixture.
To avoid loading the registries, tests can be marked with:
@pytest.mark.parametrize("load_registries", [False])
"""
return True
@pytest.fixture
def hass(loop, load_registries, hass_storage, request):
"""Fixture to provide a test instance of Home Assistant."""
orig_tz = dt_util.DEFAULT_TIME_ZONE
def exc_handle(loop, context):
"""Handle exceptions by rethrowing them, which will fail the test."""
# Most of these contexts will contain an exception, but not all.
# The docs note the key as "optional"
# See https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_exception_handler
if "exception" in context:
exceptions.append(context["exception"])
else:
exceptions.append(
Exception(
"Received exception handler without exception, but with message: %s"
% context["message"]
)
)
orig_exception_handler(loop, context)
exceptions = []
hass = loop.run_until_complete(async_test_home_assistant(loop, load_registries))
orig_exception_handler = loop.get_exception_handler()
loop.set_exception_handler(exc_handle)
yield hass
loop.run_until_complete(hass.async_stop(force=True))
# Restore timezone, it is set when creating the hass object
dt_util.DEFAULT_TIME_ZONE = orig_tz
for ex in exceptions:
if (
request.module.__name__,
request.function.__name__,
) in IGNORE_UNCAUGHT_EXCEPTIONS:
continue
raise ex
@pytest.fixture
async def stop_hass():
"""Make sure all hass are stopped."""
orig_hass = ha.HomeAssistant
created = []
def mock_hass():
hass_inst = orig_hass()
created.append(hass_inst)
return hass_inst
with patch("homeassistant.core.HomeAssistant", mock_hass):
yield
for hass_inst in created:
if hass_inst.state == ha.CoreState.stopped:
continue
with patch.object(hass_inst.loop, "stop"):
await hass_inst.async_block_till_done()
await hass_inst.async_stop(force=True)
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
@pytest.fixture
def mock_device_tracker_conf():
"""Prevent device tracker from reading/writing data."""
devices = []
async def mock_update_config(path, id, entity):
devices.append(entity)
with patch(
"homeassistant.components.device_tracker.legacy"
".DeviceTracker.async_update_config",
side_effect=mock_update_config,
), patch(
"homeassistant.components.device_tracker.legacy.async_load_config",
side_effect=lambda *args: devices,
):
yield devices
@pytest.fixture
async def hass_admin_credential(hass, local_auth):
"""Provide credentials for admin user."""
return Credentials(
id="mock-credential-id",
auth_provider_type="homeassistant",
auth_provider_id=None,
data={"username": "admin"},
is_new=False,
)
@pytest.fixture
async def hass_access_token(hass, hass_admin_user, hass_admin_credential):
"""Return an access token to access Home Assistant."""
await hass.auth.async_link_user(hass_admin_user, hass_admin_credential)
refresh_token = await hass.auth.async_create_refresh_token(
hass_admin_user, CLIENT_ID, credential=hass_admin_credential
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def hass_owner_user(hass, local_auth):
"""Return a Home Assistant admin user."""
return MockUser(is_owner=True).add_to_hass(hass)
@pytest.fixture
def hass_admin_user(hass, local_auth):
"""Return a Home Assistant admin user."""
admin_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_ADMIN)
)
return MockUser(groups=[admin_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_user(hass, local_auth):
"""Return a Home Assistant read only user."""
read_only_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_READ_ONLY)
)
return MockUser(groups=[read_only_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_access_token(hass, hass_read_only_user, local_auth):
"""Return a Home Assistant read only user."""
credential = Credentials(
id="mock-readonly-credential-id",
auth_provider_type="homeassistant",
auth_provider_id=None,
data={"username": "readonly"},
is_new=False,
)
hass_read_only_user.credentials.append(credential)
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(
hass_read_only_user, CLIENT_ID, credential=credential
)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def hass_supervisor_user(hass, local_auth):
"""Return the Home Assistant Supervisor user."""
admin_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_ADMIN)
)
return MockUser(
name=HASSIO_USER_NAME, groups=[admin_group], system_generated=True
).add_to_hass(hass)
@pytest.fixture
def hass_supervisor_access_token(hass, hass_supervisor_user, local_auth):
"""Return a Home Assistant Supervisor access token."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_supervisor_user)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def legacy_auth(hass):
"""Load legacy API password provider."""
prv = legacy_api_password.LegacyApiPasswordAuthProvider(
hass,
hass.auth._store,
{"type": "legacy_api_password", "api_password": "test-password"},
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def local_auth(hass):
"""Load local auth provider."""
prv = homeassistant.HassAuthProvider(
hass, hass.auth._store, {"type": "homeassistant"}
)
hass.loop.run_until_complete(prv.async_initialize())
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def hass_client(hass, aiohttp_client, hass_access_token, socket_enabled):
"""Return an authenticated HTTP client."""
async def auth_client():
"""Return an authenticated client."""
return await aiohttp_client(
hass.http.app, headers={"Authorization": f"Bearer {hass_access_token}"}
)
return auth_client
@pytest.fixture
def hass_client_no_auth(hass, aiohttp_client, socket_enabled):
"""Return an unauthenticated HTTP client."""
async def client():
"""Return an authenticated client."""
return await aiohttp_client(hass.http.app)
return client
@pytest.fixture
def current_request():
"""Mock current request."""
with patch("homeassistant.components.http.current_request") as mock_request_context:
mocked_request = make_mocked_request(
"GET",
"/some/request",
headers={"Host": "example.com"},
sslcontext=ssl.SSLContext(ssl.PROTOCOL_TLS),
)
mock_request_context.get.return_value = mocked_request
yield mock_request_context
@pytest.fixture
def current_request_with_host(current_request):
"""Mock current request with a host header."""
new_headers = multidict.CIMultiDict(current_request.get.return_value.headers)
new_headers[config_entry_oauth2_flow.HEADER_FRONTEND_BASE] = "https://example.com"
current_request.get.return_value = current_request.get.return_value.clone(
headers=new_headers
)
@pytest.fixture
def hass_ws_client(aiohttp_client, hass_access_token, hass, socket_enabled):
"""Websocket client fixture connected to websocket server."""
async def create_client(hass=hass, access_token=hass_access_token):
"""Create a websocket client."""
assert await async_setup_component(hass, "websocket_api", {})
client = await aiohttp_client(hass.http.app)
websocket = await client.ws_connect(URL)
auth_resp = await websocket.receive_json()
assert auth_resp["type"] == TYPE_AUTH_REQUIRED
if access_token is None:
await websocket.send_json({"type": TYPE_AUTH, "access_token": "incorrect"})
else:
await websocket.send_json({"type": TYPE_AUTH, "access_token": access_token})
auth_ok = await websocket.receive_json()
assert auth_ok["type"] == TYPE_AUTH_OK
# wrap in client
websocket.client = client
return websocket
return create_client
@pytest.fixture(autouse=True)
def fail_on_log_exception(request, monkeypatch):
"""Fixture to fail if a callback wrapped by catch_log_exception or coroutine wrapped by async_create_catching_coro throws."""
if "no_fail_on_log_exception" in request.keywords:
return
def log_exception(format_err, *args):
raise
monkeypatch.setattr("homeassistant.util.logging.log_exception", log_exception)
@pytest.fixture
def mqtt_config():
"""Fixture to allow overriding MQTT config."""
return None
@pytest.fixture
def mqtt_client_mock(hass):
"""Fixture to mock MQTT client."""
mid = 0
def get_mid():
nonlocal mid
mid += 1
return mid
class FakeInfo:
def __init__(self, mid):
self.mid = mid
self.rc = 0
with patch("paho.mqtt.client.Client") as mock_client:
@ha.callback
def _async_fire_mqtt_message(topic, payload, qos, retain):
async_fire_mqtt_message(hass, topic, payload, qos, retain)
mid = get_mid()
mock_client.on_publish(0, 0, mid)
return FakeInfo(mid)
def _subscribe(topic, qos=0):
mid = get_mid()
mock_client.on_subscribe(0, 0, mid)
return (0, mid)
def _unsubscribe(topic):
mid = get_mid()
mock_client.on_unsubscribe(0, 0, mid)
return (0, mid)
mock_client = mock_client.return_value
mock_client.connect.return_value = 0
mock_client.subscribe.side_effect = _subscribe
mock_client.unsubscribe.side_effect = _unsubscribe
mock_client.publish.side_effect = _async_fire_mqtt_message
yield mock_client
@pytest.fixture
async def mqtt_mock(hass, mqtt_client_mock, mqtt_config):
"""Fixture to mock MQTT component."""
if mqtt_config is None:
mqtt_config = {mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_BIRTH_MESSAGE: {}}
await hass.async_block_till_done()
entry = MockConfigEntry(
data=mqtt_config,
domain=mqtt.DOMAIN,
title="Tasmota",
)
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
mqtt_component_mock = MagicMock(
return_value=hass.data["mqtt"],
spec_set=hass.data["mqtt"],
wraps=hass.data["mqtt"],
)
mqtt_component_mock.conf = hass.data["mqtt"].conf # For diagnostics
mqtt_component_mock._mqttc = mqtt_client_mock
    # connected is set to True to get more realistic behavior when subscribing
hass.data["mqtt"].connected = True
hass.data["mqtt"] = mqtt_component_mock
component = hass.data["mqtt"]
component.reset_mock()
return component
@pytest.fixture(autouse=True)
def mock_get_source_ip():
"""Mock network util's async_get_source_ip."""
with patch(
"homeassistant.components.network.util.async_get_source_ip",
return_value="10.10.10.10",
):
yield
@pytest.fixture
def mock_zeroconf():
"""Mock zeroconf."""
with patch("homeassistant.components.zeroconf.HaZeroconf", autospec=True), patch(
"homeassistant.components.zeroconf.HaAsyncServiceBrowser", autospec=True
):
yield
@pytest.fixture
def mock_async_zeroconf(mock_zeroconf):
"""Mock AsyncZeroconf."""
with patch("homeassistant.components.zeroconf.HaAsyncZeroconf") as mock_aiozc:
zc = mock_aiozc.return_value
zc.async_unregister_service = AsyncMock()
zc.async_register_service = AsyncMock()
zc.async_update_service = AsyncMock()
zc.zeroconf.async_wait_for_start = AsyncMock()
zc.zeroconf.done = False
zc.async_close = AsyncMock()
zc.ha_async_close = AsyncMock()
yield zc
@pytest.fixture
def enable_custom_integrations(hass):
"""Enable custom integrations defined in the test dir."""
hass.data.pop(loader.DATA_CUSTOM_COMPONENTS)
@pytest.fixture
def enable_statistics():
"""Fixture to control enabling of recorder's statistics compilation.
To enable statistics, tests can be marked with:
@pytest.mark.parametrize("enable_statistics", [True])
"""
return False
@pytest.fixture
def enable_nightly_purge():
"""Fixture to control enabling of recorder's nightly purge job.
To enable nightly purging, tests can be marked with:
@pytest.mark.parametrize("enable_nightly_purge", [True])
"""
return False
@pytest.fixture
def recorder_config():
"""Fixture to override recorder config.
To override the config, tests can be marked with:
@pytest.mark.parametrize("recorder_config", [{...}])
"""
return None
@pytest.fixture
def hass_recorder(enable_nightly_purge, enable_statistics, hass_storage):
"""Home Assistant fixture with in-memory recorder."""
original_tz = dt_util.DEFAULT_TIME_ZONE
hass = get_test_home_assistant()
nightly = recorder.Recorder.async_nightly_tasks if enable_nightly_purge else None
stats = recorder.Recorder.async_periodic_statistics if enable_statistics else None
with patch(
"homeassistant.components.recorder.Recorder.async_nightly_tasks",
side_effect=nightly,
autospec=True,
), patch(
"homeassistant.components.recorder.Recorder.async_periodic_statistics",
side_effect=stats,
autospec=True,
):
def setup_recorder(config=None):
"""Set up with params."""
init_recorder_component(hass, config)
hass.start()
hass.block_till_done()
hass.data[recorder.DATA_INSTANCE].block_till_done()
return hass
yield setup_recorder
hass.stop()
# Restore timezone, it is set when creating the hass object
dt_util.DEFAULT_TIME_ZONE = original_tz
@pytest.fixture
async def async_setup_recorder_instance(
enable_nightly_purge, enable_statistics
) -> AsyncGenerator[SetupRecorderInstanceT, None]:
"""Yield callable to setup recorder instance."""
async def async_setup_recorder(
hass: HomeAssistant, config: ConfigType | None = None
) -> recorder.Recorder:
"""Setup and return recorder instance.""" # noqa: D401
nightly = (
recorder.Recorder.async_nightly_tasks if enable_nightly_purge else None
)
stats = (
recorder.Recorder.async_periodic_statistics if enable_statistics else None
)
with patch(
"homeassistant.components.recorder.Recorder.async_nightly_tasks",
side_effect=nightly,
autospec=True,
), patch(
"homeassistant.components.recorder.Recorder.async_periodic_statistics",
side_effect=stats,
autospec=True,
):
await async_init_recorder_component(hass, config)
await hass.async_block_till_done()
instance = hass.data[recorder.DATA_INSTANCE]
# The recorder's worker is not started until Home Assistant is running
if hass.state == CoreState.running:
await async_recorder_block_till_done(hass, instance)
return instance
return async_setup_recorder
@pytest.fixture
async def recorder_mock(recorder_config, async_setup_recorder_instance, hass):
"""Fixture with in-memory recorder."""
await async_setup_recorder_instance(hass, recorder_config)
@pytest.fixture
def mock_integration_frame():
"""Mock as if we're calling code from inside an integration."""
correct_frame = Mock(
filename="/home/paulus/homeassistant/components/hue/light.py",
lineno="23",
line="self.light.is_on",
)
with patch(
"homeassistant.helpers.frame.extract_stack",
return_value=[
Mock(
filename="/home/paulus/homeassistant/core.py",
lineno="23",
line="do_something()",
),
correct_frame,
Mock(
filename="/home/paulus/aiohue/lights.py",
lineno="2",
line="something()",
),
],
):
yield correct_frame
| 30.677819
| 129
| 0.690673
|
663763de2c0950be428b9158872503f3982373e9
| 1,319
|
py
|
Python
|
custom_components/zigate/config_flow.py
|
alexeypetrenko/homeassistant-zigate
|
954656db06950ca63cb4f742aaf6123b03aecb25
|
[
"MIT"
] | 60
|
2018-07-11T17:03:15.000Z
|
2021-03-04T09:44:31.000Z
|
custom_components/zigate/config_flow.py
|
alexeypetrenko/homeassistant-zigate
|
954656db06950ca63cb4f742aaf6123b03aecb25
|
[
"MIT"
] | 159
|
2018-07-14T19:37:32.000Z
|
2022-03-20T00:02:45.000Z
|
custom_components/zigate/config_flow.py
|
alexeypetrenko/homeassistant-zigate
|
954656db06950ca63cb4f742aaf6123b03aecb25
|
[
"MIT"
] | 28
|
2018-07-23T13:24:11.000Z
|
2021-06-04T21:36:45.000Z
|
"""Adds config flow for ZiGate."""
from collections import OrderedDict
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PORT
from .const import DOMAIN
@config_entries.HANDLERS.register(DOMAIN)
class ZiGateConfigFlow(config_entries.ConfigFlow):
"""ZiGate config flow."""
    async def async_step_user(self, user_input=None):
        """Handle a config flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if self.hass.data.get(DOMAIN):
return self.async_abort(reason="single_instance_allowed")
errors = {}
fields = OrderedDict()
fields[vol.Optional(CONF_PORT)] = str
if user_input is not None:
print(user_input)
return self.async_create_entry(title=user_input.get(CONF_PORT, 'Auto'), data=user_input)
return self.async_show_form(
step_id="user", data_schema=vol.Schema(fields), errors=errors
)
async def async_step_import(self, import_info):
"""Handle a ZiGate config import."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
print('import ', import_info)
return self.async_create_entry(title="configuration.yaml", data={})
| 33.820513
| 100
| 0.692949
|
0949ca57ede540b8bd84d45a33e8387d4f9db132
| 1,138
|
py
|
Python
|
benchmark/compute/baremetal/baremetal_wrapper.py
|
georgianpartners/automl_benchmark
|
42b641ecdd5d13fc30bed1fb2ef8ea924dea28da
|
[
"MIT"
] | 25
|
2018-08-20T15:00:47.000Z
|
2021-02-25T17:45:46.000Z
|
benchmark/compute/baremetal/baremetal_wrapper.py
|
georgianpartners/automl_benchmark
|
42b641ecdd5d13fc30bed1fb2ef8ea924dea28da
|
[
"MIT"
] | 2
|
2019-01-29T17:33:24.000Z
|
2020-03-13T00:39:55.000Z
|
benchmark/compute/baremetal/baremetal_wrapper.py
|
georgian-io/automl_benchmark
|
42b641ecdd5d13fc30bed1fb2ef8ea924dea28da
|
[
"MIT"
] | 5
|
2019-02-11T10:05:34.000Z
|
2019-08-23T11:59:13.000Z
|
#!/usr/bin/env python
import os
import boto3
from ...analysis import process
from ...analysis import single_dataset
def execute():
s3_bucket = os.getenv("S3_BUCKET")
s3_folder = os.getenv("S3_FOLDER","")
task = os.getenv("TASK")
s3 = boto3.resource('s3')
print(task)
print(task[1:-1])
test = task[1:-1].split(",")
print(test)
single_dataset(test[2], use_cache=True)
results = process(test[1], test[2], test[3], int(test[4]))
csv = (','.join(map(str,results))+'\n').encode("utf-8")
key = (s3_folder+"out/"+"results" + str(test[0]) +".csv")
s3.Bucket(s3_bucket).put_object(Key=key, Body = csv)
key = '{}logs/pass/{}/{}-{}-{}'.format(s3_folder, test[1], test[2], test[3], test[4])
open('status', 'w').write(key)
if __name__ == '__main__':
try:
execute()
    except Exception:
s3_bucket = os.getenv("S3_BUCKET")
s3_folder = os.getenv("S3_FOLDER","")
task = os.getenv("TASK")
test = task[1:-1].split(",")
key = '{}logs/fail/{}/{}-{}-{}'.format(s3_folder, test[1], test[2], test[3], test[4])
open('status', 'w').write(key)
| 26.465116
| 93
| 0.571178
|
69fbffe51a498b2ba86c180b8871507fb0e77237
| 1,586
|
py
|
Python
|
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryOTAJobRequest.py
|
liuzheng/aliyun-openapi-python-sdk
|
1ba6743f3d6f2cef57ec9e3be1754b04293c3150
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryOTAJobRequest.py
|
liuzheng/aliyun-openapi-python-sdk
|
1ba6743f3d6f2cef57ec9e3be1754b04293c3150
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryOTAJobRequest.py
|
liuzheng/aliyun-openapi-python-sdk
|
1ba6743f3d6f2cef57ec9e3be1754b04293c3150
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class QueryOTAJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'QueryOTAJob')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_JobId(self):
return self.get_query_params().get('JobId')
def set_JobId(self,JobId):
self.add_query_param('JobId',JobId)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
| 36.045455
| 74
| 0.764817
|
65c668142891d6dd19f5242cd081b1d232feded3
| 1,313
|
py
|
Python
|
01-code/01-ecmwf-py/b-get-era-int-daily-by-month.py
|
sebaki/climate-jet-stream
|
6b07e4327a0c2edee13c112b2e6379032ff23042
|
[
"MIT"
] | null | null | null |
01-code/01-ecmwf-py/b-get-era-int-daily-by-month.py
|
sebaki/climate-jet-stream
|
6b07e4327a0c2edee13c112b2e6379032ff23042
|
[
"MIT"
] | null | null | null |
01-code/01-ecmwf-py/b-get-era-int-daily-by-month.py
|
sebaki/climate-jet-stream
|
6b07e4327a0c2edee13c112b2e6379032ff23042
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env py27
from ecmwfapi import ECMWFDataServer
import numpy as np
import datetime as dt
import calendar
server = ECMWFDataServer()
dir = "/automount/agh/Projects/skiefer/"
year_first = 2016
year_last = 2016
years_vec = np.arange(year_first, (year_last + 1))
months_vec = np.arange(1, 13)
for i in range(len(years_vec)):
for ii in range(len(months_vec)):
date_loop_start = dt.date(years_vec[i], months_vec[ii], 1)
month_len = calendar.monthrange(years_vec[i], months_vec[ii])[1]
date_loop_end = dt.date(years_vec[i], months_vec[ii], month_len)
date_loop = str(date_loop_start) + '/to/' + str(date_loop_end)
target_loop = dir + str(date_loop_start.year) + '-' + str(date_loop_start.month) + '-ei.nc'
server.retrieve({
'class': "ei",
'dataset': "interim",
'date': date_loop,
'expver': "1",
'format': "netcdf",
'grid': "1.00/1.00",
'levelist': "100/150/200/250/300/400",
'levtype': "pl",
'param': "129.128/130.128/131.128/132.128/135.128/155.128",
'step': "0",
'stream': "oper",
'target': target_loop,
'time': "00/06/12/18",
'type': "an"
})
print('Done successfully.')
| 28.543478
| 99
| 0.573496
|
3aa813e287c63cc9a6c7fdfeecb9d2fc3315f872
| 12,011
|
py
|
Python
|
data/xueqiu_com/Xueqiu_Rule.py
|
dingyuanhong/Collection
|
43bfae16c93b643d1b09d84f27bfdbe5ef8ed497
|
[
"MIT"
] | 1
|
2021-07-03T13:39:42.000Z
|
2021-07-03T13:39:42.000Z
|
data/xueqiu_com/Xueqiu_Rule.py
|
dingyuanhong/Collection
|
43bfae16c93b643d1b09d84f27bfdbe5ef8ed497
|
[
"MIT"
] | null | null | null |
data/xueqiu_com/Xueqiu_Rule.py
|
dingyuanhong/Collection
|
43bfae16c93b643d1b09d84f27bfdbe5ef8ed497
|
[
"MIT"
] | 1
|
2020-05-29T23:38:22.000Z
|
2020-05-29T23:38:22.000Z
|
import json
def stock_screen_parse(content,res):
values = json.loads(res.text);
stocks = [];
for item in values["list"]:
stocks.append({"symbol":item["symbol"],"name":item["name"]});
return stocks;
def stock_screen_count_parse(content,res):
values = json.loads(res.text);
count = int(values["count"]);
return count;
#exchange
#CN Shanghai/Shenzhen (A-share) market
#type
#kcb STAR Market (sci-tech innovation board)
#Number of stocks in the screener list
stock_screen_count_rule = {
"param":{
"category":"SH", #类别
"exchange":"", #市场
"areacode":"", #地域
"indcode":"", #板块代码
"orderby":"symbol", #排序字段
"order":"desc", #排序方式
"page":"1",
},
"url":"https://xueqiu.com/stock/screener/screen.json",
"parse":stock_screen_count_parse,
};
#Stock screener list
stock_screen_rule = {
"param":{
"category":"SH", #类别
"exchange":"", #市场
"areacode":"", #地域
"indcode":"", #板块代码
"orderby":"symbol", #排序字段
"order":"desc", #排序方式
"page":"1",
},
"url":"https://xueqiu.com/stock/screener/screen.json",
"parse":stock_screen_parse,
};
#Shanghai/Shenzhen overview (all A-shares)
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&market=CN&type=sh_sz&_=1582548769209
#Shanghai A-shares
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=sha&order_by=percent&order=desc&size=10&page=1&_=1582687522770
#Shanghai B-shares
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=shb&order_by=percent&order=desc&size=10&page=1&_=1582687401721
#Shenzhen A-shares
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=sza&order_by=percent&order=desc&size=10&page=1&_=1582687544665
#Shenzhen B-shares
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=szb&order_by=percent&order=desc&size=10&page=1&_=1582687564228
#ChiNext (growth enterprise board)
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=cyb&order_by=percent&order=desc&size=10&page=1&_=1582687583601
#SME board
#https://xueqiu.com/service/v5/stock/screener/quote/list?type=zxb&order_by=percent&order=desc&size=10&page=1&_=1582687605058
#Convertible bonds
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&exchange=CN&market=CN&industry=%E5%8F%AF%E8%BD%AC%E5%80%BA&type=convert&_=1582549648517
#Government bonds
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&exchange=CN&market=CN&industry=%E5%9B%BD%E5%80%BA&type=national&_=1582549901964
#Corporate bonds
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&exchange=CN&market=CN&industry=%E4%BC%81%E5%80%BA&type=corp&_=1582549982655
#Hong Kong stocks overview
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&market=HK&type=hk&is_delay=true&_=1582548721702
#US stocks overview
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&market=US&type=us&_=1582548971877
#US star stocks
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&market=US&type=us_star&_=1582549256747
#US-listed Chinese stocks (China concepts)
#https://xueqiu.com/service/v5/stock/screener/quote/list?page=1&size=90&order=desc&orderby=percent&order_by=percent&market=US&type=us_china&_=1582549362835
def stock_quote_parse(content,res):
data = json.loads(res.text)["data"];
count = 0;
if "count" in data:
count = int(data["count"]);
if count > 0:
return data["list"];
return [];
def stock_quote_count_parse(content,res):
data = json.loads(res.text)["data"];
count = 0;
if "count" in data:
count = int(data["count"]);
return count;
#Stock lists
#Shanghai/Shenzhen overview  type:sh_sz  market:CN
#Shanghai/Shenzhen STAR Market  type:kcb  market:CN
#Convertible bonds  type:convert  market:CN  industry=%E5%8F%AF%E8%BD%AC%E5%80%BA
#Government bonds  type:national  market:CN  industry=%E5%8F%AF%E8%BD%AC%E5%80%BA
#Corporate bonds  type:corp  market:CN  industry=%E4%BC%81%E5%80%BA
#Hong Kong overview  type:hk  market:HK  has is_delay
#US overview  type:us  market:US
#US star stocks  type:us_star  market:US
#US-listed Chinese stocks  type:us_china  market:US
quote_list_rule = {
"param":{
"type":"", #类别
"market":"", #市场
"exchange":{
"use":"market=CN",
"status":"可选",
        }, # optional; needed for A-shares (market=CN)
"industry":{
"use":"market=CN",
"status":"可选",
},
"is_delay":{
"use":"type:hk;market:HK",
"status":"可选",
        }, # optional; needed for Hong Kong stocks
"order_by":"symbol",
"orderby":"symbol", #排序字段
"order":"desc", #排序方式
"page":"1",
"size":"90",
},
"url":"https://xueqiu.com/service/v5/stock/screener/quote/list",
"parse":stock_quote_parse,
};
quote_list_count_rule = {
"param":{
"type":"", #类别
"market":"", #市场
"exchange":{
"use":"market=CN",
"status":"可选",
        }, # optional; needed for A-shares (market=CN)
"industry":{
"use":"market=CN",
"status":"可选",
},
"is_delay":{
"use":"type:hk;market:HK",
"status":"可选",
        }, # optional; needed for Hong Kong stocks
"order_by":"symbol",
"orderby":"symbol", #排序字段
"order":"desc", #排序方式
"page":"1",
"size":"90",
},
"url":"https://xueqiu.com/service/v5/stock/screener/quote/list",
"parse":stock_quote_count_parse,
};
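# Illustrative helper (an addition, not part of the original rule set): one way
# the rule dicts above could be driven. Assumptions: dict-valued param entries
# (the "optional" markers) are documentation only and are skipped; the first
# argument to a parse callback is a small context dict (ind_code_parse below
# reads content["index"]); and the caller supplies a session carrying valid
# Xueqiu cookies, since the endpoints normally reject cookie-less requests.
def fetch_with_rule(session, rule, context=None, **param_overrides):
    """Fill in a rule's params, issue the GET and run its parse callback."""
    params = {k: v for k, v in rule.get("param", {}).items() if isinstance(v, str)}
    params.update(param_overrides)
    res = session.get(rule["url"], params=params, headers=rule.get("header", {}))
    parse = rule.get("parse")
    return parse(context or {}, res) if parse is not None else res
# Hypothetical usage:
#   import requests
#   stocks = fetch_with_rule(requests.Session(), quote_list_rule,
#                            type="sh_sz", market="CN", page="1")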
from bs4 import BeautifulSoup
def ind_code_parse(content,res):
    #index = 0; #Shanghai/Shenzhen sectors
    #index = 2; #Hong Kong sectors
    #index = 3; #US sectors
index = content["index"];
soup = BeautifulSoup(res.text);
divs = soup.find_all("div", class_="third-nav");
a = divs[index].find_all("a");
ind_codes = [];
for it in a:
ind_codes.append({
"name":it.attrs["title"],
"ind_code":it.attrs["data-level2code"],
});
return ind_codes;
#Shanghai/Shenzhen sectors
ind_code_rule = {
"url":"https://xueqiu.com/hq",
"parse":ind_code_parse,
};
#Sectors
#Shanghai/Shenzhen sectors  market:CN  exchange:CN
#Hong Kong sectors  market:HK  exchange:HK
#US sectors  market:US  exchange:US
ind_code_quote_list_rule = {
"param":{
"ind_code":"", #板块
"market":"", #市场
"exchange":"",
"order_by":"symbol",
"orderby":"symbol", #排序字段
"order":"desc", #排序方式
"page":"1",
"size":"90",
},
"url":"https://xueqiu.com/service/v5/stock/screener/quote/list",
"parse":stock_screen_parse,
};
#Minute-level K-line (candlestick) data
#period: 1m 5m 15m 30m 60m 120m day week month quarter year
stock_kline_rule = {
"param":{
"symbol":"", #股票编号
"period":"5m",
"count":"-142",
"begin":"1591716218251",
"type":"before",
"indicator":"kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance",
},
"url":"https://stock.xueqiu.com/v5/stock/chart/kline.json",
"header":{
"Host": "stock.xueqiu.com",
}
};
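# Usage note (assumption): the begin/count values above are only sample values;
# a caller would substitute its own window, e.g. the most recent 30 daily bars:
#   kline_params = dict(stock_kline_rule["param"],
#                       symbol="SH600036", period="day", count="-30")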
#Intraday chart for the current day (fetched in real time)
#period: 1d 5d (intraday / 5-day)
stock_minute_rule = {
"param":{
"symbol":"", #股票编号
"period":"1d",
},
"url":"https://stock.xueqiu.com/v5/stock/chart/minute.json",
"header":{
"Host": "stock.xueqiu.com",
}
};
#Quote detail for an individual stock on the current trading day
stock_quote_rule = {
"param":{
"symbol":"", #股票编号
"extend":"detail",
},
"url":"https://stock.xueqiu.com/v5/stock/quote.json",
};
#Company profile
stock_compinfo_rule = {
"param":{
"symbol":"", #股票编号
},
"url":"https://xueqiu.com/stock/f10/compinfo.json",
};
#Company directors and executives
stock_skholder_rule = {
"param":{
"symbol":"", #股票编号
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/skholder.json",
};
#Executive shareholding changes
stock_skholderchg_rule = {
"param":{
"symbol":"", #股票编号
"extend":"true",
"page":"1",
"size":"10",
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/skholderchg.json",
};
#Insider trading
# stock_skholderchg_rule = {
# "param":{
# "symbol":"", #股票编号
# "extend":"true",
# "page":"1",
# "size":"10",
# },
# "url":"https://stock.xueqiu.com/v5/stock/f10/cn/skholderchg.json",
# };
#Number of shareholders
stock_holders_rule = {
"param":{
"symbol":"", #股票编号
"extend":"true",
"page":"1",
"size":"10",
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/holders.json",
};
#Restricted-share unlocks (lifting of lockups)
stock_shareschg_rule = {
"param":{
"symbol":"", #股票编号
"extend":"true",
"type":"restricts",
"page":"1",
"size":"10",
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/shareschg.json",
};
#Dividends and rights issues
stock_bonus_rule = {
"param":{
"symbol":"", #股票编号
"size":"10",
"page":"1",
},
"url":"https://xueqiu.com/stock/f10/bonus.json",
};
#Secondary offerings overview
stock_cn_bonus_rule = {
"param":{
"symbol":"", #股票编号
"size":"10",
"page":"1",
"extend":"true"
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/bonus.json",
};
#Key financial indicators
stock_indicator_rule = {
"param":{
"symbol":"", #股票编号
"type":"Q4",
"is_detail":"true",
"count":"5",
"timestamp":"",
},
"url":"https://stock.xueqiu.com/v5/stock/f10/cn/shareschg.json",
};
#Cash flow statement
cash_flow_rule = {
"param":{
"symbol":"", #股票编号
"type":"all",
"is_detail":"true",
"count":"5",
},
"url":"https://stock.xueqiu.com/v5/stock/finance/cn/cash_flow.json",
"header":{
"Host": "stock.xueqiu.com",
}
};
#Balance sheet
balance_rule = {
"param":{
"symbol":"", #股票编号
"type":"all",
"is_detail":"true",
"count":"5",
},
"url":"https://stock.xueqiu.com/v5/stock/finance/cn/balance.json",
"header":{
"Host": "stock.xueqiu.com",
}
};
#Income statement
income_rule = {
"param":{
"symbol":"", #股票编号
"type":"all",
"is_detail":"true",
"count":"5",
},
"url":"https://stock.xueqiu.com/v5/stock/finance/cn/income.json",
"header":{
"Host": "stock.xueqiu.com",
}
};
#Xueqiu rules
Xueqiu_Rules = [
{
"name":"股票列表",
"type":"stocks_list",
"rule":quote_list_rule
},
{
"name":"股票数量",
"type":"stocks_list_count",
"rule":quote_list_count_rule
},
{
"name":"沪深板块列表",
"type":"sh_sz_inc_code_list",
"index":0,
"rule":ind_code_rule,
},
{
"name":"港股板块列表",
"type":"hk_inc_code_list",
"index":1,
"rule":ind_code_rule,
},
{
"name":"美股板块列表",
"type":"us_inc_code_list",
"index":3,
"rule":ind_code_rule,
},
{
"name":"板块股票列表",
"type":"ind_code_stock_list",
"rule":ind_code_quote_list_rule,
},
{
"name":"K线",
"type":"kline",
"rule":stock_kline_rule,
},
{
"name":"分时线",
"type":"minute",
"rule":stock_minute_rule,
},
{
"name":"当日K线",
"type":"stock",
"rule":stock_quote_rule,
},
{
"name":"公司简介",
"type":"compinfo",
"rule":stock_compinfo_rule,
},
{
"name":"公司董事",
"type":"skholder",
"rule":stock_skholder_rule,
},
{
"name":"高管增坚持",
"type":"skholderchg",
"rule":stock_skholderchg_rule,
},
{
"name":"股东人数",
"type":"holders",
"rule":stock_holders_rule,
},
{
"name":"限售解禁",
"type":"shareschg",
"rule":stock_shareschg_rule,
},
{
"name":"分红增配",
"type":"bonus",
"rule":stock_bonus_rule,
},
{
"name":"增发一览",
"type":"cn_bonus",
"rule":stock_cn_bonus_rule,
},
{
"name":"主要指标",
"type":"indicator",
"rule":stock_indicator_rule,
},
{
"name":"现金流量表",
"type":"cash_flow",
"rule":cash_flow_rule,
},
{
"name":"资产负债表",
"type":"balance",
"rule":balance_rule,
},
{
"name":"利润表",
"type":"income",
"rule":income_rule,
},
];
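# Example lookup (illustrative, not part of the original module): select a rule
# definition from Xueqiu_Rules by its "type" field.
#   kline_rule = next(r["rule"] for r in Xueqiu_Rules if r["type"] == "kline")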
| 24.313765
| 203
| 0.551245
|
6e23d34c24a2bb1cdacc27dfad4921931fce317f
| 36,484
|
py
|
Python
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-0/charm/hooks/relations/hacluster/interface_hacluster/common.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 6
|
2021-10-01T19:46:14.000Z
|
2022-03-31T17:05:08.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-0/charm/hooks/relations/hacluster/interface_hacluster/common.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 111
|
2021-10-01T18:18:17.000Z
|
2022-03-29T12:23:20.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-0/charm/hooks/relations/hacluster/interface_hacluster/common.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 10
|
2021-09-29T14:47:54.000Z
|
2022-03-18T14:52:16.000Z
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import ipaddress
import json
class ResourceManagement():
def data_changed(self, data_id, data, hash_type='md5'):
raise NotImplementedError
def get_local(self, key, default=None, scope=None):
raise NotImplementedError
def set_local(self, key=None, value=None, data=None, scope=None, **kwdata):
raise NotImplementedError
def set_remote(self, key=None, value=None, data=None, scope=None,
**kwdata):
raise NotImplementedError
def is_clustered(self):
"""Has the hacluster charm set clustered?
        The hacluster charm sets clustered=True when it determines it is ready.
Check the relation data for clustered and force a boolean return.
:returns: boolean
"""
clustered_values = self.get_remote_all('clustered')
if clustered_values:
# There is only ever one subordinate hacluster unit
clustered = clustered_values[0]
# Future versions of hacluster will return a bool
# Current versions return a string
if type(clustered) is bool:
return clustered
elif (clustered is not None and
(clustered.lower() == 'true' or
clustered.lower() == 'yes')):
return True
return False
def bind_on(self, iface=None, mcastport=None):
relation_data = {}
if iface:
relation_data['corosync_bindiface'] = iface
if mcastport:
relation_data['corosync_mcastport'] = mcastport
if relation_data and self.data_changed('hacluster-bind_on',
relation_data):
self.set_local(**relation_data)
self.set_remote(**relation_data)
def manage_resources(self, crm):
"""
Request for the hacluster to manage the resources defined in the
crm object.
res = CRM()
res.primitive('res_neutron_haproxy', 'lsb:haproxy',
op='monitor interval="5s"')
res.init_services('haproxy')
res.clone('cl_nova_haproxy', 'res_neutron_haproxy')
hacluster.manage_resources(crm)
:param crm: CRM() instance - Config object for Pacemaker resources
:returns: None
"""
relation_data = {
'json_{}'.format(k): json.dumps(v, sort_keys=True)
for k, v in crm.items()
}
if self.data_changed('hacluster-manage_resources', relation_data):
self.set_local(**relation_data)
self.set_remote(**relation_data)
def bind_resources(self, iface=None, mcastport=None):
"""Inform the ha subordinate about each service it should manage. The
child class specifies the services via self.ha_resources
:param iface: string - Network interface to bind to
:param mcastport: int - Multicast port corosync should use for cluster
management traffic
"""
if mcastport is None:
mcastport = 4440
resources_dict = self.get_local('resources')
self.bind_on(iface=iface, mcastport=mcastport)
if resources_dict:
resources = CRM(**resources_dict)
self.manage_resources(resources)
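    # Typical charm-side flow (an illustrative sketch, not part of this
    # interface): a concrete subclass that implements the *_local/*_remote
    # storage hooks would normally be driven roughly as
    #   mgr.add_vip('keystone', '10.0.0.100', iface='eth0', netmask='24')
    #   mgr.add_init_service('keystone', 'haproxy')
    #   mgr.bind_resources(iface='eth0', mcastport=4440)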
def delete_resource(self, resource_name):
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.add_delete_resource(resource_name)
self.set_local(resources=resources)
def add_vip(self, name, vip, iface=None, netmask=None):
"""Add a VirtualIP object for each user specified vip to self.resources
:param name: string - Name of service
:param vip: string - Virtual IP to be managed
:param iface: string - Network interface to bind vip to
:param netmask: string - Netmask for vip
:returns: None
"""
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.add(
VirtualIP(
name,
vip,
nic=iface,
cidr=netmask,))
# Vip Group
group = 'grp_{}_vips'.format(name)
vip_res_group_members = []
if resource_dict:
vip_resources = resource_dict.get('resources')
if vip_resources:
for vip_res in vip_resources:
if 'vip' in vip_res:
vip_res_group_members.append(vip_res)
resources.group(group,
*sorted(vip_res_group_members))
self.set_local(resources=resources)
def remove_vip(self, name, vip, iface=None):
"""Remove a virtual IP
:param name: string - Name of service
:param vip: string - Virtual IP
:param iface: string - Network interface vip bound to
"""
if iface:
nic_name = iface
else:
nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]
self.delete_resource('res_{}_{}_vip'.format(name, nic_name))
def add_init_service(self, name, service, clone=True):
"""Add a InitService object for haproxy to self.resources
:param name: string - Name of service
:param service: string - Name service uses in init system
:returns: None
"""
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.add(
InitService(name, service, clone))
self.set_local(resources=resources)
def remove_init_service(self, name, service):
"""Remove an init service
:param name: string - Name of service
:param service: string - Name of service used in init system
"""
res_key = 'res_{}_{}'.format(
name.replace('-', '_'),
service.replace('-', '_'))
self.delete_resource(res_key)
def add_systemd_service(self, name, service, clone=True):
"""Add a SystemdService object to self.resources
:param name: string - Name of service
:param service: string - Name service uses in systemd
:returns: None
"""
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.add(
SystemdService(name, service, clone))
self.set_local(resources=resources)
def remove_systemd_service(self, name, service):
"""Remove a systemd service
:param name: string - Name of service
:param service: string - Name of service used in systemd
"""
res_key = 'res_{}_{}'.format(
name.replace('-', '_'),
service.replace('-', '_'))
self.delete_resource(res_key)
def add_dnsha(self, name, ip, fqdn, endpoint_type):
"""Add a DNS entry to self.resources
:param name: string - Name of service
:param ip: string - IP address dns entry should resolve to
:param fqdn: string - The DNS entry name
:param endpoint_type: string - Public, private, internal etc
:returns: None
"""
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.add(
DNSEntry(name, ip, fqdn, endpoint_type))
# DNS Group
group = 'grp_{}_hostnames'.format(name)
dns_res_group_members = []
if resource_dict:
dns_resources = resource_dict.get('resources')
if dns_resources:
for dns_res in dns_resources:
if 'hostname' in dns_res:
dns_res_group_members.append(dns_res)
resources.group(group,
*sorted(dns_res_group_members))
self.set_local(resources=resources)
def remove_dnsha(self, name, endpoint_type):
"""Remove a DNS entry
:param name: string - Name of service
:param endpoint_type: string - Public, private, internal etc
:returns: None
"""
res_key = 'res_{}_{}_hostname'.format(
self.service_name.replace('-', '_'),
self.endpoint_type)
self.delete_resource(res_key)
def add_colocation(self, name, score, colo_resources, node_attribute=None):
"""Add a colocation directive
:param name: string - Name of colocation directive
        :param score: string - {ALWAYS, INFINITY, NEVER, NEG_INFINITY}. See
            CRM.colocation for more details
:param colo_resources: List[string] - List of resource names to
colocate
:param node_attribute: Colocate resources on a set of nodes with this
attribute and not necessarily on the same node.
"""
node_config = {}
if node_attribute:
node_config = {
'node_attribute': node_attribute}
resource_dict = self.get_local('resources')
if resource_dict:
resources = CRM(**resource_dict)
else:
resources = CRM()
resources.colocation(
name,
score,
*colo_resources,
**node_config)
self.set_local(resources=resources)
def remove_colocation(self, name):
"""Remove a colocation directive
:param name: string - Name of colocation directive
"""
self.delete_resource(name)
def get_remote_all(self, key, default=None):
"""Return a list of all values presented by remote units for key"""
raise NotImplementedError
class CRM(dict):
"""
Configuration object for Pacemaker resources for the HACluster
interface. This class provides access to the supported resources
    available via 'crm configure' within the HACluster.
See Also
--------
More documentation is available regarding the definitions of
primitives, clones, and other pacemaker resources at the crmsh
site at http://crmsh.github.io/man
"""
# Constants provided for ordering constraints (e.g. the kind value)
MANDATORY = "Mandatory"
OPTIONAL = "Optional"
SERIALIZE = "Serialize"
# Constants defining weights of constraints
INFINITY = "inf"
NEG_INFINITY = "-inf"
    # Constants aliased to their interpretations for constraints
ALWAYS = INFINITY
NEVER = NEG_INFINITY
def __init__(self, *args, **kwargs):
self['resources'] = {}
self['delete_resources'] = []
self['resource_params'] = {}
self['groups'] = {}
self['ms'] = {}
self['orders'] = {}
self['colocations'] = {}
self['clones'] = {}
self['locations'] = {}
self['init_services'] = []
self['systemd_services'] = []
super(CRM, self).__init__(*args, **kwargs)
def primitive(self, name, agent, description=None, **kwargs):
"""Configures a primitive resource within Pacemaker.
A primitive is used to describe a resource which should be managed
by the cluster. Primitives consist of a name, the agent type, and
various configuration options to the primitive. For example:
crm.primitive('www8', 'apache',
params='configfile=/etc/apache/www8.conf',
operations='$id-ref=apache_ops')
        will create an apache primitive (resource) for the www8 service
hosted by the Apache HTTP server. The parameters specified can either
be provided individually (e.g. a string) or as an iterable.
The following example shows how to specify multiple ops for a drbd
volume in a Master/Slave configuration::
ops = ['monitor role=Master interval=60s',
'monitor role=Slave interval=300s']
crm.primitive('r0', 'ocf:linbit:drbd',
params='drbd_resource=r0',
op=ops)
Additional arguments may be passed in as kwargs in which the key of
the kwarg is prepended to the value.
Parameters
----------
name: str
the name of the primitive.
agent: str
the type of agent to use to monitor the primitive resource
(e.g. ocf:linbit:drbd).
description: str, optional, kwarg
a description about the resource
params: str or iterable, optional, kwarg
parameters which are provided to the resource agent
meta: str or iterable, optional, kwarg
metadata information for the primitive resource
utilization: str or iterable, optional, kwarg
utilization information for the primitive resource
operations: str or iterable, optional, kwarg
operations information for the primitive resource in id_spec
format (e.g. $id=<id> or $id-ref=<id>)
op: str or iterable, optional, kwarg
op information regarding the primitive resource. This takes the
form of '<start|stop|monitor> [<attr>=<value> <attr>=<value> ...]'
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_primitive
"""
resources = self['resources']
resources[name] = agent
specs = ''
if description:
specs = specs + 'description="%s"' % description
# Use the ordering specified in the crm manual
for key in 'params', 'meta', 'utilization', 'operations', 'op':
if key not in kwargs:
continue
specs = specs + (' %s' % self._parse(key, kwargs[key]))
if specs:
self['resource_params'][name] = specs
def _parse(self, prefix, data):
results = ''
if isinstance(data, str):
data = [data]
first = True
for d in data:
if first:
results = results + ' '
first = False
results = results + ('%s %s ' % (prefix, d))
results = results.rstrip()
return results
def clone(self, name, resource, description=None, **kwargs):
"""Creates a resource which should run on all nodes.
Parameters
----------
name: str
the name of the clone
resource: str
the name or id of the resource to clone
description: str, optional
text containing a description for the clone
meta: str or list of str, optional, kwarg
metadata attributes to assign to the clone
params: str or list of str, optional, kwarg
parameters to assign to the clone
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_clone
"""
clone_specs = resource
if description:
clone_specs = clone_specs + (' description="%s"' % description)
for key in 'meta', 'params':
if key not in kwargs:
continue
value = kwargs[key]
if not value:
continue
clone_specs = clone_specs + (' %s' % self._parse(key, value))
self['clones'][name] = clone_specs
def colocation(self, name, score=ALWAYS, *resources, **kwargs):
"""Configures the colocation constraints of resources.
Provides placement constraints regarding resources defined within
the cluster. Using the colocate function, resource affinity or
anti-affinity can be defined.
For example, the following code ensures that the nova-console service
always runs where the cluster vip is running:
crm.colocation('console_with_vip', ALWAYS,
'nova-console', 'vip')
        The affinity or anti-affinity of the resource relationship is
        expressed in the `score` parameter. A positive score indicates that
        the resources should run on the same node. A score of INFINITY (or
        ALWAYS) will ensure the resources are always run on the same node(s),
        and a score of NEG_INFINITY (or NEVER) ensures that the resources are
        never run on the same node(s).
crm.colocation('never_apache_with_dummy', NEVER,
'apache', 'dummy')
        Any *resources values which are provided are treated as resources which
        the colocation constraint applies to. At least two resources must be
        defined as part of the colocation constraint.
        The resources take the form of <resource_name>[:role]. If the
        colocation constraint applies specifically to a role, this information
        should be included in the resource supplied.
Parameters
----------
id: str
id or name of the colocation constraint
        score: str {ALWAYS, INFINITY, NEVER, NEG_INFINITY} or int
the score or weight of the colocation constraint. A positive value
will indicate that the resources should run on the same node. A
negative value indicates that the resources should run on separate
nodes.
resources: str or list
the list of resources which the colocation constraint applies to.
node_attribute: str, optional, kwarg
can be used to run the resources on a set of nodes, not just a
single node.
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_colocation
"""
specs = '%s: %s' % (score, ' '.join(resources))
if 'node_attribute' in kwargs:
specs = specs + (' node-attribute=%s' % kwargs['node_attribute'])
self['colocations'][name] = specs
def group(self, name, *resources, **kwargs):
"""Creates a group of resources within Pacemaker.
        The created group includes the resources provided, in the order
        supplied. For example::
crm.group('grp_mysql', 'res_mysql_rbd', 'res_mysql_fs',
'res_mysql_vip', 'res_mysqld')
will create the 'grp_mysql' resource group consisting of the
res_mysql_rbd, res_mysql_fs, res_mysql_vip, and res_mysqld resources.
Parameters
----------
name: str
the name of the group resource
resources: list of str
the names or ids of resources to include within the group.
description: str, optional, kwarg
text to describe the resource
meta: str or list of str, optional, kwarg
metadata attributes to assign to the group
params: str or list of str, optional, kwarg
parameters to assign to the group
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_group
"""
specs = ' '.join(resources)
if 'description' in kwargs:
            specs = specs + (' description="%s"' % kwargs['description'])
for key in 'meta', 'params':
if key not in kwargs:
continue
value = kwargs[key]
specs = specs + (' %s' % self._parse(key, value))
self['groups'][name] = specs
def remove_deleted_resources(self):
"""Work through the existing resources and remove any mention of ones
which have been marked for deletion."""
for res in self['delete_resources']:
for key in self.keys():
if key == 'delete_resources':
continue
if isinstance(self[key], dict) and res in self[key].keys():
del self[key][res]
elif isinstance(self[key], list) and res in self[key]:
self[key].remove(res)
elif isinstance(self[key], tuple) and res in self[key]:
self[key] = tuple(x for x in self[key] if x != res)
def delete_resource(self, *resources):
"""Specify objects/resources to be deleted from within Pacemaker. This
        is not additive, the list of resources is set to exactly what was
passed in.
Parameters
----------
resources: str or list
the name or id of the specific resource to delete.
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_delete
"""
self['delete_resources'] = resources
self.remove_deleted_resources()
def add_delete_resource(self, resource):
"""Specify an object/resource to delete from within Pacemaker. It can
be called multiple times to add additional resources to the deletion
list.
Parameters
----------
resources: str
the name or id of the specific resource to delete.
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_delete
"""
if resource not in self['delete_resources']:
# NOTE(fnordahl): this unpleasant piece of code is regrettably
            # necessary for Python 3.4 (and trusty) compatibility, see LP: #1814218
# and LP: #1813982
self['delete_resources'] = tuple(
self['delete_resources'] or ()) + (resource,)
self.remove_deleted_resources()
def init_services(self, *resources):
"""Specifies that the service(s) is an init or upstart service.
Services (resources) which are noted as upstart services are
disabled, stopped, and left to pacemaker to manage the resource.
Parameters
----------
resources: str or list of str, varargs
The resources which should be noted as init services.
Returns
-------
None
"""
self['init_services'] = resources
def systemd_services(self, *resources):
"""Specifies that the service(s) is a systemd service.
Services (resources) which are noted as systemd services are
disabled, stopped, and left to pacemaker to manage the resource.
Parameters
----------
resources: str or list of str, varargs
The resources which should be noted as systemd services.
Returns
-------
None
"""
self['systemd_services'] = resources
def ms(self, name, resource, description=None, **kwargs):
"""Create a master/slave resource type.
The following code provides an example of creating a master/slave
resource on drbd disk1::
crm.ms('disk1', 'drbd1', meta='notify=true globally-unique=false')
Parameters
----------
name: str
the name or id of the master resource
resource: str
            the name or id of the resource which now has a master/slave
            association tied to it.
description: str, optional
a textual description of the master resource
meta: str or list of strs, optional, kwargs
strings defining the metadata for the master/slave resource type
params: str or list of strs, optional, kwargs
parameter strings which should be passed to the master/slave
resource creation
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_ms
"""
specs = resource
if description:
specs = specs + (' description="%s"' % description)
for key in 'meta', 'params':
if key not in kwargs:
continue
value = kwargs[key]
specs = specs + (' %s' % self._parse(key, value))
self['ms'][name] = specs
def location(self, name, resource, **kwargs):
"""Defines the preference of nodes for the given resource.
        The location constraints consist of one or more rules which specify
a score to be awarded if the rules match.
Parameters
----------
name: str
the name or id of the location constraint
resource: str
            the name, id, resource, set, tag, or resource pattern defining the
set of resources which match the location placement constraint.
attributes: str or list str, optional, kwarg
attributes which should be assigned to the location constraint
rule: str or list of str, optional, kwarg
the rule(s) which define the location constraint rules when
selecting a location to run the resource.
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_location
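        Examples
        --------
        A minimal sketch (the constraint name, resource name, and rule
        value below are illustrative assumptions)::
            crm.location('loc_res_dummy', 'res_dummy',
                         rule='100: #uname eq node1')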
"""
specs = resource
# Check if there are attributes assigned to the location and if so,
# format the spec string with the attributes
if 'attributes' in kwargs:
attrs = kwargs['attributes']
if isinstance(attrs, str):
attrs = [attrs]
specs = specs + (' %s' % ' '.join(attrs))
if 'rule' in kwargs:
rules = kwargs['rule']
specs = specs + (' %s' % self._parse('rule', rules))
self['locations'][name] = specs
def order(self, name, score=None, *resources, **kwargs):
"""Configures the ordering constraints of resources.
Provides ordering constraints to resources defined in a Pacemaker
cluster which affect the way that resources are started, stopped,
promoted, etc. Basic ordering is provided by simply specifying the
ordering name and an ordered list of the resources which the ordering
constraint applies to.
For example, the following code ensures that the apache resource is
started after the ClusterIP is started::
hacluster.order('apache-after-ip', 'ClusterIP', 'apache')
        By default, the ordering constraint is mandatory. The constraint
        behavior can be specified using the 'score' argument, which must
        precede the resources, e.g.::
            hacluster.order('apache-after-ip', hacluster.OPTIONAL,
                            'ClusterIP', 'apache')
Any *resources values which are provided are treated as resources which
the ordering constraint applies to. At least two resources must be
defined as part of the ordering constraint.
The resources take the form of <resource_name>[:<action>]. If the
ordering constraint applies to a specific action for the resource,
this information should be included in the resource supplied.
Parameters
----------
name: str
the id or name of the order constraint
        resources: str or list of strs in varargs format
the resources the ordering constraint applies to. The ordering
of the list of resources is used to provide the ordering.
score: {MANDATORY, OPTIONAL, SERIALIZED}, optional
the score of the ordering constraint.
symmetrical: boolean, optional, kwarg
when True, then the services for the resources will be stopped in
the reverse order. The default value for this is True.
Returns
-------
None
See Also
--------
http://crmsh.github.io/man/#cmdhelp_configure_order
"""
specs = ''
if score:
specs = '%s:' % score
specs = specs + (' %s' % ' '.join(resources))
if 'symmetrical' in kwargs:
            specs = specs + (' symmetrical=%s' % kwargs['symmetrical'])
self['orders'][name] = specs
def add(self, resource_desc):
"""Adds a resource descriptor object to the CRM configuration.
Adds a `ResourceDescriptor` object to the CRM configuration which
understands how to configure the resource itself. The
`ResourceDescriptor` object needs to know how to interact with this
CRM class in order to properly configure the pacemaker resources.
The minimum viable resource descriptor object will implement a method
which takes a reference parameter to this CRM in order to configure
itself.
Parameters
----------
        resource_desc: ResourceDescriptor
an object which provides an abstraction of a monitored resource
within pacemaker.
Returns
-------
None
"""
method = getattr(resource_desc, 'configure_resource', None)
if not callable(method):
raise ValueError('Invalid resource_desc. The "configure_resource"'
' function has not been defined.')
method(self)
class ResourceDescriptor(object):
"""
    A ResourceDescriptor represents a logical resource or concept and knows
    how to configure it within Pacemaker.
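    A minimal sketch of a custom descriptor (the class, resource, and agent
    names below are illustrative assumptions, not part of this module)::
        class DummyService(ResourceDescriptor):
            def __init__(self, name):
                self.name = name
            def configure_resource(self, crm):
                crm.primitive('res_{}'.format(self.name),
                              'ocf:heartbeat:Dummy',
                              op='monitor interval="10s"')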
"""
def configure_resource(self, crm):
"""Configures the logical resource(s) within the CRM.
This is the callback method which is invoked by the CRM in order
to allow this ResourceDescriptor to fully configure the logical
resource.
For example, a Virtual IP may provide a standard abstraction and
configure the specific details under the covers.
"""
pass
class InitService(ResourceDescriptor):
def __init__(self, service_name, init_service_name, clone=True):
"""Class for managing init resource
:param service_name: string - Name of service
:param init_service_name: string - Name service uses in init system
:param clone: bool - clone service across all units
:returns: None
"""
self.service_name = service_name
self.init_service_name = init_service_name
self.clone = clone
def configure_resource(self, crm):
""""Configure new init system service resource in crm
:param crm: CRM() instance - Config object for Pacemaker resources
:returns: None
"""
res_key = 'res_{}_{}'.format(
self.service_name.replace('-', '_'),
self.init_service_name.replace('-', '_'))
res_type = 'lsb:{}'.format(self.init_service_name)
_meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
crm.primitive(
res_key, res_type, op='monitor interval="5s"', meta=_meta)
crm.init_services(self.init_service_name)
if self.clone:
clone_key = 'cl_{}'.format(res_key)
crm.clone(clone_key, res_key)
class VirtualIP(ResourceDescriptor):
def __init__(self, service_name, vip, nic=None, cidr=None):
"""Class for managing VIP resource
:param service_name: string - Name of service
:param vip: string - Virtual IP to be managed
:param nic: string - Network interface to bind vip to
:param cidr: string - Netmask for vip
:returns: None
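        An illustrative instantiation (the service name and address values
        below are assumptions)::
            VirtualIP('keystone', '10.0.0.100', nic='eth0', cidr='24')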
"""
self.service_name = service_name
self.vip = vip
self.nic = nic
self.cidr = cidr
def configure_resource(self, crm):
"""Configure new vip resource in crm
:param crm: CRM() instance - Config object for Pacemaker resources
:returns: None
"""
if self.nic:
vip_key = 'res_{}_{}_vip'.format(self.service_name, self.nic)
else:
vip_key = 'res_{}_{}_vip'.format(
self.service_name,
hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7])
ipaddr = ipaddress.ip_address(self.vip)
if isinstance(ipaddr, ipaddress.IPv4Address):
res_type = 'ocf:heartbeat:IPaddr2'
res_params = 'ip="{}"'.format(self.vip)
else:
res_type = 'ocf:heartbeat:IPv6addr'
res_params = 'ipv6addr="{}"'.format(self.vip)
vip_params = 'ipv6addr'
vip_key = 'res_{}_{}_{}_vip'.format(self.service_name, self.nic,
vip_params)
if self.nic:
res_params = '{} nic="{}"'.format(res_params, self.nic)
if self.cidr:
res_params = '{} cidr_netmask="{}"'.format(res_params, self.cidr)
# Monitor the VIP
_op_monitor = 'monitor timeout="20s" interval="10s" depth="0"'
_meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
crm.primitive(
vip_key, res_type, params=res_params, op=_op_monitor, meta=_meta)
class DNSEntry(ResourceDescriptor):
def __init__(self, service_name, ip, fqdn, endpoint_type):
"""Class for managing DNS entries
:param service_name: string - Name of service
:param ip: string - IP to point DNS entry at
:param fqdn: string - DNS Entry
:param endpoint_type: string - The type of the endpoint represented by
the DNS record eg public, admin etc
:returns: None
"""
self.service_name = service_name
self.ip = ip
self.fqdn = fqdn
self.endpoint_type = endpoint_type
def configure_resource(self, crm, res_type='ocf:maas:dns'):
"""Configure new DNS resource in crm
:param crm: CRM() instance - Config object for Pacemaker resources
:param res_type: string - Corosync Open Cluster Framework resource
agent to use for DNS HA
:returns: None
"""
res_key = 'res_{}_{}_hostname'.format(
self.service_name.replace('-', '_'),
self.endpoint_type)
res_params = ''
if self.fqdn:
res_params = '{} fqdn="{}"'.format(res_params, self.fqdn)
if self.ip:
res_params = '{} ip_address="{}"'.format(res_params, self.ip)
crm.primitive(res_key, res_type, params=res_params)
class SystemdService(ResourceDescriptor):
def __init__(self, service_name, systemd_service_name, clone=True):
"""Class for managing systemd resource
:param service_name: string - Name of service
:param systemd_service_name: string - Name service uses in
systemd system
:param clone: bool - clone service across all units
:returns: None
"""
self.service_name = service_name
self.systemd_service_name = systemd_service_name
self.clone = clone
def configure_resource(self, crm):
""""Configure new systemd system service resource in crm
:param crm: CRM() instance - Config object for Pacemaker resources
:returns: None
"""
res_key = 'res_{}_{}'.format(
self.service_name.replace('-', '_'),
self.systemd_service_name.replace('-', '_'))
res_type = 'systemd:{}'.format(self.systemd_service_name)
_meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
crm.primitive(
res_key, res_type, op='monitor interval="5s"', meta=_meta)
crm.systemd_services(self.systemd_service_name)
if self.clone:
clone_key = 'cl_{}'.format(res_key)
crm.clone(clone_key, res_key)
| 36.158573 | 79 | 0.593932 |
032c43cbadf0ed11bd8c30dc9efbcdcb4549023e | 1,176 | py | Python | app/save_utils.py | MJafarMashhadi/keras-glove | 43ce3a262a517e2c7aed04f1726bc7ea049fd031 | ["MIT"] | null | null | null | app/save_utils.py | MJafarMashhadi/keras-glove | 43ce3a262a517e2c7aed04f1726bc7ea049fd031 | ["MIT"] | null | null | null | app/save_utils.py | MJafarMashhadi/keras-glove | 43ce3a262a517e2c7aed04f1726bc7ea049fd031 | ["MIT"] | null | null | null |
import pickle
from keras.models import Model
from keras.preprocessing.text import Tokenizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from app.config import *
def save_model(model: Model, tokenizer: Tokenizer):
"""
Saves the important parts of the model
:param model: Keras model to save
:param tokenizer: Keras Tokenizer to save
"""
for layer in model.layers:
if '_biases' in layer.name or '_embeddings' in layer.name:
np.save(file=f'{OUTPUT_FOLDER}{layer.name}', arr=layer.get_weights()[0])
# save tokenizer
pickle.dump(obj=tokenizer.index_word, file=open(f'{OUTPUT_FOLDER}{INDEX2WORD}', 'wb'))
pickle.dump(obj=tokenizer.word_index, file=open(f'{OUTPUT_FOLDER}{WORD2INDEX}', 'wb'))
# save combined embeddings & correlation matrix
agg_embeddings = np.load(f'{OUTPUT_FOLDER}{CENTRAL_EMBEDDINGS}.npy') + \
np.load(f'{OUTPUT_FOLDER}{CONTEXT_EMBEDDINGS}.npy')
np.save(file=f'{OUTPUT_FOLDER}{AGGREGATED_EMBEDDINGS}', arr=agg_embeddings)
np.save(file=f'{OUTPUT_FOLDER}{CORRELATION_MATRIX}', arr=cosine_similarity(cosine_similarity(agg_embeddings)))
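# An illustrative loading sketch for the artifacts written above (assumes
# save_model has already run and the config constants point at real files):
#     index2word = pickle.load(open(f'{OUTPUT_FOLDER}{INDEX2WORD}', 'rb'))
#     embeddings = np.load(f'{OUTPUT_FOLDER}{AGGREGATED_EMBEDDINGS}.npy')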
| 37.935484 | 114 | 0.721088 |
03001b6df086f536ec51b86ea70897e6c53287d4 | 2,980 | py | Python | FridgeCade-Project/ggpo/gui/ui/savestatesdialog_ui.py | Linuks777/FridgeCade | 22de6933ae642d9ed393f86160670a6906c3ff9a | ["WTFPL"] | null | null | null | FridgeCade-Project/ggpo/gui/ui/savestatesdialog_ui.py | Linuks777/FridgeCade | 22de6933ae642d9ed393f86160670a6906c3ff9a | ["WTFPL"] | null | null | null | FridgeCade-Project/ggpo/gui/ui/savestatesdialog_ui.py | Linuks777/FridgeCade | 22de6933ae642d9ed393f86160670a6906c3ff9a | ["WTFPL"] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ggpo/gui/ui/savestatesdialog.ui'
#
# Created: Tue Aug 25 22:55:14 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SavestatesDialog(object):
def setupUi(self, SavestatesDialog):
SavestatesDialog.setObjectName(_fromUtf8("SavestatesDialog"))
SavestatesDialog.resize(630, 600)
self.verticalLayout = QtGui.QVBoxLayout(SavestatesDialog)
self.verticalLayout.setContentsMargins(2, 0, 2, 6)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(SavestatesDialog)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.uiFilterLineEdit = QtGui.QLineEdit(SavestatesDialog)
self.uiFilterLineEdit.setText(_fromUtf8(""))
self.uiFilterLineEdit.setObjectName(_fromUtf8("uiFilterLineEdit"))
self.horizontalLayout.addWidget(self.uiFilterLineEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.uiSavestatesTblv = QtGui.QTableView(SavestatesDialog)
self.uiSavestatesTblv.setObjectName(_fromUtf8("uiSavestatesTblv"))
self.verticalLayout.addWidget(self.uiSavestatesTblv)
self.uiButtonBox = QtGui.QDialogButtonBox(SavestatesDialog)
self.uiButtonBox.setOrientation(QtCore.Qt.Horizontal)
self.uiButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.uiButtonBox.setObjectName(_fromUtf8("uiButtonBox"))
self.verticalLayout.addWidget(self.uiButtonBox)
self.retranslateUi(SavestatesDialog)
QtCore.QObject.connect(self.uiButtonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), SavestatesDialog.accept)
QtCore.QObject.connect(self.uiButtonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), SavestatesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SavestatesDialog)
SavestatesDialog.setTabOrder(self.uiFilterLineEdit, self.uiSavestatesTblv)
SavestatesDialog.setTabOrder(self.uiSavestatesTblv, self.uiButtonBox)
def retranslateUi(self, SavestatesDialog):
SavestatesDialog.setWindowTitle(_translate("SavestatesDialog", "Unsupported game savestates", None))
self.label.setText(_translate("SavestatesDialog", "Filter:", None))
| 47.301587 | 113 | 0.745973 |
62130de2f3924f8f18689482f2d4d468f0bb5b72 | 7,944 | py | Python | transformers/tests/tokenization_bert_japanese_test.py | DaDaMrX/ReaLiSe | 25843e0c2c32b3a364cee857b2e4f5ba8b2764e9 | ["MIT"] | 72 | 2021-05-27T06:32:20.000Z | 2022-03-31T03:04:58.000Z | transformers/tests/tokenization_bert_japanese_test.py | DaDaMrX/ReaLiSe | 25843e0c2c32b3a364cee857b2e4f5ba8b2764e9 | ["MIT"] | 10 | 2021-05-27T05:13:50.000Z | 2022-03-27T09:10:47.000Z | transformers/tests/tokenization_bert_japanese_test.py | DaDaMrX/ReaLiSe | 25843e0c2c32b3a364cee857b2e4f5ba8b2764e9 | ["MIT"] | 13 | 2021-06-24T04:44:14.000Z | 2022-03-03T12:57:23.000Z |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from io import open
from transformers.tokenization_bert import WordpieceTokenizer
from transformers.tokenization_bert_japanese import (BertJapaneseTokenizer,
MecabTokenizer, CharacterTokenizer,
VOCAB_FILES_NAMES)
from .tokenization_tests_commons import CommonTestCases
from .utils import slow, custom_tokenizers
@custom_tokenizers
class BertJapaneseTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = BertJapaneseTokenizer
def setUp(self):
super(BertJapaneseTokenizationTest, self).setUp()
vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]",
u"こんにちは", u"こん", u"にちは", u"ばんは", u"##こん", u"##にちは", u"##ばんは",
u"世界", u"##世界", u"、", u"##、", u"。", u"##。"]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"こんにちは、世界。 \nこんばんは、世界。"
output_text = u"こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize(u"こんにちは、世界。\nこんばんは、世界。")
self.assertListEqual(tokens,
[u"こんにちは", u"、", u"世界", u"。",
u"こん", u"##ばんは", u"、", u"世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens),
[3, 12, 10, 14, 4, 9, 12, 10, 14])
def test_mecab_tokenizer(self):
tokenizer = MecabTokenizer()
self.assertListEqual(
tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "),
[u"アップルストア", u"で", u"iPhone", u"8", u"が",
u"発売", u"さ", u"れ", u"た", u"。"])
def test_mecab_tokenizer_lower(self):
tokenizer = MecabTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "),
[u"アップルストア", u"で", u"iphone", u"8", u"が",
u"発売", u"さ", u"れ", u"た", u"。"])
def test_mecab_tokenizer_no_normalize(self):
tokenizer = MecabTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "),
[u"アップルストア", u"で", u"iPhone", u"8", u"が",
u"発売", u"さ", u"れ", u"た", u" ", u"。"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]",
u"こんにちは", u"こん", u"にちは" u"ばんは", u"##こん", u"##にちは", u"##ばんは"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token=u"[UNK]")
self.assertListEqual(tokenizer.tokenize(u""), [])
self.assertListEqual(tokenizer.tokenize(u"こんにちは"),
[u"こんにちは"])
self.assertListEqual(tokenizer.tokenize(u"こんばんは"),
[u"こん", u"##ばんは"])
self.assertListEqual(tokenizer.tokenize(u"こんばんは こんばんにちは こんにちは"),
[u"こん", u"##ばんは", u"[UNK]", u"こんにちは"])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese")
text = tokenizer.encode(u"ありがとう。", add_special_tokens=False)
text_2 = tokenizer.encode(u"どういたしまして。", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_2 + [3]
class BertJapaneseCharacterTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = BertJapaneseTokenizer
def setUp(self):
super(BertJapaneseCharacterTokenizationTest, self).setUp()
vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]",
u"こ", u"ん", u"に", u"ち", u"は", u"ば", u"世", u"界", u"、", u"。"]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname,
subword_tokenizer_type="character",
**kwargs)
def get_input_output_texts(self):
input_text = u"こんにちは、世界。 \nこんばんは、世界。"
output_text = u"こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file,
subword_tokenizer_type="character")
tokens = tokenizer.tokenize(u"こんにちは、世界。 \nこんばんは、世界。")
self.assertListEqual(tokens,
[u"こ", u"ん", u"に", u"ち", u"は", u"、", u"世", u"界", u"。",
u"こ", u"ん", u"ば", u"ん", u"は", u"、", u"世", u"界", u"。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens),
[3, 4, 5, 6, 7, 11, 9, 10, 12,
3, 4, 8, 4, 7, 11, 9, 10, 12])
def test_character_tokenizer(self):
vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]",
u"こ", u"ん", u"に", u"ち", u"は", u"ば", u"世", u"界"u"、", u"。"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = CharacterTokenizer(vocab=vocab, unk_token=u"[UNK]")
self.assertListEqual(tokenizer.tokenize(u""), [])
self.assertListEqual(tokenizer.tokenize(u"こんにちは"),
[u"こ", u"ん", u"に", u"ち", u"は"])
self.assertListEqual(tokenizer.tokenize(u"こんにちほ"),
[u"こ", u"ん", u"に", u"ち", u"[UNK]"])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese-char")
text = tokenizer.encode(u"ありがとう。", add_special_tokens=False)
text_2 = tokenizer.encode(u"どういたしまして。", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_2 + [3]
| 41.375 | 89 | 0.567095 |
f687ca5fc77e91b9befcbb326bbfa65104ccc7f2 | 2,574 | py | Python | src/edge/command/model/subparser.py | fuzzylabs/vertex-edge | 97a0df5ebad0dc35e262d9f8269e6e33190f6ad1 | ["Apache-2.0"] | 20 | 2021-06-25T10:13:33.000Z | 2022-01-31T16:34:32.000Z | src/edge/command/model/subparser.py | fuzzylabs/vertex-edge | 97a0df5ebad0dc35e262d9f8269e6e33190f6ad1 | ["Apache-2.0"] | 33 | 2021-07-15T12:22:59.000Z | 2021-11-16T09:57:16.000Z | src/edge/command/model/subparser.py | fuzzylabs/vertex-edge | 97a0df5ebad0dc35e262d9f8269e6e33190f6ad1 | ["Apache-2.0"] | 9 | 2021-06-25T10:13:45.000Z | 2021-10-20T15:20:27.000Z |
import argparse
from edge.command.model.deploy import model_deploy
from edge.command.model.describe import describe_model
from edge.command.model.get_endpoint import get_model_endpoint
from edge.command.model.init import model_init
from edge.command.model.list import list_models
from edge.command.model.remove import remove_model
from edge.command.model.template import create_model_from_template
from edge.exception import EdgeException
def add_model_parser(subparsers):
parser = subparsers.add_parser("model", help="Model related actions")
actions = parser.add_subparsers(title="action", dest="action", required=True)
init_parser = actions.add_parser("init", help="Initialise model on Vertex AI")
init_parser.add_argument("model_name", metavar="model-name", help="Model name")
deploy_parser = actions.add_parser("deploy", help="Deploy model on Vertex AI")
deploy_parser.add_argument("model_name", metavar="model-name", help="Model name")
get_endpoint_parser = actions.add_parser("get-endpoint", help="Get Vertex AI endpoint URI")
get_endpoint_parser.add_argument("model_name", metavar="model-name", help="Model name")
actions.add_parser("list", help="List initialised models")
describe_parser = actions.add_parser("describe", help="Describe an initialised model")
describe_parser.add_argument("model_name", metavar="model-name", help="Model name")
remove_parser = actions.add_parser("remove", help="Remove an initialised model from vertex:edge")
remove_parser.add_argument("model_name", metavar="model-name", help="Model name")
template_parser = actions.add_parser("template", help="Create a model pipeline from a template")
template_parser.add_argument("model_name", metavar="model-name", help="Model name")
template_parser.add_argument("-f", action="store_true",
help="Force override a pipeline directory if already exists")
def run_model_actions(args: argparse.Namespace):
if args.action == "init":
model_init(args.model_name)
elif args.action == "deploy":
model_deploy(args.model_name)
elif args.action == "get-endpoint":
get_model_endpoint(args.model_name)
elif args.action == "list":
list_models()
elif args.action == "describe":
describe_model(args.model_name)
elif args.action == "remove":
remove_model(args.model_name)
elif args.action == "template":
create_model_from_template(args.model_name, args.f)
else:
raise EdgeException("Unexpected model command")
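# An illustrative wiring sketch for the helpers above (the program name and
# the example argv are assumptions, not part of this module):
#     parser = argparse.ArgumentParser(prog="edge")
#     subparsers = parser.add_subparsers(title="command", dest="command", required=True)
#     add_model_parser(subparsers)
#     run_model_actions(parser.parse_args(["model", "list"]))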
| 45.157895 | 101 | 0.737374 |
7b603284a06ea5cbb7e8ad2becb80525cec2db1f | 166 | py | Python | mash_place_api/config.py | MashSoftware/geo-api | 79b7067729531325ef939ca74924a4044d332b4c | ["MIT"] | null | null | null | mash_place_api/config.py | MashSoftware/geo-api | 79b7067729531325ef939ca74924a4044d332b4c | ["MIT"] | null | null | null | mash_place_api/config.py | MashSoftware/geo-api | 79b7067729531325ef939ca74924a4044d332b4c | ["MIT"] | 1 | 2018-07-20T20:40:58.000Z | 2018-07-20T20:40:58.000Z |
import os
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'postgresql+psycopg2://vagrant:vagrant@localhost:5432/vagrant')
SQLALCHEMY_TRACK_MODIFICATIONS = False
| 33.2 | 115 | 0.831325 |
e00354f6d200b9df0d0bf2fe1bf2838b3c540a61 | 741 | py | Python | data.py | byander/predict_stock_market | bf6c6b0e2418dc2520e9376478df579dc71d511b | ["MIT"] | null | null | null | data.py | byander/predict_stock_market | bf6c6b0e2418dc2520e9376478df579dc71d511b | ["MIT"] | 1 | 2021-06-01T22:47:33.000Z | 2021-06-01T22:47:33.000Z | data.py | byander/predict_stock_market | bf6c6b0e2418dc2520e9376478df579dc71d511b | ["MIT"] | 1 | 2020-03-22T16:37:55.000Z | 2020-03-22T16:37:55.000Z |
import pandas as pd
from db import connect
def get_dataset():
con = connect()
sql = '''SELECT symbol, date, open, high, low, close, volume FROM time_series_daily WHERE date >= '2018-01-01' ORDER BY date;'''
# sql = '''SELECT symbol, date, open, high, low, close, volume FROM time_series_daily WHERE date < '2018-01-01' ORDER BY date;'''
ds = pd.read_sql_query(sql, con)
con.close()
return ds
def get_data_close_last_30():
con = connect()
sql = '''SELECT symbol, date, open, high, low, close, volume FROM time_series_daily WHERE date >= '2018-01-01' ORDER BY date;'''
ds = pd.read_sql_query(sql, con)
con.close()
ds['day'] = pd.to_datetime(ds['date']).dt.day
print(ds[:8])
return ds
| 30.875 | 133 | 0.649123 |
e6ae54db453786c81c74fa41c58406f9fb3df5c1 | 56,140 | py | Python | pcdsdevices/interface.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | ["BSD-3-Clause-LBNL"] | null | null | null | pcdsdevices/interface.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | ["BSD-3-Clause-LBNL"] | null | null | null | pcdsdevices/interface.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | ["BSD-3-Clause-LBNL"] | null | null | null |
"""
Module for defining bell-and-whistles movement features.
"""
import functools
import logging
import numbers
import re
import shutil
import signal
import subprocess
import time
import typing
from contextlib import contextmanager
from pathlib import Path
from threading import Event
from types import MethodType, SimpleNamespace
from typing import Optional
from weakref import WeakSet
import ophyd
import yaml
from bluesky.utils import ProgressBar
from ophyd.device import Device
from ophyd.ophydobj import Kind, OphydObject
from ophyd.positioner import PositionerBase
from ophyd.signal import AttributeSignal, Signal
from . import utils
from .signal import NotImplementedSignal
try:
import fcntl
except ImportError:
fcntl = None
try:
from elog.utils import get_primary_elog
has_elog = True
except ImportError:
has_elog = False
logger = logging.getLogger(__name__)
engineering_mode = True
OphydObject_whitelist = []
BlueskyInterface_whitelist = []
Device_whitelist = ["stop"]
Signal_whitelist = ["value", "put", "get"]
Positioner_whitelist = ["settle_time", "timeout", "egu", "limits", "move",
"position", "moving", "set_current_position"]
class _TabCompletionHelper:
"""
Base class for `TabCompletionHelperClass`, `TabCompletionHelperInstance`.
"""
_includes: typing.Set[str]
_regex: typing.Optional[typing.Pattern]
def __init__(self):
self._includes = set()
self._regex = None
self.reset()
def build_regex(self) -> typing.Pattern:
"""Update the regular expression based on the current includes."""
self._regex = re.compile("|".join(sorted(self._includes)))
return self._regex
def reset(self):
"""Reset the tab-completion settings."""
self._regex = None
self._includes.clear()
def add(self, attr: str):
"""Add an attribute to the include list."""
self._includes.add(attr)
self._regex = None
def remove(self, attr: str):
"""Remove an attribute from the include list."""
self._includes.remove(attr)
self._regex = None
def __repr__(self):
return f'{self.__class__.__name__}(includes={self._includes})'
class TabCompletionHelperClass(_TabCompletionHelper):
"""
Tab completion helper for the class itself.
Parameters
----------
cls : subclass of BaseInterface
Class type object to generate tab completion information from.
"""
cls: typing.Type['BaseInterface']
def __init__(self, cls):
self.cls = cls
super().__init__()
def reset(self):
"""Reset the attribute includes to those annotated in the class."""
super().reset()
whitelist = []
for parent in self.cls.mro():
whitelist.extend(getattr(parent, 'tab_whitelist', []))
if getattr(parent, "tab_component_names", False):
for cpt_name in parent.component_names:
if getattr(parent, cpt_name).kind != Kind.omitted:
whitelist.append(cpt_name)
self._includes = set(whitelist)
def new_instance(self, instance) -> 'TabCompletionHelperInstance':
"""
Create a new :class:`TabCompletionHelperInstance` for the given object.
Parameters
----------
instance : object
The instance of `self.cls`.
"""
return TabCompletionHelperInstance(instance, self)
class TabCompletionHelperInstance(_TabCompletionHelper):
"""
Tab completion helper for one instance of a class.
Parameters
----------
instance : object
Instance of `class_helper.cls`.
class_helper : TabCompletionHelperClass
Class helper for defaults.
"""
class_helper: TabCompletionHelperClass
instance: 'BaseInterface'
super_dir: typing.Callable[[], typing.List[str]]
def __init__(self, instance, class_helper):
assert isinstance(instance, BaseInterface), 'Must mix in BaseInterface'
self.class_helper = class_helper
self.instance = instance
self.super_dir = super(BaseInterface, instance).__dir__
super().__init__()
def reset(self):
"""Reset the attribute includes to that defined by the class."""
super().reset()
self._includes = set(self.class_helper._includes)
def get_filtered_dir_list(self) -> typing.List[str]:
"""Get the dir list, filtered based on the whitelist."""
if self._regex is None:
self.build_regex()
return [
elem
for elem in self.super_dir()
if self._regex.fullmatch(elem)
]
def get_dir(self) -> typing.List[str]:
"""Get the dir list based on the engineering mode settings."""
if get_engineering_mode():
return self.super_dir()
return self.get_filtered_dir_list()
class BaseInterface:
"""
Interface layer to attach to any Device for SLAC features.
This class defines an API and some defaults for filtering tab-completion
results for new users to avoid confusion. The API involves setting the
tab_whitelist attribute on any subclass of BaseInterface. When in
non-engineering mode, only elements on the whitelists will be displayed to
the user.
Attributes
----------
tab_whitelist : list
List of string regex to show in autocomplete for non-engineering mode.
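    Examples
    --------
    A minimal sketch of opting methods into the filtered tab completion
    (the class and method names below are illustrative, not part of this
    module)::
        class MyMotor(BaseInterface, Device):
            tab_whitelist = ['home', 'clear_error']
            def home(self):
                ...
            def clear_error(self):
                ...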
"""
tab_whitelist = (OphydObject_whitelist + BlueskyInterface_whitelist +
Device_whitelist + Signal_whitelist +
Positioner_whitelist)
_class_tab: TabCompletionHelperClass
_tab: TabCompletionHelperInstance
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
mro = cls.mro()
if Device in mro and mro.index(BaseInterface) > mro.index(Device):
order = '\n '.join(mro_cls.__name__ for mro_cls in mro)
raise RuntimeError(
f"{cls.__module__}.{cls.__name__} inherits from "
f"`BaseInterface`, but does not correctly mix it in. Device "
f"must come *after* `BaseInterface` in the class method "
f"resolution order (MRO). Try changing the order of class "
f"inheritance around or ask an expert. Current order is:\n"
f" {order}"
)
cls._class_tab = TabCompletionHelperClass(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tab = self._class_tab.new_instance(self)
def __dir__(self):
return self._tab.get_dir()
def __repr__(self):
"""Simplify the ophydobject repr to avoid crazy long represenations."""
prefix = getattr(self, 'prefix', None)
name = getattr(self, 'name', None)
return f"{self.__class__.__name__}({prefix}, name={name})"
def _repr_pretty_(self, pp, cycle):
"""
Set pretty-printing to show current status information.
This will also filter out errors from the ``status_info``
and ``format_status_info`` methods, making sure "something"
is printed.
We will not leverage the pretty-printing feature set here,
we will just use it as a convenient IPython entry point for
rendering our device info.
The parameter set is documented here in case we change our minds,
since I already wrote it out before deciding on a renderer-agnostic
approach.
Parameters
----------
pp: PrettyPrinter
An instance of PrettyPrinter is always passed into the method.
This is what you use to determine what gets printed.
pp.text('text') adds non-breaking text to the output.
pp.breakable() either adds a whitespace or breaks here.
pp.pretty(obj) pretty prints another object.
            with pp.group(4, 'text', 'text') groups items into an indented set
on multiple lines.
cycle: bool
This is True when the pretty printer detects a cycle, e.g. to help
you avoid infinite loops. For example, your _repr_pretty_ method
may call pp.pretty to print a sub-object, and that object might
also call pp.pretty to print this object. Then cycle would be True
and you know not to make any further recursive calls.
"""
try:
status_text = self.format_status_info(self.status_info())
except Exception:
status_text = (f'{self}: Error showing status information. '
'Check IOC connection and device health.')
logger.debug(status_text, exc_info=True)
pp.text(status_text)
def status(self) -> str:
"""
Returns a str with the current pv values for the device.
"""
return self.format_status_info(self.status_info())
def format_status_info(self, status_info):
"""
Entry point for the mini status displays in the ipython terminal.
This can be overridden if a device wants a custom status printout.
Parameters
----------
status_info: dict
See self.status_info method
Returns
-------
status: str
Formatted string with all relevant status information.
"""
lines = self._status_info_lines(status_info)
if lines:
return '\n'.join(lines)
else:
return f'{self.name}: No status available'
def _status_info_lines(self, status_info, prefix='', indent=0):
full_name = status_info['name']
if full_name.startswith(prefix):
name = full_name.replace(prefix, '', 1)
else:
name = full_name
if status_info['is_device']:
# Set up a tree view
header_lines = ['', f'{name}', '-' * len(name)]
data_lines = []
extra_keys = ('name', 'kind', 'is_device')
for key in extra_keys:
status_info.pop(key)
for key, value in status_info.items():
if isinstance(value, dict):
# Go recursive
inner = self._status_info_lines(value,
prefix=full_name + '_',
indent=2)
data_lines.extend(inner)
else:
# Record extra value
data_lines.append(f'{key}: {value}')
if data_lines:
# Indent the subdevices
if indent:
for i, line in enumerate(data_lines):
data_lines[i] = ' ' * indent + line
return header_lines + data_lines
else:
# No data = do not print header
return []
else:
# Show the name/value pair for a signal
value = status_info['value']
units = status_info.get('units') or ''
if units:
units = f' [{units}]'
value_text = str(value)
if '\n' in value_text:
# Multiline values (arrays) need special handling
value_lines = value_text.split('\n')
for i, line in enumerate(value_lines):
value_lines[i] = ' ' * 2 + line
return [f'{name}:'] + value_lines
else:
return [f'{name}: {value}{units}']
def status_info(self):
"""
Get useful information for the status display.
This can be overridden if a device wants to feed custom information to
the formatter.
Returns
-------
info: dict
Nested dictionary. Each level has keys name, kind, and is_device.
If is_device is True, subdevice dictionaries may follow. Otherwise,
the only other key in the dictionary will be value.
"""
def subdevice_filter(info):
return bool(info['kind'] & Kind.normal)
return ophydobj_info(self, subdevice_filter=subdevice_filter)
def post_elog_status(self):
"""
Post device status to the primary elog, if possible.
"""
if not has_elog:
logger.info('No primary elog found, cannot post status.')
return
try:
elog = get_primary_elog()
except ValueError:
logger.info('elog exists but has not been registered')
return
final_post = f'<pre>{self.status()}</pre>'
elog.post(final_post, tags=['ophyd_status'],
title=f'{self.name} status report')
def screen(self):
"""
Open a screen for controlling the device.
Default behavior is the typhos screen, but this method can
be overridden for more specialized screens.
"""
if shutil.which('typhos') is None:
            logger.error('typhos is not installed, screen cannot be opened')
return
arglist = ['typhos', f'{self.name}']
logger.info(f'Opening typhos screen for {self.name}...')
# capture stdout and stderr
subprocess.Popen(arglist,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_name(obj, default):
try:
return obj.name
except AttributeError:
try:
return str(obj)
except Exception:
return default
def get_kind(obj):
try:
return obj.kind
except Exception:
return Kind.omitted
def get_value(signal):
try:
# Minimize waiting, we aren't collecting data we're showing info
if signal.connected:
return signal.get(timeout=0.1, connection_timeout=0.1)
except Exception:
pass
return None
def get_units(signal):
attrs = ('derived_units', 'units', 'egu')
for attr in attrs:
try:
value = getattr(signal, attr, None) or signal.metadata[attr]
if isinstance(value, str):
return value
except Exception:
...
def ophydobj_info(obj, subdevice_filter=None, devices=None):
if isinstance(obj, Signal):
return signal_info(obj)
elif isinstance(obj, Device):
return device_info(obj, subdevice_filter=subdevice_filter,
devices=devices)
elif isinstance(obj, PositionerBase):
return positionerbase_info(obj)
else:
return {}
def device_info(device, subdevice_filter=None, devices=None):
if devices is None:
devices = set()
name = get_name(device, default='device')
kind = get_kind(device)
info = dict(name=name, kind=kind, is_device=True)
try:
# Show the current preset state if we have one
# This should be the first key in the ordered dict
has_presets = device.presets.has_presets
except AttributeError:
has_presets = False
if has_presets:
try:
info['preset'] = device.presets.state()
except Exception:
info['preset'] = 'ERROR'
try:
# Extra key for positioners
# This has ordered dict priority over everything but the preset state
info['position'] = device.position
except AttributeError:
pass
except Exception:
# Something else went wrong! We have a position but it didn't work
info['position'] = 'ERROR'
else:
try:
if not isinstance(info['position'], numbers.Integral):
# Give a floating point value, if possible, when not integral
info['position'] = float(info['position'])
except Exception:
...
try:
# Best-effort try at getting the units
info['units'] = get_units(device)
except Exception:
pass
if device not in devices:
devices.add(device)
for cpt_name, cpt_desc in device._sig_attrs.items():
# Skip lazy signals outright in all cases
# Usually these are lazy because they take too long to getattr
if cpt_desc.lazy:
continue
# Skip attribute signals
# Indeterminate get times, no real connected bool, etc.
if issubclass(cpt_desc.cls, AttributeSignal):
continue
# Skip not implemented signals
# They never have interesting information
if issubclass(cpt_desc.cls, NotImplementedSignal):
continue
try:
cpt = getattr(device, cpt_name)
except AttributeError:
# Why are we ever in this block?
logger.debug(f'Getattr {name}.{cpt_name} failed.',
exc_info=True)
continue
cpt_info = ophydobj_info(cpt, subdevice_filter=subdevice_filter,
devices=devices)
if 'position' in info:
# Drop some potential duplicate keys for positioners
try:
if cpt.name == cpt.parent.name:
continue
except AttributeError:
pass
if cpt_name in ('readback', 'user_readback'):
continue
if not callable(subdevice_filter) or subdevice_filter(cpt_info):
info[cpt_name] = cpt_info
return info
def signal_info(signal):
name = get_name(signal, default='signal')
kind = get_kind(signal)
value = get_value(signal)
units = get_units(signal)
return dict(name=name, kind=kind, is_device=False, value=value,
units=units)
def positionerbase_info(positioner):
name = get_name(positioner, default='positioner')
kind = get_kind(positioner)
return dict(name=name, kind=kind, is_device=True,
position=positioner.position)
def set_engineering_mode(expert):
"""
Switches between expert and user modes for :class:`BaseInterface` features.
Current features:
- Autocomplete filtering
Parameters
----------
expert : bool
Set to `True` to enable expert mode, or :keyword:`False` to
disable it. `True` is the starting value.
"""
global engineering_mode
engineering_mode = bool(expert)
def get_engineering_mode():
"""
Get the last value set by :meth:`set_engineering_mode`.
Returns
-------
expert : bool
The current engineering mode. See :meth:`set_engineering_mode`.
"""
return engineering_mode
class MvInterface(BaseInterface):
"""
Interface layer to attach to a positioner for motion shortcuts.
Defines common shortcuts that the beamline scientists like for moving
things on the command line. There is no need for these in a scripting
    environment, but this is a safe space for implementing move features that
would otherwise be disruptive to running scans and writing higher-level
applications.
"""
tab_whitelist = ["mv", "wm", "wm_update"]
_last_status: Optional[ophyd.status.MoveStatus]
_mov_ev: Event
def __init__(self, *args, **kwargs):
self._mov_ev = Event()
self._last_status = None
super().__init__(*args, **kwargs)
def _log_move_limit_error(self, position, ex):
logger.error('Failed to move %s from %s to %s: %s', self.name,
self.wm(), position, ex)
def _log_move(self, position):
logger.info('Moving %s from %s to %s', self.name, self.wm(), position)
def _log_move_end(self):
logger.info('%s reached position %s', self.name, self.wm())
def move(self, *args, **kwargs):
try:
st = super().move(*args, **kwargs)
except ophyd.utils.LimitError as ex:
# Pick out the position either in kwargs or args
try:
position = kwargs['position']
except KeyError:
position = args[0]
self._log_move_limit_error(position, ex)
raise
self._last_status = st
return st
def wait(self, timeout=None):
if self._last_status is None:
return
self._last_status.wait(timeout=timeout)
def mv(self, position, timeout=None, wait=False, log=True):
"""
Absolute move to a position.
Parameters
----------
position
Desired end position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
            will be used.
wait : bool, optional
If `True`, wait for motion completion before returning.
Defaults to :keyword:`False`.
log : bool, optional
If `True`, logs the move at INFO level.
"""
if log:
self._log_move(position)
try:
self.move(position, timeout=timeout, wait=wait)
except ophyd.utils.LimitError:
return
if wait and log:
self._log_move_end()
def wm(self):
"""Get the mover's current positon (where motor)."""
return self.position
def __call__(self, position=None, timeout=None, wait=False, log=True):
"""
Dispatches to :meth:`mv` or :meth:`wm` based on the arguments.
Calling the object will either move the object or get the current
position, depending on if the position argument is given. See the
docstrings for :meth:`mv` and :meth:`wm`.
"""
if position is None:
return self.wm()
else:
self.mv(position, timeout=timeout, wait=wait, log=log)
def camonitor(self):
"""
Shows a live-updating motor position in the terminal.
This will be the value that is returned by the :attr:`position`
attribute.
This method ends cleanly at a ctrl+c or after a call to
:meth:`end_monitor_thread`, which may be useful when this is called in
a background thread.
"""
try:
self._mov_ev.clear()
while not self._mov_ev.is_set():
print("\r {0:4f}".format(self.wm()), end=" ")
self._mov_ev.wait(0.1)
except KeyboardInterrupt:
pass
finally:
self._mov_ev.clear()
# Legacy alias
def wm_update(self):
return self.camonitor()
wm_update.__doc__ = camonitor.__doc__
def end_monitor_thread(self):
"""
Stop a :meth:`camonitor` or :meth:`wm_update` that is running in
another thread.
"""
self._mov_ev.set()
class FltMvInterface(MvInterface):
"""
Extension of :class:`MvInterface` for when the position is a float.
This lets us do more with the interface, such as relative moves.
Attributes
----------
presets : :class:`Presets`
Manager for preset positions.
"""
tab_whitelist = ["mvr", "umv", "umvr", "mv_ginput", "tweak",
"presets", "mv_.*", "wm_.*", "umv_.*"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.presets = Presets(self)
def wm(self):
pos = super().wm()
try:
return pos[0]
except Exception:
return pos
def mvr(self, delta, timeout=None, wait=False, log=True):
"""
Relative move from this position.
Parameters
----------
delta : float
Desired change in position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
            will be used.
wait : bool, optional
If `True`, wait for motion completion before returning.
Defaults to :keyword:`False`.
log : bool, optional
If `True`, logs the move at INFO level.
"""
self.mv(delta + self.wm(), timeout=timeout, wait=wait, log=log)
def umv(self, position, timeout=None, log=True, newline=True):
"""
Move to a position, wait, and update with a progress bar.
Parameters
----------
position : float
Desired end position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
            will be used.
log : bool, optional
If True, logs the move at INFO level.
newline : bool, optional
If True, inserts a newline after the updates.
"""
if log:
self._log_move(position)
try:
status = self.move(position, timeout=timeout, wait=False)
except ophyd.utils.LimitError:
return
pgb = AbsProgressBar([status])
try:
status.wait()
# Avoid race conditions involving the final update
pgb.manual_update()
pgb.no_more_updates()
except KeyboardInterrupt:
pgb.no_more_updates()
self.stop()
if pgb.has_updated and newline:
# If we made progress bar prints, we need an extra newline
print()
if log:
self._log_move_end()
def umvr(self, delta, timeout=None, log=True, newline=True):
"""
Relative move from this position, wait, and update with a progress bar.
Parameters
----------
delta : float
Desired change in position.
timeout : float, optional
If provided, the mover will throw an error if motion takes longer
than timeout to complete. If omitted, the mover's default timeout
            will be used.
log : bool, optional
If True, logs the move at INFO level.
newline : bool, optional
If True, inserts a newline after the updates.
"""
self.umv(delta + self.wm(), timeout=timeout, log=log, newline=newline)
def mv_ginput(self, timeout=None):
"""
Moves to a location the user clicks on.
If there are existing plots, this will be the position on the most
recently active plot. If there are no existing plots, an empty plot
will be created with the motor's limits as the range.
"""
# Importing forces backend selection, so do inside method
import matplotlib.pyplot as plt # NOQA
logger.info(("Select new motor x-position in current plot "
"by mouseclick"))
if not plt.get_fignums():
upper_limit = 0
lower_limit = self.limits[0]
if self.limits[0] == self.limits[1]:
upper_limit = self.limits[0]+100
else:
upper_limit = self.limits[1]
limit_plot = []
for x in range(lower_limit, upper_limit):
limit_plot.append(x)
plt.plot(limit_plot)
pos = plt.ginput(1)[0][0]
try:
self.move(pos, timeout=timeout)
except ophyd.utils.LimitError:
return
def tweak(self, scale=0.1):
"""
Control this motor using the arrow keys.
Use left arrow to step negative and right arrow to step positive.
Use up arrow to increase step size and down arrow to decrease step
size. Press q or ctrl+c to quit.
Parameters
----------
scale : float
starting step size, default = 0.1
"""
return tweak_base(self, scale=scale)
def set_position(self, position):
"""
Alias for set_current_position.
Will fail if the motor does not have set_current_position.
"""
self.set_current_position(position)
def setup_preset_paths(**paths):
"""
Prepare the :class:`Presets` class.
Sets the paths for saving and loading presets.
Parameters
----------
**paths : str keyword args
A mapping from type of preset to destination path. These will be
directories that contain the yaml files that define the preset
positions.
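    Examples
    --------
    A minimal sketch (the preset type name and path below are illustrative
    assumptions)::
        setup_preset_paths(hutch='/tmp/presets/hutch')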
"""
Presets._paths = {}
for k, v in paths.items():
Presets._paths[k] = Path(v)
for preset in Presets._registry:
preset.sync()
class Presets:
"""
Manager for device preset positions.
This provides methods for adding new presets, checking which presets are
active, and related utilities.
It will install the :meth:`mv_presetname` and :meth:`wm_presetname` methods
onto the associated device, and the :meth:`add_preset` and
:meth:`add_preset_here` methods onto itself.
Parameters
----------
device : :class:`~ophyd.device.Device`
The device to manage saved preset positions for. It must implement the
:class:`FltMvInterface`.
Attributes
----------
positions : :class:`~types.SimpleNamespace`
A namespace that contains all of the active presets as
:class:`PresetPosition` objects.
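    Examples
    --------
    A usage sketch once preset paths are configured (the preset type
    ``hutch`` and preset name ``sample`` are illustrative assumptions)::
        motor.presets.add_hutch('sample', value=1.23, comment='sample pos')
        motor.mv_sample()
        motor.wm_sample()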
"""
_registry = WeakSet()
_paths = {}
def __init__(self, device):
self._device = device
self._methods = []
self._fd = None
self._registry.add(self)
self.name = device.name + '_presets'
self.sync()
def _path(self, preset_type):
"""Utility function to get the preset file :class:`~pathlib.Path`."""
path = self._paths[preset_type] / (self._device.name + '.yml')
logger.debug('select presets path %s', path)
return path
def _read(self, preset_type):
"""Utility function to get a particular preset's datum dictionary."""
logger.debug('read presets for %s', self._device.name)
with self._file_open_rlock(preset_type) as f:
f.seek(0)
return yaml.full_load(f) or {}
def _write(self, preset_type, data):
"""
Utility function to overwrite a particular preset's datum dictionary.
"""
logger.debug('write presets for %s', self._device.name)
with self._file_open_rlock(preset_type) as f:
f.seek(0)
yaml.dump(data, f, default_flow_style=False)
f.truncate()
@contextmanager
def _file_open_rlock(self, preset_type, timeout=1.0):
"""
File locking context manager for this object.
        Works like threading.RLock in that you can acquire it multiple times
safely.
Parameters
----------
        preset_type : str
            The type of preset, used to select which file to lock.
        timeout : float, optional
            Time in seconds to wait for the file lock before giving up.
Raises
------
BlockingIOError
If we cannot acquire the file lock.
"""
if self._fd is None:
path = self._path(preset_type)
with open(path, 'r+') as fd:
# Set up file lock timeout with a raising handler
# We will need this handler due to PEP 475
def interrupt(signum, frame):
raise InterruptedError()
old_handler = signal.signal(signal.SIGALRM, interrupt)
try:
signal.setitimer(signal.ITIMER_REAL, timeout)
fcntl.flock(fd, fcntl.LOCK_EX)
except InterruptedError:
# Ignore interrupted and proceed to cleanup
pass
finally:
# Clean up file lock timeout
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
# Error now if we still can't get the lock.
# Getting lock twice is safe.
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
logger.debug('acquired lock for %s', path)
self._fd = fd
yield fd
fcntl.flock(fd, fcntl.LOCK_UN)
logger.debug('released lock for %s', path)
self._fd = None
else:
logger.debug('using already open file descriptor')
yield self._fd
def _update(self, preset_type, name, value=None, comment=None,
active=True):
"""
Utility function to update a preset position.
        Reads the existing preset's datum, updates the value, the comment, and
the active state, and then writes the datum back to the file, updating
the history accordingly.
"""
logger.debug(('call %s presets._update(%s, %s, value=%s, comment=%s, '
'active=%s)'), self._device.name, preset_type, name,
value, comment, active)
if not isinstance(name, str):
raise TypeError(('name must be of type <str>, not type'
'{}'.format(type(name))))
if value is not None and not isinstance(value, numbers.Real):
raise TypeError(('value must be a real numeric type, not type'
'{}'.format(type(value))))
try:
path = self._path(preset_type)
if not path.exists():
path.touch()
path.chmod(0o666)
with self._file_open_rlock(preset_type):
data = self._read(preset_type)
if value is None and comment is not None:
value = data[name]['value']
if value is not None:
if name not in data:
data[name] = {}
ts = time.strftime('%d %b %Y %H:%M:%S')
data[name]['value'] = value
history = data[name].get('history', {})
if comment:
comment = ' ' + comment
else:
comment = ''
history[ts] = '{:10.4f}{}'.format(value, comment)
data[name]['history'] = history
if active:
data[name]['active'] = True
else:
data[name]['active'] = False
self._write(preset_type, data)
except BlockingIOError:
self._log_flock_error()
def sync(self):
"""Synchronize the presets with the database."""
logger.debug('call %s presets.sync()', self._device.name)
self._remove_methods()
self._cache = {}
logger.debug('filling %s cache', self.name)
for preset_type in self._paths.keys():
path = self._path(preset_type)
if path.exists():
try:
self._cache[preset_type] = self._read(preset_type)
except BlockingIOError:
self._log_flock_error()
else:
logger.debug('No %s preset file for %s',
preset_type, self._device.name)
self._create_methods()
def _log_flock_error(self):
logger.error(('Unable to acquire file lock for %s. '
'File may be being edited by another user.'), self.name)
logger.debug('', exc_info=True)
def _create_methods(self):
"""
Create the dynamic methods based on the configured paths.
Add methods to this object for adding presets of each type, add
methods to the associated device to move and check each preset, and
add :class:`PresetPosition` instances to :attr:`.positions` for
each preset name.
"""
logger.debug('call %s presets._create_methods()', self._device.name)
for preset_type in self._paths.keys():
add, add_here = self._make_add(preset_type)
self._register_method(self, 'add_' + preset_type, add)
self._register_method(self, 'add_here_' + preset_type, add_here)
for preset_type, data in self._cache.items():
for name, info in data.items():
if info['active']:
mv, umv = self._make_mv_pre(preset_type, name)
wm = self._make_wm_pre(preset_type, name)
self._register_method(self._device, 'mv_' + name, mv)
self._register_method(self._device, 'umv_' + name, umv)
self._register_method(self._device, 'wm_' + name, wm)
setattr(self.positions, name,
PresetPosition(self, preset_type, name))
def _register_method(self, obj, method_name, method):
"""
Utility function for managing dynamic methods.
Adds a method to the :attr:`._methods` list and binds the method to an
object.
"""
logger.debug('register method %s to %s', method_name, obj.name)
self._methods.append((obj, method_name))
setattr(obj, method_name, MethodType(method, obj))
if hasattr(obj, '_tab'):
obj._tab.add(method_name)
def _make_add(self, preset_type):
"""
Create the functions that add preset positions.
Creates suitable versions of :meth:`.add` and :meth:`.add_here` for a
particular preset type, e.g. ``add_preset_type`` and
``add_here_preset_type``.
"""
def add(self, name, value=None, comment=None):
"""
Add a preset position of type "{}".
Parameters
----------
name : str
The name of the new preset position.
value : float, optional
The value of the new preset_position. If unspecified, uses
the current position.
comment : str, optional
A comment to associate with the preset position.
"""
if value is None:
value = self._device.wm()
self._update(preset_type, name, value=value,
comment=comment)
self.sync()
def add_here(self, name, comment=None):
"""
Add a preset of the current position of type "{}".
Parameters
----------
name : str
The name of the new preset position.
comment : str, optional
A comment to associate with the preset position.
"""
add(self, name, self._device.wm(), comment=comment)
add.__doc__ = add.__doc__.format(preset_type)
add_here.__doc__ = add_here.__doc__.format(preset_type)
return add, add_here
def _make_mv_pre(self, preset_type, name):
"""
Create the functions that move to preset positions.
        Creates suitable versions of :meth:`~MvInterface.mv` and
:meth:`~MvInterface.umv` for a particular preset type and name
e.g. ``mv_sample``.
"""
def mv_pre(self, timeout=None, wait=False):
"""
Move to the {} preset position.
Parameters
----------
timeout : float, optional
If provided, the mover will throw an error if motion takes
longer than timeout to complete. If omitted, the mover's
                default timeout will be used.
wait : bool, optional
If `True`, wait for motion completion before
returning. Defaults to :keyword:`False`.
"""
pos = self.presets._cache[preset_type][name]['value']
self.mv(pos, timeout=timeout, wait=wait)
def umv_pre(self, timeout=None):
"""
Update move to the {} preset position.
Parameters
----------
timeout : float, optional
If provided, the mover will throw an error if motion takes
longer than timeout to complete. If omitted, the mover's
                default timeout will be used.
"""
pos = self.presets._cache[preset_type][name]['value']
self.umv(pos, timeout=timeout)
mv_pre.__doc__ = mv_pre.__doc__.format(name)
umv_pre.__doc__ = umv_pre.__doc__.format(name)
return mv_pre, umv_pre
def _make_wm_pre(self, preset_type, name):
"""
Create a method to get the offset from a preset position.
Creates a suitable version of :meth:`~MvInterface.wm` for a particular
preset type and name e.g. ``wm_sample``.
"""
def wm_pre(self):
"""
Check the offset from the {} preset position.
Returns
-------
offset : float
How far we are from the preset position. If this is near zero,
                we are at the position. If this is positive, the preset position
is in the positive direction from us.
"""
pos = self.presets._cache[preset_type][name]['value']
return pos - self.wm()
wm_pre.__doc__ = wm_pre.__doc__.format(name)
return wm_pre
def _remove_methods(self):
"""Remove all methods created in the last call to _create_methods."""
logger.debug('call %s presets._remove_methods()', self._device.name)
for obj, method_name in self._methods:
try:
delattr(obj, method_name)
except AttributeError:
pass
if hasattr(obj, '_tab'):
obj._tab.remove(method_name)
self._methods = []
self.positions = SimpleNamespace()
@property
def has_presets(self):
"""
Returns True if any preset positions are defined.
"""
return bool(self.positions.__dict__)
def state(self):
"""
Return the current active preset state name.
This will be the state string name, or Unknown if we're not at any
state.
"""
state = 'Unknown'
closest = 0.5
for device, method_name in self._methods:
if method_name.startswith('wm_'):
state_name = method_name.replace('wm_', '', 1)
wm_state = getattr(device, method_name)
diff = abs(wm_state())
if diff < closest:
state = state_name
closest = diff
return state
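# Illustrative sketch of the dynamically generated API (added; assumes a
# hypothetical FltMvInterface motor `mot` and a configured preset type 'exp'):
#
#     mot.presets.add_exp('sample', value=12.5, comment='aligned on camera')
#     mot.mv_sample()              # move to the saved position
#     mot.wm_sample()              # offset from the saved position
#     mot.presets.state()          # 'sample' when within 0.5 of a preset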
class PresetPosition:
"""
Manager for a single preset position.
Parameters
----------
presets : :class:`Presets`
The main :class:`Presets` object that manages this position.
name : str
The name of this preset position.
"""
def __init__(self, presets, preset_type, name):
self._presets = presets
self._preset_type = preset_type
self._name = name
def update_pos(self, pos=None, comment=None):
"""
Change this preset position and save it.
Parameters
----------
pos : float, optional
The position to use for this preset. If omitted, we'll use the
current position.
comment : str, optional
A comment to associate with the preset position.
"""
if pos is None:
pos = self._presets._device.wm()
self._presets._update(self._preset_type, self._name, value=pos,
comment=comment)
self._presets.sync()
def update_comment(self, comment):
"""
Revise the most recent comment in the preset history.
Parameters
----------
comment : str
A comment to associate with the preset position.
"""
self._presets._update(self._preset_type, self._name, comment=comment)
self._presets.sync()
def deactivate(self):
"""
Deactivate a preset from a device.
This can always be undone unless you edit the underlying file.
"""
self._presets._update(self._preset_type, self._name, active=False)
self._presets.sync()
@property
def info(self):
"""All information associated with this preset, returned as a dict."""
return self._presets._cache[self._preset_type][self._name]
@property
def pos(self):
"""The set position of this preset, returned as a float."""
return self.info['value']
@property
def history(self):
"""
        The position history associated with this preset, returned as a dict.
"""
return self.info['history']
@property
def path(self):
"""The filepath that defines this preset, returned as a string."""
return str(self._presets._path(self._preset_type))
def __repr__(self):
return str(self.pos)
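# Illustrative sketch of PresetPosition usage (added; continues the
# hypothetical `mot` / 'sample' example above):
#
#     pp = mot.presets.positions.sample
#     pp.update_pos(comment='re-aligned after beam drift')
#     pp.pos, pp.history           # saved value and its edit history
#     pp.deactivate()              # drops mv_sample/umv_sample/wm_sample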
def tweak_base(*args, scale=0.1):
"""
Base function to control motors with the arrow keys.
With one motor, you can use the right and left arrow keys to move + and -.
With two motors, you can also use the up and down arrow keys for the second
motor.
    Three or more motors are not yet supported.
The scale for the tweak can be doubled by pressing + and halved by pressing
-. Shift+up and shift+down can also be used, and the up and down keys will
also adjust the scaling in one motor mode. The starting scale can be set
with the keyword argument `scale`.
Ctrl+c will stop an ongoing move during a tweak without exiting the tweak.
Both q and ctrl+c will quit the tweak between moves.
"""
up = utils.arrow_up
down = utils.arrow_down
left = utils.arrow_left
right = utils.arrow_right
shift_up = utils.shift_arrow_up
shift_down = utils.shift_arrow_down
plus = utils.plus
minus = utils.minus
abs_status = '{}: {:.4f}'
exp_status = '{}: {:.4e}'
if len(args) == 1:
move_keys = (left, right)
scale_keys = (up, down, plus, minus, shift_up, shift_down)
elif len(args) == 2:
move_keys = (left, right, up, down)
scale_keys = (plus, minus, shift_up, shift_down)
def show_status():
if scale >= 0.0001:
template = abs_status
else:
template = exp_status
text = [template.format(mot.name, mot.wm()) for mot in args]
text.append(f'scale: {scale}')
print('\x1b[2K\r' + ', '.join(text), end='')
def usage():
print() # Newline
if len(args) == 1:
print(" Left: move x motor backward")
print(" Right: move x motor forward")
print(" Up or +: scale*2")
print(" Down or -: scale/2")
else:
print(" Left: move x motor left")
print(" Right: move x motor right")
print(" Down: move y motor down")
print(" Up: move y motor up")
print(" + or Shift_Up: scale*2")
print(" - or Shift_Down: scale/2")
print(" Press q to quit."
" Press any other key to display this message.")
print() # Newline
def edit_scale(scale, direction):
"""Function used to change the scale."""
if direction in (up, shift_up, plus):
scale = scale*2
elif direction in (down, shift_down, minus):
scale = scale/2
return scale
def movement(scale, direction):
"""Function used to know when and the direction to move the motor."""
try:
if direction == left:
args[0].umvr(-scale, log=False, newline=False)
elif direction == right:
args[0].umvr(scale, log=False, newline=False)
elif direction == up:
args[1].umvr(scale, log=False, newline=False)
elif direction == down:
args[1].umvr(-scale, log=False, newline=False)
except Exception as exc:
logger.error('Error in tweak move: %s', exc)
logger.debug('', exc_info=True)
start_text = ['{} at {:.4f}'.format(mot.name, mot.wm()) for mot in args]
logger.info('Started tweak of ' + ', '.join(start_text))
# Loop takes in user key input and stops when 'q' is pressed
is_input = True
while is_input is True:
show_status()
inp = utils.get_input()
if inp in ('q', None):
is_input = False
elif inp in move_keys:
movement(scale, inp)
elif inp in scale_keys:
scale = edit_scale(scale, inp)
else:
usage()
print()
logger.info('Tweak complete')
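# Minimal usage sketch (added; `mot_x` and `mot_y` are hypothetical motors
# implementing FltMvInterface):
#
#     tweak_base(mot_x)                    # left/right move, up/down rescale
#     tweak_base(mot_x, mot_y, scale=0.5)  # arrows drive both motors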
class AbsProgressBar(ProgressBar):
"""Progress bar that displays the absolute position as well."""
def __init__(self, *args, **kwargs):
self._last_position = None
self._name = None
self._no_more = False
self._manual_cbs = []
self.has_updated = False
super().__init__(*args, **kwargs)
# Allow manual updates for a final status print
for i, obj in enumerate(self.status_objs):
self._manual_cbs.append(functools.partial(self._status_cb, i))
def _status_cb(self, pos, status):
self.update(pos, name=self._name, current=self._last_position)
def update(self, *args, name=None, current=None, **kwargs):
# Escape hatch to avoid post-command prints
if self._no_more:
return
# Get cached position and name so they can always be displayed
current = current or self._last_position
self._name = self._name or name
name = self._name
try:
if isinstance(current, typing.Sequence):
# Single-valued pseudo positioner values can come through here.
assert len(current) == 1
current, = current
current = float(current)
# Expand name to include position to display with progress bar
# TODO: can we get access to the signal's precision?
if 0.0 < abs(current) < 1e-6:
fmt = '{}: ({:.4g})'
else:
fmt = '{}: ({:.4f})'
name = fmt.format(name, current)
self._last_position = current
except Exception:
# Fallback if there is no position data at all
name = name or self._name or 'motor'
try:
# Actually draw the bar
super().update(*args, name=name, current=current, **kwargs)
if not self._no_more:
self.has_updated = True
except Exception:
# Print method failure should never print junk to the screen
logger.debug('Error in progress bar update', exc_info=True)
def manual_update(self):
"""Execute a manual update of the progress bar."""
for cb, status in zip(self._manual_cbs, self.status_objs):
cb(status)
def no_more_updates(self):
"""Prevent all future prints from the progress bar."""
self.fp = NullFile()
self._no_more = True
self._manual_cbs.clear()
class NullFile:
def write(*args, **kwargs):
pass
class LightpathMixin(OphydObject):
"""
Mix-in class that makes it easier to establish a lightpath interface.
Use this on classes that are not state positioners but would still like to
be used as a top-level device in lightpath.
"""
SUB_STATE = 'state'
_default_sub = SUB_STATE
# Component names whose values are relevant for inserted/removed
lightpath_cpts = []
# Flag to signify that subclass is another mixin, rather than a device
_lightpath_mixin = False
def __init__(self, *args, **kwargs):
self._lightpath_values = {}
self._lightpath_ready = False
self._retry_lightpath = False
super().__init__(*args, **kwargs)
def __init_subclass__(cls, **kwargs):
# Magic to subscribe to the list of components
super().__init_subclass__(**kwargs)
if cls._lightpath_mixin:
# Child of cls will inherit this as False
cls._lightpath_mixin = False
else:
if not cls.lightpath_cpts:
raise NotImplementedError('Did not implement LightpathMixin')
for cpt_name in cls.lightpath_cpts:
cpt = getattr(cls, cpt_name)
cpt.sub_default(cls._update_lightpath)
def _set_lightpath_states(self, lightpath_values):
# Override based on the use case
# update self._inserted, self._removed,
# and optionally self._transmission
# Should return a dict or None
raise NotImplementedError('Did not implement LightpathMixin')
def _update_lightpath(self, *args, obj, **kwargs):
try:
# Universally cache values
self._lightpath_values[obj] = kwargs
# Only do the first lightpath state once all cpts have chimed in
if len(self._lightpath_values) >= len(self.lightpath_cpts):
self._retry_lightpath = False
# Pass user function the full set of values
self._set_lightpath_states(self._lightpath_values)
self._lightpath_ready = not self._retry_lightpath
if self._lightpath_ready:
# Tell lightpath to update
self._run_subs(sub_type=self.SUB_STATE)
elif self._retry_lightpath and not self._destroyed:
# Use this when the device wasn't ready to set states
kw = dict(obj=obj)
kw.update(kwargs)
utils.schedule_task(self._update_lightpath,
args=args, kwargs=kw, delay=1.0)
except Exception:
# Without this, callbacks fail silently
logger.exception('Error in lightpath update callback for %s.',
self.name)
@property
def inserted(self):
return self._lightpath_ready and bool(self._inserted)
@property
def removed(self):
return self._lightpath_ready and bool(self._removed)
@property
def transmission(self):
try:
return self._transmission
except AttributeError:
if self.inserted:
return 0
else:
return 1
class LightpathInOutMixin(LightpathMixin):
"""
LightpathMixin for parent device with InOut subdevices.
Also works recursively on other LightpathInOutMixin subclasses.
"""
_lightpath_mixin = True
def _set_lightpath_states(self, lightpath_values):
in_check = []
out_check = []
trans_check = []
for obj, kwarg_dct in lightpath_values.items():
if isinstance(obj, LightpathInOutMixin):
# The inserted/removed are always just a getattr
# Therefore, they are safe to call in a callback
in_check.append(obj.inserted)
out_check.append(obj.removed)
trans_check.append(obj.transmission)
else:
if not obj._state_initialized:
                    # Without state initialization, check_inserted, etc. would fail
self._retry_lightpath = True
return
# Inserted/removed are not getattr, they can check EPICS
# Instead, check status against the callback kwarg dict
in_check.append(obj.check_inserted(kwarg_dct['value']))
out_check.append(obj.check_removed(kwarg_dct['value']))
trans_check.append(obj.check_transmission(kwarg_dct['value']))
self._inserted = any(in_check)
self._removed = all(out_check)
self._transmission = functools.reduce(lambda a, b: a*b, trans_check)
return dict(in_check=in_check, out_check=out_check,
trans_check=trans_check)
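# Minimal subclass sketch (added; `Device`, `Cpt` and `EpicsSignalRO` are from
# ophyd, and the PV and class names are hypothetical). `lightpath_cpts` drives
# the subscriptions installed in __init_subclass__, and _set_lightpath_states
# interprets the cached callback values:
#
#     class Stopper(LightpathMixin, Device):
#         lightpath_cpts = ['state']
#         state = Cpt(EpicsSignalRO, ':STATE')
#
#         def _set_lightpath_states(self, lightpath_values):
#             value = lightpath_values[self.state]['value']
#             self._inserted = (value == 0)
#             self._removed = (value == 1)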
# ---- source: docs/conf.py (repo: watsonpy/watson-html, license: BSD-3-Clause) ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Watson - Html documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 17 14:49:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import watson.html
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Watson - Html'
copyright = '2014, Simon Coulton'
version = watson.html.__version__
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'Watson-Htmldoc'
# html_sidebars = {}
html_show_sourcelink = False
html_show_sphinx = False
# -- Options for manual page output ---------------------------------------
man_pages = [
('index', 'watson-html', 'Watson - Html Documentation',
['Simon Coulton'], 1)
]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
('index', 'Watson-Html', 'Watson - Html Documentation',
'Simon Coulton', 'Watson-Html', 'Utility methods for dealing with HTML.',
'Miscellaneous'),
]
texinfo_appendices = []
# Intersphinx Mapping
# Autodoc
def skip(app, what, name, obj, skip, options):
if name == '__init__':
return False
elif name in ('__module__', '__doc__', '__abstractmethods__'):
return True
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
# ---- source: zippy/edu.uci.python.test/src/tests/megaguards/reduction/test9.py (repo: securesystemslab/zippy-megaguards, license: BSD-3-Clause) ----
from functools import reduce
def t(a, b):
""" @MG:reduce-on """
return a + b + 3
print(reduce(t, [ i+1 for i in range(100)]))
# ---- source: easy/leetcode58.py (repo: ayang818/LeetCode, license: MIT) ----
class Solution(object):
def lengthOfLastWord(self, s):
"""
:type s: str
:rtype: int
"""
count = 0
for i in s[::-1]:
if i != " ":
count+=1
else:
if count:
break
return count
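# Example (added): trailing spaces are skipped before the last word is counted.
#
#     >>> Solution().lengthOfLastWord("Hello World ")
#     5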
# ---- source: gpytorch/functions/dsmm.py (repo: daviswert/gpytorch, license: MIT) ----
import torch
from torch.autograd import Function, Variable
from ..utils import bdsmm
class DSMM(Function):
def __init__(self, sparse):
if isinstance(sparse, Variable):
sparse = sparse.data
self.sparse = sparse
def forward(self, dense):
if self.sparse.ndimension() == 3:
return bdsmm(self.sparse, dense)
else:
return torch.dsmm(self.sparse, dense)
def backward(self, grad_output):
if self.sparse.ndimension() == 3:
return bdsmm(self.sparse.transpose(1, 2), grad_output)
else:
return torch.dsmm(self.sparse.t(), grad_output)
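# Usage sketch (added; assumes the legacy torch.autograd.Function calling
# convention this class targets, with hypothetical `sparse` and `dense`
# tensors of compatible shapes):
#
#     out = DSMM(sparse)(dense)   # sparse @ dense; backward multiplies the
#                                 # incoming gradient by sparse transposed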
# ---- source: mysite/urls.py (repo: KaptanSingh2020/pyblog, license: MIT) ----
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('personal.urls')),
url(r'^blog/', include('blog.urls')),
]
# ---- source: src/utils/helpers.py (repo: realprocrastinator/DIMY-grp, license: MIT) ----
# can either take bytes type or hex string
def bytes_or_hexstr_to_decimal(s, typename):
    # int in Python can handle arbitrarily large numbers
if typename == "bytes":
return int(s.hex(), 16)
elif typename == "str":
return int(s, 16)
else:
raise TypeError("Can only handle str or bytes type.")
def ephid_bytes_or_hexstr_to_decimal(ephid, length=16):
if isinstance(ephid, bytes):
if (len(ephid) != length):
raise ValueError(f"EphID must be {length} bytes long")
return bytes_or_hexstr_to_decimal(ephid, "bytes")
elif isinstance(ephid, str):
        # a hex string takes two characters to represent each raw byte
if (len(ephid) != length * 2):
raise ValueError(f"EphID must be {length} bytes long")
return bytes_or_hexstr_to_decimal(ephid, "str")
else:
raise TypeError("Can only handle str or bytes type.")
def ephid_decimal_to_bytes(n, length=16):
if not isinstance(n, int):
raise TypeError("Can only handle int type.")
    # 128 bits map to 16 bytes; encode with big-endian byte order to match the hex form
return n.to_bytes(length, "big")
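def _example_round_trip():
    # Illustrative sketch (added; not part of the original module): the bytes
    # and hex-string forms of a 16-byte EphID decode to the same integer, and
    # ephid_decimal_to_bytes inverts the conversion.
    ephid = bytes(range(16))
    as_int = ephid_bytes_or_hexstr_to_decimal(ephid)
    assert as_int == ephid_bytes_or_hexstr_to_decimal(ephid.hex())
    assert ephid_decimal_to_bytes(as_int) == ephid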
# ---- source: run.py (repo: natune/AdventCalendar, license: MIT) ----
from app import create_app
if __name__ == '__main__':
app = create_app()
app.run(host=app.config['HOST'], port=app.config['PORT'])
# ---- source: BFGS.py (repo: QSCTech-Sange/Optimization_Project, license: Apache-2.0) ----
from func_tools import *
import pandas as pd
def backtrack(X,func,gX,d,B,alpha=1,gamma=0.01,sigma=0.5):
    right = gamma*mat2vec(gX).T.dot(mat2vec(d))  ## vectorize first, then transpose
    ## alpha is applied in the while condition below, so `right` must not include it
while func(X+alpha*d,B) - func(X,B)> alpha * right:
alpha = alpha * sigma
return alpha
def BFGS(X,func,grad,tol,p=1):
B = gen_B(len(X))
D = B.T
n = len(X)
d = len(X[0])
H = p*np.eye(n*d)
gX = grad(X,B,D)
norm_2 = norm(gX)
tol = tol**2
loss = [func(X,B)]
while(norm_2 > tol):
dk = - H.dot(mat2vec(gX))
dk = vec2mat(dk,n,d)
step_size = backtrack(X,func,gX,dk,B)
X_1 = X
X = X + step_size*dk
gX_1 = grad(X_1,B,D)
gX = grad(X,B,D)
norm_2 = norm(gX)
loss.append(func(X,B))
print("nomr_2:", norm_2)
s = mat2vec(X - X_1)
y = mat2vec(gX - gX_1)
if(s.T.dot(y))<0:
H = H
else:
w = s - H.dot(y)
H = H + ((np.outer(w,s) + np.outer(s,w)) / s.T.dot(y)) - (w.T.dot(y) / (s.T.dot(y))**2 ) * np.outer(s,s)
print("s.T.dot(y):",s.T.dot(y))
return X, loss
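# Note (added): with w = s - H.dot(y), the H update inside BFGS() above is
# algebraically the standard BFGS inverse-Hessian update. A minimal numpy
# sketch of the more common factored form is given below for comparison
# (helper added here for illustration only; it is not used by the code above):
def _bfgs_inverse_update(H, s, y):
    """Standard BFGS update of inverse Hessian H for step s and gradient change y."""
    import numpy as np
    rho = 1.0 / s.dot(y)
    V = np.eye(len(s)) - rho * np.outer(s, y)
    return V.dot(H).dot(V.T) + rho * np.outer(s, s)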
# ---- source: tests/__init__.py (repo: mattkinsey/bucky-testing, license: MIT) ----
"""Unit test package for bucky_testing."""
# ---- source: cap2/pipeline/utils/conda.py (repo: nanusefue/CAP2-1, license: MIT) ----
import luigi
import os
import sys
import subprocess
import logging
import yaml
from ..config import PipelineConfig
logger = logging.getLogger('cap2')
class SpecificationError(Exception):
pass
class CondaEnv(luigi.Task):
name = luigi.Parameter()
python = luigi.Parameter(default='3.7')
config_filename = luigi.Parameter()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = PipelineConfig(self.config_filename)
self.spec_dir = self.config.conda_spec_dir
self.base_path = self.config.conda_base_path
self.path = os.path.join(self.base_path, self.name)
self.spec_file = os.path.abspath(os.path.join(
self.spec_dir, '{}.yml'.format(self.name)
))
if not os.path.isdir(self.spec_dir):
os.makedirs(self.spec_dir)
if not os.path.isfile(self.spec_file):
with open(self.spec_file, 'w') as f:
f.write(
f'''
name: {self.name}
channels:
- defaults
'''
)
if not os.path.isdir(self.path):
os.makedirs(self.path)
@property
def bin(self):
return os.path.join(self.path, "bin")
@property
def pip(self):
return os.path.join(self.bin, "pip")
def add_to_path(self):
"""Add the bin folder to PATH if not already present."""
if self.bin not in os.environ['PATH']:
os.environ['PATH'] = os.environ['PATH'] + f':{self.bin}'
def get_path(self, tool):
return os.path.join(self.bin, tool)
def save_spec(self):
proc = subprocess.Popen(
' '.join(['conda', 'env', 'export', '--name', self.name]),
stdout=subprocess.PIPE,
shell=True
)
stdout = proc.communicate()[0]
        # the output of `conda env export` is only a valid env description
        # once the line starting with "prefix: " is removed
with open(self.spec_file, 'w') as f:
for line in stdout.decode('utf-8').splitlines():
if "prefix: " in line:
continue
f.write(line)
f.write('\n')
def contains(self, package):
try:
with open(self.spec_file, 'r') as f:
deps = yaml.safe_load(f)
deps = deps.get('dependencies', [])
while True:
try:
dep = deps.pop()
except IndexError:
break
try:
if dep.startswith(package):
return True
except AttributeError:
deps += [x for x in dep.values()][0]
return False
except FileNotFoundError:
return False
def install(self, package, channel="anaconda"):
cmd = [
'conda', 'install',
'-p', self.path,
'-c', channel,
package, '-y'
]
logger.info('conda-installing: {} with {}'.format(package, ' '.join(cmd)))
try:
subprocess.check_call(' '.join(cmd), shell=True)
except:
print(f'Subprocess failed from {os.getcwd()}: {cmd}', file=sys.stderr)
raise
self.save_spec()
self.add_to_path()
def pypi_install(self, package):
cmd = [
self.pip,
'install',
package,
]
logger.info('pypi-installing: {} with {}'.format(package, ' '.join(cmd)))
try:
subprocess.check_call(' '.join(cmd), shell=True)
except:
print(f'Subprocess failed from {os.getcwd()}: {cmd}', file=sys.stderr)
raise
self.save_spec()
self.add_to_path()
def run(self):
"""
init conda env
"""
if True:
cmd = [
'conda', 'env', 'create', '-f',
self.spec_file, '-p', self.path, "python={}".format(self.python),
]
logger.info('init conda env: {}'.format(' '.join(cmd)))
subprocess.check_call(cmd)
else:
cmd = [
'conda', 'create', '-p', self.path, "python={}".format(self.python), '-y'
]
logger.info('init conda env: {}'.format(' '.join(cmd)))
try:
subprocess.check_call(' '.join(cmd), shell=True)
except:
print(f'Subprocess failed from {os.getcwd()}: {cmd}', file=sys.stderr)
raise
self.save_spec()
self.install('pip')
def output(self):
return luigi.LocalTarget(self.pip)
class CondaPackage(luigi.Task):
package = luigi.Parameter()
config_filename = luigi.Parameter()
executable = luigi.Parameter()
channel = luigi.Parameter(default="anaconda")
env = luigi.Parameter(default="CAP_v2")
version = luigi.Parameter(default="None")
python = luigi.IntParameter(default=3)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._env = CondaEnv(
name=self.env, python=self.python, config_filename=self.config_filename
)
self.bin = os.path.join(
self._env.bin, self.executable
)
def requires(self):
return self._env
def output(self):
return luigi.LocalTarget(
self.bin
)
def complete(self):
return self.output().exists()
def related_tool(self, name):
return self._env.get_path(name)
def run(self):
if not self._env.contains(self.package):
self._env.install(self.package, self.channel)
if not self.output().exists():
raise SpecificationError(
f'Tool {self.package} was not correctly installed'
)
class PyPiPackage(luigi.Task):
package = luigi.Parameter()
config_filename = luigi.Parameter()
executable = luigi.Parameter()
env = luigi.Parameter(default="CAP_v2")
version = luigi.Parameter(default="None")
python = luigi.IntParameter(default=3)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._env = CondaEnv(
name=self.env, python=self.python, config_filename=self.config_filename
)
self.bin = os.path.join(
self._env.bin, self.executable
)
def requires(self):
return self._env
def output(self):
return luigi.LocalTarget(
self.bin
)
def complete(self):
return self.output().exists()
def related_tool(self, name):
return self._env.get_path(name)
def run(self):
if not self._env.contains(self.package):
self._env.pypi_install(self.package)
if not self.output().exists():
raise SpecificationError(
f'Tool {self.package} was not correctly installed'
)
| 29.311203
| 89
| 0.537231
|
3caa6a89f68f90e41dbfac34ef61d2c2840ff614
| 50,569
|
py
|
Python
|
tests/test_model.py
|
OrquestraDigital/aboutcode-toolkit
|
d9ff859735a72635563fb5a9e265ecd7023d401a
|
[
"Apache-2.0"
] | 1
|
2021-08-31T10:58:29.000Z
|
2021-08-31T10:58:29.000Z
|
tests/test_model.py
|
sthagen/aboutcode-toolkit
|
cd74f15bcc223c7e1b7424f169481af8e55e0f38
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model.py
|
sthagen/aboutcode-toolkit
|
cd74f15bcc223c7e1b7424f169481af8e55e0f38
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import io
import json
import os
import posixpath
import shutil
import unittest
import mock
import saneyaml
from attributecode import CRITICAL
from attributecode import ERROR
from attributecode import INFO
from attributecode import WARNING
from attributecode import Error
from attributecode import model
from attributecode.util import add_unc, norm, on_windows
from attributecode.util import load_csv
from attributecode.util import to_posix
from attributecode.util import replace_tab_with_spaces
from testing_utils import extract_test_loc
from testing_utils import get_temp_dir
from testing_utils import get_temp_file
from testing_utils import get_test_loc
def check_csv(expected, result, regen=False, fix_cell_linesep=False):
"""
Assert that the contents of two CSV files locations `expected` and
`result` are equal.
"""
if regen:
shutil.copyfile(result, expected)
expected = sorted([sorted(d.items()) for d in load_csv(expected)])
result = [d.items() for d in load_csv(result)]
if fix_cell_linesep:
result = [list(fix_crlf(items)) for items in result]
result = sorted(sorted(items) for items in result)
assert expected == result
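# Usage sketch (added; paths are hypothetical): compare a generated CSV against
# an expected fixture, passing regen=True once to refresh the fixture.
#
#     check_csv(get_test_loc('test_model/expected.csv'), 'out/result.csv')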
def fix_crlf(items):
"""
    Hackish... somehow the CSV returned on Windows sometimes uses a backward
    linesep convention:
    instead of LF inside cells and CRLF at EOL,
    they use CRLF everywhere.
    This fixes that until we can find out why.
"""
for key, value in items:
if isinstance(value, str) and '\r\n' in value:
value = value.replace('\r\n', '\n')
yield key, value
def check_json(expected, result):
"""
Assert that the contents of two JSON files are equal.
"""
with open(expected) as e:
expected = json.load(e, object_pairs_hook=dict)
with open(result) as r:
result = json.load(r, object_pairs_hook=dict)
assert expected == result
def get_test_content(test_location):
"""
Read file at test_location and return a unicode string.
"""
return get_unicode_content(get_test_loc(test_location))
def get_unicode_content(location):
"""
Read file at location and return a unicode string.
"""
with io.open(location, encoding='utf-8') as doc:
return doc.read()
class FieldTest(unittest.TestCase):
def test_Field_init(self):
model.Field()
model.StringField()
model.ListField()
model.UrlField()
model.BooleanField()
model.PathField()
model.FileTextField()
model.PackageUrlField()
def test_empty_Field_has_no_content(self):
field = model.Field()
assert not field.has_content
def test_empty_Field_has_default_value(self):
field = model.Field()
assert '' == field.value
def test_PathField_check_location(self):
test_file = 'license.LICENSE'
field = model.PathField(name='f', value=test_file, present=True)
base_dir = get_test_loc('test_model/base_dir')
errors = field.validate(base_dir=base_dir)
        expected_errors = []
        assert expected_errors == errors
result = field.value[test_file]
expected = add_unc(posixpath.join(to_posix(base_dir), test_file))
assert expected == result
def test_PathField_check_missing_location(self):
test_file = 'does.not.exist'
field = model.PathField(name='f', value=test_file, present=True)
base_dir = get_test_loc('test_model/base_dir')
errors = field.validate(base_dir=base_dir)
file_path = posixpath.join(base_dir, test_file)
err_msg = 'Field f: Path %s not found' % file_path
expected_errors = [
Error(CRITICAL, err_msg)]
assert expected_errors == errors
result = field.value[test_file]
assert None == result
def test_TextField_loads_file(self):
field = model.FileTextField(
name='f', value='license.LICENSE', present=True)
base_dir = get_test_loc('test_model/base_dir')
errors = field.validate(base_dir=base_dir)
assert [] == errors
expected = {'license.LICENSE': 'some license text'}
assert expected == field.value
def test_PackageUrlField_is_valid_url(self):
assert model.PackageUrlField.is_valid_purl('pkg:pypi/saneyaml@0.1')
def test_PackageUrlField_is_valid_url_no_version(self):
assert model.PackageUrlField.is_valid_purl('pkg:pypi/saneyaml')
def test_UrlField_is_valid_url(self):
assert model.UrlField.is_valid_url('http://www.google.com')
def test_UrlField_is_valid_url_not_starting_with_www(self):
assert model.UrlField.is_valid_url('https://nexb.com')
assert model.UrlField.is_valid_url('http://archive.apache.org/dist/httpcomponents/commons-httpclient/2.0/source/commons-httpclient-2.0-alpha2-src.tar.gz')
assert model.UrlField.is_valid_url('http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
assert model.UrlField.is_valid_url('http://nothing_here.com')
def test_UrlField_is_valid_url_no_schemes(self):
assert not model.UrlField.is_valid_url('google.com')
assert not model.UrlField.is_valid_url('www.google.com')
assert not model.UrlField.is_valid_url('')
def test_UrlField_is_valid_url_not_ends_with_com(self):
assert model.UrlField.is_valid_url('http://www.google')
def test_UrlField_is_valid_url_ends_with_slash(self):
assert model.UrlField.is_valid_url('http://www.google.co.uk/')
def test_UrlField_is_valid_url_empty_URL(self):
assert not model.UrlField.is_valid_url('http:')
def check_validate(self, field_class, value, expected, expected_errors):
"""
Check field values after validation
"""
field = field_class(name='s', value=value, present=True)
# check that validate can be applied multiple times without side effects
for _ in range(2):
errors = field.validate()
assert expected_errors == errors
assert expected == field.value
def test_StringField_validate_trailing_spaces_are_removed(self):
field_class = model.StringField
value = 'trailin spaces '
expected = 'trailin spaces'
self.check_validate(field_class, value, expected, expected_errors=[])
def test_ListField_contains_list_after_validate(self):
value = 'string'
field_class = model.ListField
expected = [value]
self.check_validate(field_class, value, expected, expected_errors=[])
def test_ListField_contains_stripped_strings_after_validate(self):
value = '''first line
second line '''
field_class = model.ListField
expected = ['first line', 'second line']
self.check_validate(field_class, value, expected, expected_errors=[])
def test_PathField_contains_stripped_strings_after_validate(self):
value = '''first line
second line '''
field_class = model.ListField
expected = ['first line', 'second line']
self.check_validate(field_class, value, expected, expected_errors=[])
def test_PathField_contains_dict_after_validate(self):
value = 'string'
field_class = model.PathField
expected = dict([('string', None)])
expected_errors = [
Error(ERROR, 'Field s: Unable to verify path: string: No base directory provided')
]
self.check_validate(field_class, value, expected, expected_errors)
def test_SingleLineField_has_errors_if_multiline(self):
value = '''line1
line2'''
field_class = model.SingleLineField
expected = value
expected_errors = [Error(ERROR, 'Field s: Cannot span multiple lines: line1\n line2')]
self.check_validate(field_class, value, expected, expected_errors)
class YamlParseTest(unittest.TestCase):
maxDiff = None
def test_saneyaml_load_can_parse_simple_fields(self):
test = get_test_content('test_model/parse/basic.about')
result = saneyaml.load(test)
expected = [
('single_line', 'optional'),
('other_field', 'value'),
]
assert expected == list(result.items())
def test_saneyaml_load_does_not_convert_to_crlf(self):
test = get_test_content('test_model/crlf/about.ABOUT')
result = saneyaml.load(test)
expected = [
(u'about_resource', u'.'),
(u'name', u'pytest'),
(u'description', u'first line\nsecond line\nthird line\n'),
(u'copyright', u'copyright')
]
assert expected == list(result.items())
def test_saneyaml_load_can_parse_continuations(self):
test = get_test_content('test_model/parse/continuation.about')
result = saneyaml.load(test)
expected = [
('single_line', 'optional'),
('other_field', 'value'),
(u'multi_line', u'some value and more and yet more')
]
assert expected == list(result.items())
def test_saneyaml_load_can_handle_multiline_texts_and_strips_text_fields(self):
test = get_test_content('test_model/parse/complex.about')
result = saneyaml.load(test)
expected = [
('single_line', 'optional'),
('other_field', 'value'),
('multi_line', 'some value and more and yet more'),
('yetanother', 'sdasd')]
assert expected == list(result.items())
def test_saneyaml_load_can_parse_verbatim_text_unstripped(self):
test = get_test_content('test_model/parse/continuation_verbatim.about')
result = saneyaml.load(test)
expected = [
(u'single_line', u'optional'),
(u'other_field', u'value'),
(u'multi_line', u'some value \n and more \n and yet more \n \n')
]
assert expected == list(result.items())
def test_saneyaml_load_can_parse_verbatim_tab_text_unstripped(self):
test = get_test_content('test_model/parse/continuation_verbatim_with_tab.about')
data = replace_tab_with_spaces(test)
result = saneyaml.load(data)
expected = [
(u'single_line', u'optional'),
(u'other_field', u'value'),
(u'multi_line', u'This is a long description\nwith tab.\n')
]
assert expected == list(result.items())
def test_saneyaml_load_report_error_for_invalid_field_name(self):
test = get_test_content('test_model/parse/invalid_names.about')
try:
saneyaml.load(test)
self.fail('Exception not raised')
except Exception:
pass
def test_saneyaml_dangling_text_is_not_an_invalid_continuation(self):
test = get_test_content('test_model/parse/invalid_continuation.about')
result = saneyaml.load(test)
expected = [
(u'single_line', u'optional'),
(u'other_field', u'value'),
(u'multi_line', u'some value and more\ninvalid continuation2')
]
assert expected == list(result.items())
def test_saneyaml_load_accepts_unicode_keys_and_values(self):
test = get_test_content('test_model/parse/non_ascii_field_name_value.about')
result = saneyaml.load(test)
expected = [
('name', 'name'),
('about_resource', '.'),
('owner', 'Matías Aguirre'),
(u'Matías', u'unicode field name')
]
assert expected == list(result.items())
def test_saneyaml_load_accepts_blank_lines_and_spaces_in_field_names(self):
test = '''
name: test space
version: 0.7.0
about_resource: about.py
field with spaces: This is a test case for field with spaces
'''
result = saneyaml.load(test)
expected = [
('name', 'test space'),
('version', '0.7.0'),
('about_resource', 'about.py'),
(u'field with spaces', u'This is a test case for field with spaces'),
]
assert expected == list(result.items())
def test_saneyaml_loads_blank_lines_and_lines_without_no_colon(self):
test = '''
name: no colon test
test
version: 0.7.0
about_resource: about.py
test with no colon
'''
try:
saneyaml.load(test)
self.fail('Exception not raised')
except Exception:
pass
class AboutTest(unittest.TestCase):
def test_About_load_ignores_original_field_order_and_uses_standard_predefined_order(self):
# fields in this file are not in the standard order
test_file = get_test_loc('test_model/parse/ordered_fields.ABOUT')
a = model.About(test_file)
assert [] == a.errors
expected = ['about_resource', 'name', 'version', 'download_url']
result = [f.name for f in a.all_fields() if f.present]
assert expected == result
def test_About_duplicate_field_names_are_detected_with_different_case(self):
# This test is failing because the YAML does not keep the order when
        # it loads the test files. For instance, it treats the 'About_Resource' as the
# first element and therefore the dup key is 'about_resource'.
test_file = get_test_loc('test_model/parse/dupe_field_name.ABOUT')
a = model.About(test_file)
expected = [
Error(WARNING, 'Field About_Resource is a duplicate. Original value: "." replaced with: "new value"'),
Error(WARNING, 'Field Name is a duplicate. Original value: "old" replaced with: "new"')
]
result = a.errors
assert sorted(expected) == sorted(result)
def test_About_duplicate_field_names_are_not_reported_if_same_value(self):
# This test is failing because the YAML does not keep the order when
        # it loads the test files. For instance, it treats the 'About_Resource' as the
# first element and therefore the dup key is 'about_resource'.
test_file = get_test_loc('test_model/parse/dupe_field_name_no_new_value.ABOUT')
a = model.About(test_file)
expected = [
]
result = a.errors
assert sorted(expected) == sorted(result)
def check_About_hydrate(self, about, fields):
expected = set([
'name',
'homepage_url',
'download_url',
'version',
'copyright',
'date',
'license_spdx',
'license_text_file',
'notice_file',
'about_resource'])
expected_errors = [
Error(INFO, 'Field date is a custom field.'),
Error(INFO, 'Field license_spdx is a custom field.'),
Error(INFO, 'Field license_text_file is a custom field.')]
errors = about.hydrate(fields)
assert expected_errors == errors
result = set([f.name for f in about.all_fields() if f.present])
assert expected == result
def test_About_hydrate_normalize_field_names_to_lowercase(self):
test_content = get_test_content('test_gen/parser_tests/upper_field_names.ABOUT')
fields = saneyaml.load(test_content).items()
a = model.About()
for _ in range(3):
self.check_About_hydrate(a, fields)
def test_About_with_existing_about_resource_has_no_error(self):
test_file = get_test_loc('test_gen/parser_tests/about_resource_field.ABOUT')
a = model.About(test_file)
assert [] == a.errors
result = a.about_resource.value['about_resource.c']
# this means we have a location
self.assertNotEqual([], result)
def test_About_has_errors_when_about_resource_is_missing(self):
test_file = get_test_loc('test_gen/parser_tests/.ABOUT')
a = model.About(test_file)
expected = [Error(CRITICAL, 'Field about_resource is required')]
result = a.errors
assert expected == result
def test_About_has_errors_when_about_resource_does_not_exist(self):
test_file = get_test_loc('test_gen/parser_tests/missing_about_ref.ABOUT')
file_path = posixpath.join(posixpath.dirname(test_file), 'about_file_missing.c')
a = model.About(test_file)
err_msg = 'Field about_resource: Path %s not found' % file_path
expected = [Error(INFO, err_msg)]
result = a.errors
assert expected == result
def test_About_has_errors_when_missing_required_fields_are_missing(self):
test_file = get_test_loc('test_model/parse/missing_required.ABOUT')
a = model.About(test_file)
expected = [
Error(CRITICAL, 'Field about_resource is required'),
Error(CRITICAL, 'Field name is required'),
]
result = a.errors
assert expected == result
def test_About_has_errors_when_required_fields_are_empty(self):
test_file = get_test_loc('test_model/parse/empty_required.ABOUT')
a = model.About(test_file)
expected = [
Error(CRITICAL, 'Field about_resource is required and empty'),
Error(CRITICAL, 'Field name is required and empty'),
]
result = a.errors
assert expected == result
def test_About_has_errors_with_empty_notice_file_field(self):
test_file = get_test_loc('test_model/parse/empty_notice_field.about')
a = model.About(test_file)
expected = [
Error(INFO, 'Field notice_file is present but empty.')]
result = a.errors
assert expected == result
def test_About_custom_fields_are_never_ignored(self):
test_file = get_test_loc('test_model/custom_fields/custom_fields.about')
a = model.About(test_file)
result = [(n, f.value) for n, f in a.custom_fields.items()]
expected = [
(u'single_line', u'README STUFF'),
(u'multi_line', u'line1\nline2'),
(u'other', u'sasasas'),
(u'empty', u'')
]
assert expected == result
def test_About_custom_fields_are_not_ignored_and_order_is_preserved(self):
test_file = get_test_loc('test_model/custom_fields/custom_fields.about')
a = model.About(test_file)
result = [(n, f.value) for n, f in a.custom_fields.items()]
expected = [
(u'single_line', u'README STUFF'),
(u'multi_line', u'line1\nline2'),
(u'other', u'sasasas'),
(u'empty', u'')
]
assert sorted(expected) == sorted(result)
def test_About_has_errors_for_illegal_custom_field_name(self):
test_file = get_test_loc('test_model/parse/illegal_custom_field.about')
a = model.About(test_file)
expected_errors = [
Error(INFO, 'Field hydrate is a custom field.'),
Error(CRITICAL, "Internal error with custom field: 'hydrate': 'illegal name'.")
]
assert expected_errors == a.errors
assert not hasattr(getattr(a, 'hydrate'), 'value')
field = list(a.custom_fields.values())[0]
assert 'hydrate' == field.name
assert 'illegal name' == field.value
def test_About_file_fields_are_empty_if_present_and_path_missing(self):
test_file = get_test_loc('test_model/parse/missing_notice_license_files.ABOUT')
a = model.About(test_file)
file_path1 = posixpath.join(posixpath.dirname(test_file), 'test.LICENSE')
file_path2 = posixpath.join(posixpath.dirname(test_file), 'test.NOTICE')
err_msg1 = Error(CRITICAL, 'Field license_file: Path %s not found' % file_path1)
err_msg2 = Error(CRITICAL, 'Field notice_file: Path %s not found' % file_path2)
expected_errors = [err_msg1, err_msg2]
assert expected_errors == a.errors
assert {'test.LICENSE': None} == a.license_file.value
assert {'test.NOTICE': None} == a.notice_file.value
def test_About_notice_and_license_text_are_loaded_from_file(self):
test_file = get_test_loc('test_model/parse/license_file_notice_file.ABOUT')
a = model.About(test_file)
expected = '''Tester holds the copyright for test component. Tester relinquishes copyright of
this software and releases the component to Public Domain.
* Email Test@tester.com for any questions'''
result = a.license_file.value['license_text.LICENSE']
assert expected == result
expected = '''Test component is released to Public Domain.'''
result = a.notice_file.value['notice_text.NOTICE']
assert expected == result
def test_About_license_and_notice_text_are_empty_if_field_missing(self):
test_file = get_test_loc('test_model/parse/no_file_fields.ABOUT')
a = model.About(test_file)
assert [] == a.errors
assert {} == a.license_file.value
assert {} == a.notice_file.value
def test_About_rejects_non_ascii_names_and_accepts_unicode_values(self):
test_file = get_test_loc('test_model/parse/non_ascii_field_name_value.about')
a = model.About(test_file)
expected = [
Error(CRITICAL, "Field name: 'mat\xedas' contains illegal name characters: 0 to 9, a to z, A to Z and _. (or empty spaces)")
]
assert expected == a.errors
def test_About_invalid_boolean_value(self):
test_file = get_test_loc('test_model/parse/invalid_boolean.about')
a = model.About(test_file)
expected_msg = "Field modified: Invalid flag value: 'blah'"
assert expected_msg in a.errors[0].message
def test_About_boolean_value(self):
test_file = get_test_loc('test_model/parse/boolean_data.about')
a = model.About(test_file)
expected_msg = "Field track_changes is present but empty."
assert expected_msg in a.errors[0].message
# Context of the test file
"""
about_resource: .
name: boolean_data
attribute: False
modified: true
internal_use_only: no
redistribute: yes
track_changes:
"""
assert a.attribute.value is False
assert a.modified.value is True
assert a.internal_use_only.value is False
assert a.redistribute.value is True
assert a.track_changes.value is None
def test_About_contains_about_file_path(self):
test_file = get_test_loc('test_model/serialize/about.ABOUT')
# TODO: I am not sure this override of the about_file_path makes sense
a = model.About(test_file, about_file_path='complete/about.ABOUT')
assert [] == a.errors
expected = 'complete/about.ABOUT'
result = a.about_file_path
assert expected == result
def test_About_equals(self):
test_file = get_test_loc('test_model/equal/complete/about.ABOUT')
a = model.About(test_file, about_file_path='complete/about.ABOUT')
b = model.About(test_file, about_file_path='complete/about.ABOUT')
assert a == b
def test_About_are_not_equal_with_small_text_differences(self):
test_file = get_test_loc('test_model/equal/complete2/about.ABOUT')
a = model.About(test_file, about_file_path='complete2/about.ABOUT')
test_file2 = get_test_loc('test_model/equal/complete/about.ABOUT')
b = model.About(test_file2, about_file_path='complete/about.ABOUT')
assert a.dumps() != b.dumps()
assert a != b
def test_get_field_names_only_returns_non_empties(self):
a = model.About()
a.custom_fields['f'] = model.StringField(
name='f', value='1', present=True)
b = model.About()
b.custom_fields['g'] = model.StringField(
name='g', value='1', present=True)
abouts = [a, b]
# ensure all fields (including custom fields) and
# about_resource are collected in the correct order
expected = [
model.About.ABOUT_RESOURCE_ATTR, 'name', 'f', 'g'
]
result = model.get_field_names(abouts)
assert expected == result
def test_get_field_names_does_not_return_duplicates_custom_fields(self):
a = model.About()
a.custom_fields['f'] = model.StringField(name='f', value='1',
present=True)
a.custom_fields['cf'] = model.StringField(name='cf', value='1',
present=True)
b = model.About()
b.custom_fields['g'] = model.StringField(name='g', value='1',
present=True)
b.custom_fields['cf'] = model.StringField(name='cf', value='2',
present=True)
abouts = [a, b]
# ensure all fields (including custom fields) and
# about_resource are collected in the correct order
expected = [
'about_resource',
'name',
'cf',
'f',
'g',
]
result = model.get_field_names(abouts)
assert expected == result
def test_comma_in_license(self):
test_file = get_test_loc('test_model/special_char/about.ABOUT')
a = model.About(test_file)
expected = Error(ERROR, "The following character(s) cannot be in the license_key: [',']")
assert a.errors[0] == expected
def test_load_dict_issue_433(self):
package_data = {
'about_resource': 'package1.zip',
'name': 'package',
'version': '1.0',
'copyright': 'copyright on package',
'license_expression': 'license1 AND license2',
'notice_file': 'package1.zip.NOTICE',
'licenses': [
{'key': 'license1', 'name': 'License1', 'file': 'license1.LICENSE', 'url': 'some_url'},
{'key': 'license2', 'name': 'License2', 'file': 'license2.LICENSE', 'url': 'some_url'},
],
}
about = model.About()
about.load_dict(package_data, base_dir='')
as_dict = about.as_dict()
expected = '''about_resource: package1.zip
name: package
version: '1.0'
license_expression: license1 AND license2
copyright: copyright on package
notice_file: package1.zip.NOTICE
licenses:
- key: license1
name: License1
file: license1.LICENSE
url: some_url
- key: license2
name: License2
file: license2.LICENSE
url: some_url
'''
lic_dict = {u'license1': [u'License1', u'', u'some_url'], u'license2' : [u'License2', u'', u'some_url']}
assert about.dumps(lic_dict) == expected
class SerializationTest(unittest.TestCase):
def test_About_dumps(self):
test_file = get_test_loc('test_model/dumps/about.ABOUT')
a = model.About(test_file)
assert [] == a.errors
expected = '''about_resource: .
name: AboutCode
version: 0.11.0
description: |
AboutCode is a tool
to process ABOUT files.
An ABOUT file is a file.
homepage_url: http://dejacode.org
license_expression: apache-2.0
copyright: Copyright (c) 2013-2014 nexB Inc.
notice_file: NOTICE
owner: nexB Inc.
author: Jillian Daguil, Chin Yeung Li, Philippe Ombredanne, Thomas Druez
vcs_tool: git
vcs_repository: https://github.com/dejacode/about-code-tool.git
licenses:
- key: apache-2.0
name: Apache 2.0
file: apache-2.0.LICENSE
'''
result = a.dumps()
assert expected == result
def test_About_dumps_does_all_non_empty_present_fields(self):
test_file = get_test_loc('test_model/parse/complete2/about.ABOUT')
a = model.About(test_file)
expected_error = [
Error(INFO, 'Field custom1 is a custom field.'),
Error(INFO, 'Field custom2 is a custom field.'),
Error(INFO, 'Field custom2 is present but empty.')
]
assert sorted(expected_error) == sorted(a.errors)
expected = '''about_resource: .
name: AboutCode
version: 0.11.0
custom1: |
multi
line
'''
result = a.dumps()
assert expected == result
def test_About_dumps_with_different_boolean_value(self):
test_file = get_test_loc('test_model/parse/complete2/about2.ABOUT')
a = model.About(test_file)
expected_error_msg = "Field track_changes: Invalid flag value: 'blah' is not one of"
assert len(a.errors) == 1
assert expected_error_msg in a.errors[0].message
expected = '''about_resource: .
name: AboutCode
version: 0.11.0
redistribute: no
attribute: yes
modified: yes
'''
result = a.dumps()
assert set(expected) == set(result)
def test_About_dumps_all_non_empty_fields(self):
test_file = get_test_loc('test_model/parse/complete2/about.ABOUT')
a = model.About(test_file)
expected_error = [
Error(INFO, 'Field custom1 is a custom field.'),
Error(INFO, 'Field custom2 is a custom field.'),
Error(INFO, 'Field custom2 is present but empty.')
]
assert sorted(expected_error) == sorted(a.errors)
expected = '''about_resource: .
name: AboutCode
version: 0.11.0
custom1: |
multi
line
'''
result = a.dumps()
assert expected == result
def test_About_as_dict_contains_special_paths(self):
test_file = get_test_loc('test_model/special/about.ABOUT')
a = model.About(test_file, about_file_path='complete/about.ABOUT')
expected_errors = []
assert expected_errors == a.errors
as_dict = a.as_dict()
expected = 'complete/about.ABOUT'
result = as_dict[model.About.ABOUT_FILE_PATH_ATTR]
assert expected == result
def test_load_dump_is_idempotent(self):
test_file = get_test_loc('test_model/this.ABOUT')
a = model.About()
a.load(test_file)
dumped_file = get_temp_file('that.ABOUT')
a.dump(dumped_file)
expected = get_unicode_content(test_file).splitlines()
result = get_unicode_content(dumped_file).splitlines()
# Ignore comment and empty line
filtered_result = []
for line in result:
if not line.startswith('#') and not line == '':
filtered_result.append(line)
assert expected == filtered_result
def test_load_can_load_unicode(self):
test_file = get_test_loc('test_model/unicode/nose-selecttests.ABOUT')
a = model.About()
a.load(test_file)
file_path = posixpath.join(posixpath.dirname(test_file), 'nose-selecttests-0.3.zip')
err_msg = 'Field about_resource: Path %s not found' % file_path
errors = [
Error(INFO, 'Field dje_license is a custom field.'),
Error(INFO, 'Field license_text_file is a custom field.'),
Error(INFO, 'Field scm_tool is a custom field.'),
Error(INFO, 'Field scm_repository is a custom field.'),
Error(INFO, 'Field test is a custom field.'),
Error(INFO, err_msg)]
assert errors == a.errors
assert 'Copyright (c) 2012, Domen Kožar' == a.copyright.value
def test_load_has_errors_for_non_unicode(self):
test_file = get_test_loc('test_model/unicode/not-unicode.ABOUT')
a = model.About()
a.load(test_file)
err = a.errors[0]
assert CRITICAL == err.severity
assert 'Cannot load invalid ABOUT file' in err.message
assert 'UnicodeDecodeError' in err.message
def test_as_dict_load_dict_ignores_empties(self):
test = {
'about_resource': '.',
'author': '',
'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
'custom1': 'some custom',
'custom_empty': '',
'description': 'AboutCode is a tool\nfor files.',
'license_expression': 'apache-2.0',
'name': 'AboutCode',
'owner': 'nexB Inc.'}
expected = {
'about_file_path': None,
'about_resource': dict([('.', None)]),
'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
'custom1': 'some custom',
'description': 'AboutCode is a tool\nfor files.',
'license_expression': 'apache-2.0',
'name': 'AboutCode',
'owner': 'nexB Inc.'}
a = model.About()
base_dir = 'some_dir'
a.load_dict(test, base_dir)
as_dict = a.as_dict()
# FIXME: why converting back to dict?
assert expected == dict(as_dict)
def test_load_dict_as_dict_is_idempotent_ignoring_special(self):
test = {
'about_resource': ['.'],
'attribute': 'yes',
'author': 'Jillian Daguil, Chin Yeung Li, Philippe Ombredanne, Thomas Druez',
'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
'description': 'AboutCode is a tool to process ABOUT files. An ABOUT file is a file.',
'homepage_url': 'http://dejacode.org',
'license_expression': 'apache-2.0',
'name': 'AboutCode',
'owner': 'nexB Inc.',
'vcs_repository': 'https://github.com/dejacode/about-code-tool.git',
'vcs_tool': 'git',
'version': '0.11.0'}
a = model.About()
base_dir = 'some_dir'
a.load_dict(test, base_dir)
as_dict = a.as_dict()
expected = {
'about_file_path': None,
'about_resource': dict([('.', None)]),
'attribute': 'yes',
'author': 'Jillian Daguil, Chin Yeung Li, Philippe Ombredanne, Thomas Druez',
'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
'description': 'AboutCode is a tool to process ABOUT files. An ABOUT file is a file.',
'homepage_url': 'http://dejacode.org',
'license_expression': 'apache-2.0',
'name': 'AboutCode',
'owner': 'nexB Inc.',
'vcs_repository': 'https://github.com/dejacode/about-code-tool.git',
'vcs_tool': 'git',
'version': '0.11.0'}
assert expected == dict(as_dict)
def test_about_model_class_from_dict_constructor(self):
about_data = {
'about_resource': ['.'],
'attribute': 'yes',
'author': 'Jillian Daguil, Chin Yeung Li, Philippe Ombredanne, Thomas Druez',
'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
'description': 'AboutCode is a tool to process ABOUT files. An ABOUT file is a file.',
'homepage_url': 'http://dejacode.org',
'license_expression': 'apache-2.0',
'name': 'AboutCode',
'owner': 'nexB Inc.',
'vcs_repository': 'https://github.com/dejacode/about-code-tool.git',
'vcs_tool': 'git',
'version': '0.11.0',
}
about = model.About.from_dict(about_data)
assert isinstance(about, model.About)
about_data.update({
'about_file_path': None,
'about_resource': dict([('.', None)]),
})
assert about_data == about.as_dict()
def test_write_output_csv(self):
path = 'test_model/this.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
result = get_temp_file()
model.write_output([abouts], result, format='csv')
expected = get_test_loc('test_model/expected.csv')
check_csv(expected, result)
def test_write_output_csv_with_multiple_files(self):
path = 'test_model/multiple_files.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
result = get_temp_file()
model.write_output([abouts], result, format='csv')
expected = get_test_loc('test_model/multiple_files_expected.csv')
check_csv(expected, result)
def test_write_output_json(self):
path = 'test_model/this.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
result = get_temp_file()
model.write_output([abouts], result, format='json')
expected = get_test_loc('test_model/expected.json')
check_json(expected, result)
def test_android_module_license(self):
path = 'test_model/android/single_license.c.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
parent_dir = get_temp_dir()
abouts.android_module_license(parent_dir)
assert os.path.exists(os.path.join(parent_dir, 'MODULE_LICENSE_PUBLIC_DOMAIN'))
def test_android_module_multi_licenses(self):
path = 'test_model/android/multi_license.c.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
parent_dir = get_temp_dir()
abouts.android_module_license(parent_dir)
assert os.path.exists(os.path.join(parent_dir, 'MODULE_LICENSE_BSD_NEW'))
assert os.path.exists(os.path.join(parent_dir, 'MODULE_LICENSE_BSD_SIMPLIFIED'))
def test_android_notice(self):
path = 'test_model/android/single_license.c.ABOUT'
test_file = get_test_loc(path)
abouts = model.About(location=test_file, about_file_path=path)
parent_dir = get_temp_dir()
notice_path, notice_context = abouts.android_notice(parent_dir)
expected_path = os.path.join(parent_dir, 'NOTICE')
assert os.path.normpath(notice_path) == expected_path
expected_notice = '''Copyright (c) xyz
This component is released to the public domain by the author.
'''
assert notice_context == expected_notice
class CollectorTest(unittest.TestCase):
def test_collect_inventory_return_errors(self):
test_loc = get_test_loc('test_model/collect_inventory_errors')
errors, _abouts = model.collect_inventory(test_loc)
file_path1 = posixpath.join(test_loc, 'distribute_setup.py')
file_path2 = posixpath.join(test_loc, 'date_test.py')
err_msg1 = 'non-supported_date_format.ABOUT: Field about_resource: Path %s not found' % file_path1
err_msg2 = 'supported_date_format.ABOUT: Field about_resource: Path %s not found' % file_path2
expected_errors = [
Error(INFO, 'non-supported_date_format.ABOUT: Field date is a custom field.'),
Error(INFO, 'supported_date_format.ABOUT: Field date is a custom field.'),
Error(INFO, err_msg1),
Error(INFO, err_msg2)]
assert sorted(expected_errors) == sorted(errors)
def test_collect_inventory_with_long_path(self):
test_loc = extract_test_loc('test_model/longpath.zip')
_errors, abouts = model.collect_inventory(test_loc)
assert 2 == len(abouts)
expected_paths = (
'longpath/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/non-supported_date_format.ABOUT',
'longpath/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
'/longpath1/supported_date_format.ABOUT'
)
results = [a.about_file_path for a in abouts]
assert all(r.endswith(expected_paths) for r in results)
expected_name = ['distribute', 'date_test']
result_name = [a.name.value for a in abouts]
assert sorted(expected_name) == sorted(result_name)
def test_collect_inventory_can_collect_a_single_file(self):
test_loc = get_test_loc('test_model/single_file/django_snippets_2413.ABOUT')
_errors, abouts = model.collect_inventory(test_loc)
assert 1 == len(abouts)
expected = ['single_file/django_snippets_2413.ABOUT']
result = [a.about_file_path for a in abouts]
assert expected == result
def test_collect_inventory_return_no_warnings_and_model_can_use_relative_paths(self):
test_loc = get_test_loc('test_model/rel/allAboutInOneDir')
errors, _abouts = model.collect_inventory(test_loc)
expected_errors = []
result = [(level, e) for level, e in errors if level > INFO]
assert expected_errors == result
def test_collect_inventory_populate_about_file_path(self):
test_loc = get_test_loc('test_model/inventory/complete')
errors, abouts = model.collect_inventory(test_loc)
assert [] == errors
expected = 'about.ABOUT'
result = abouts[0].about_file_path
assert expected == result
def test_collect_inventory_with_multi_line(self):
test_loc = get_test_loc('test_model/parse/multi_line_license_expresion.ABOUT')
errors, abouts = model.collect_inventory(test_loc)
assert [] == errors
expected_lic_url = [
'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:mit',
'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:apache-2.0']
returned_lic_url = abouts[0].license_url.value
assert expected_lic_url == returned_lic_url
def test_collect_inventory_with_license_expression(self):
test_loc = get_test_loc('test_model/parse/multi_line_license_expresion.ABOUT')
errors, abouts = model.collect_inventory(test_loc)
assert [] == errors
expected_lic = 'mit or apache-2.0'
returned_lic = abouts[0].license_expression.value
assert expected_lic == returned_lic
def test_collect_inventory_always_collects_custom_fields(self):
test_loc = get_test_loc('test_model/inventory/custom_fields.ABOUT')
errors, abouts = model.collect_inventory(test_loc)
expected_msg1 = 'Field resource is a custom field'
assert len(errors) == 2
assert expected_msg1 in errors[0].message
# The not supported 'resource' value is collected
assert abouts[0].resource.value
def test_collect_inventory_does_not_raise_error_and_maintains_order_on_custom_fields(self):
test_loc = get_test_loc('test_model/inventory/custom_fields2.ABOUT')
errors, abouts = model.collect_inventory(test_loc)
expected_errors = [
Error(INFO, 'inventory/custom_fields2.ABOUT: Field resource is a custom field.'),
Error(INFO, 'inventory/custom_fields2.ABOUT: Field custom_mapping is a custom field.')
]
assert expected_errors == errors
expected = [u'about_resource: .\nname: test\nresource: .\ncustom_mapping: test\n']
assert expected == [a.dumps() for a in abouts]
def test_parse_license_expression(self):
spec_char, returned_lic = model.parse_license_expression('mit or apache-2.0')
expected_lic = ['mit', 'apache-2.0']
expected_spec_char = []
assert expected_lic == returned_lic
assert expected_spec_char == spec_char
def test_parse_license_expression_with_special_characters(self):
spec_char, returned_lic = model.parse_license_expression('mit, apache-2.0')
expected_lic = []
expected_spec_char = [',']
assert expected_lic == returned_lic
assert expected_spec_char == spec_char
def test_collect_inventory_works_with_relative_paths(self):
# FIXME: This test needs to be run under src/attributecode/,
# otherwise it will fail because it depends on the launch location
test_loc = get_test_loc('test_model/inventory/relative')
# Use '.' as the indication of the current directory
test_loc1 = test_loc + '/./'
# Use '..' to go back to the parent directory
test_loc2 = test_loc + '/../relative'
errors1, abouts1 = model.collect_inventory(test_loc1)
errors2, abouts2 = model.collect_inventory(test_loc2)
assert [] == errors1
assert [] == errors2
expected = 'about.ABOUT'
result1 = abouts1[0].about_file_path
result2 = abouts2[0].about_file_path
assert expected == result1
assert expected == result2
def test_collect_inventory_basic_from_directory(self):
location = get_test_loc('test_model/inventory/basic')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
model.write_output(abouts, result, format='csv')
expected_errors = []
assert expected_errors == errors
expected = get_test_loc('test_model/inventory/basic/expected.csv')
check_csv(expected, result)
def test_collect_inventory_with_about_resource_path_from_directory(self):
location = get_test_loc('test_model/inventory/basic_with_about_resource_path')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
model.write_output(abouts, result, format='csv')
expected_errors = []
assert expected_errors == errors
expected = get_test_loc('test_model/inventory/basic_with_about_resource_path/expected.csv')
check_csv(expected, result)
def test_collect_inventory_with_no_about_resource_from_directory(self):
location = get_test_loc('test_model/inventory/no_about_resource_key')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
model.write_output(abouts, result, format='csv')
expected_errors = [Error(CRITICAL, 'about/about.ABOUT: Field about_resource is required')]
assert expected_errors == errors
def test_collect_inventory_complex_from_directory(self):
location = get_test_loc('test_model/inventory/complex')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
model.write_output(abouts, result, format='csv')
assert all(e.severity == INFO for e in errors)
expected = get_test_loc('test_model/inventory/complex/expected.csv')
check_csv(expected, result, fix_cell_linesep=True, regen=False)
def test_collect_inventory_does_not_convert_lf_to_crlf_from_directory(self):
location = get_test_loc('test_model/crlf/about.ABOUT')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
errors2 = model.write_output(abouts, result, format='csv')
errors.extend(errors2)
assert all(e.severity == INFO for e in errors)
expected = get_test_loc('test_model/crlf/expected.csv')
check_csv(expected, result, fix_cell_linesep=True, regen=False)
def test_copy_redist_src_no_structure(self):
test_loc = get_test_loc('test_model/redistribution/')
copy_list = [get_test_loc('test_model/redistribution/this.c'), get_test_loc('test_model/redistribution/test/subdir')]
output = get_temp_dir()
expected_file = ['this.c', 'subdir']
with_structure = False
err = model.copy_redist_src(copy_list, test_loc, output, with_structure)
assert err == []
from os import listdir
copied_files = listdir(output)
assert len(expected_file) == len(copied_files)
assert err == []
for file in expected_file:
assert file in copied_files
def test_copy_redist_src_with_structure(self):
test_loc = get_test_loc('test_model/redistribution/')
copy_list = [get_test_loc('test_model/redistribution/this.c'), get_test_loc('test_model/redistribution/test/subdir')]
output = get_temp_dir()
expected_file = ['this.c', 'test']
with_structure = True
err = model.copy_redist_src(copy_list, test_loc, output, with_structure)
assert err == []
from os import listdir
copied_files = listdir(output)
assert len(expected_file) == len(copied_files)
assert err == []
for file in expected_file:
assert file in copied_files
def test_get_copy_list(self):
location = get_test_loc('test_model/redistribution/')
result = get_temp_file()
errors, abouts = model.collect_inventory(location)
copy_list, err = model.get_copy_list(abouts, location)
assert err == []
expected = [os.path.join(location, 'this.c'), os.path.join(location, 'test/subdir')]
if on_windows:
norm_list = []
for c in copy_list:
norm_list.append(norm(c))
assert norm_list == expected
else:
assert copy_list == expected
class FetchLicenseTest(unittest.TestCase):
@mock.patch.object(model, 'urlopen')
def test_valid_api_url(self, mock_data):
mock_data.return_value = ''
assert model.valid_api_url('non_valid_url') is False
@mock.patch('attributecode.util.have_network_connection')
@mock.patch('attributecode.model.valid_api_url')
def test_pre_process_and_fetch_license_dict(self, have_network_connection, valid_api_url):
have_network_connection.return_value = True
valid_api_url.return_value = False
error_msg = (
'Network problem. Please check your Internet connection. '
'License generation is skipped.')
expected = ({}, [Error(ERROR, error_msg)])
assert model.pre_process_and_fetch_license_dict([], '', '') == expected
valid_api_url.return_value = True
expected = ({}, [])
assert model.pre_process_and_fetch_license_dict([], '', '') == expected
| 39.140093 | 162 | 0.649528 |
| 02fa8f7ab940236fee3f65ec6c0cc1aa7d9251f1 | 1,569 | py | Python | nuwa_pytorch/training.py | CurisZhou/nuwa-pytorch | be4f34be819b368def7b065457e5a85997beeee9 | ["MIT"] | 1 | 2022-02-23T03:00:33.000Z | 2022-02-23T03:00:33.000Z | nuwa_pytorch/training.py | CurisZhou/nuwa-pytorch | be4f34be819b368def7b065457e5a85997beeee9 | ["MIT"] | null | null | null | nuwa_pytorch/training.py | CurisZhou/nuwa-pytorch | be4f34be819b368def7b065457e5a85997beeee9 | ["MIT"] | null | null | null |
import torch
from torch import nn
from torch.optim import AdamW, Adam
from nuwa_pytorch.nuwa_pytorch import VQGanVAE
# helper functions
def separate_weight_decayable_params(params):
no_wd_params = set([param for param in params if param.ndim < 2])
wd_params = set(params) - no_wd_params
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 3e-4,
wd = 1e-1,
filter_by_requires_grad = False
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
params = set(params)
wd_params, no_wd_params = separate_weight_decayable_params(params)
param_groups = [
{'params': list(wd_params)},
{'params': list(no_wd_params), 'weight_decay': 0},
]
return AdamW(param_groups, lr = lr, weight_decay = wd)
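# Usage sketch (the model below is an assumption, not part of this file): parameters
# with ndim < 2 (biases, norm scales) land in the group with weight_decay=0, while
# weight matrices keep the default decay.
# import torch.nn as nn
# model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))
# optim = get_optimizer(model.parameters(), lr=1e-3, wd=0.1)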
# classes
class VQGanVAETrainer(nn.Module):
def __init__(
self,
*,
vae,
lr = 3e-4
):
super().__init__()
assert isinstance(vae, VQGanVAE), 'vae must be instance of VQGanVAE'
self.vae = vae
self.optim = Adam(vae.parameters(), lr = lr)
self.register_buffer('state', torch.ones((1,), dtype = torch.bool))
def forward(self, img):
return_loss_key = 'return_loss' if self.state else 'return_discr_loss'
vae_kwargs = {return_loss_key: True}
loss = self.vae(img, **vae_kwargs)
loss.backward()
self.optim.step()
self.optim.zero_grad()
self.state = self.state.data.copy_(~self.state)
return loss, bool(self.state)
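# Hedged usage sketch (the VQGanVAE constructor arguments and the dataloader are
# assumptions; check the library for the real signatures): the trainer alternates
# between generator and discriminator updates via its boolean `state` buffer,
# taking one optimizer step per call.
# vae = VQGanVAE()
# trainer = VQGanVAETrainer(vae=vae, lr=3e-4)
# for img in dataloader:  # img: (batch, channels, height, width) float tensor
#     loss, did_generator_step = trainer(img)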
| 26.15 | 78 | 0.644997 |
| 1c98df62f5ac421aa766fe9b19da151cc193fbbb | 3,008 | py | Python | src/m3_robot_code.py | lcopland18/99-CapstoneProject-201930 | 34b68b0010d649a9bf503b1da6d9c53a36aad4b7 | ["MIT"] | null | null | null | src/m3_robot_code.py | lcopland18/99-CapstoneProject-201930 | 34b68b0010d649a9bf503b1da6d9c53a36aad4b7 | ["MIT"] | null | null | null | src/m3_robot_code.py | lcopland18/99-CapstoneProject-201930 | 34b68b0010d649a9bf503b1da6d9c53a36aad4b7 | ["MIT"] | null | null | null |
"""
Capstone Project. Code to run on the EV3 robot (NOT on a laptop).
Author: Your professors (for the framework)
and Ruth Hammond.
Spring term, 2018-2019.
"""
# TODO 1: Put your name in the above.
import mqtt_remote_method_calls as mqtt
import rosebot
import m1_robot_code as m1
import m2_robot_code as m2
class MyRobotDelegate(object):
"""
Defines methods that are called by the MQTT listener when that listener
gets a message (name of the method, plus its arguments)
from a LAPTOP via MQTT.
"""
def __init__(self, robot):
self.robot = robot # type: rosebot.RoseBot
self.mqtt_sender = None # type: mqtt.MqttClient
self.is_time_to_quit = False # Set this to True to exit the robot code
def set_mqtt_sender(self, mqtt_sender):
self.mqtt_sender = mqtt_sender
# TODO: Add methods here as needed.
def arm_up(self, speed):
print("Robot recieved Arm Up")
real_speed = int(speed)
while True:
self.robot.arm_and_claw.motor.turn_on(real_speed)
if self.robot.arm_and_claw.touch_sensor.is_pressed():
break
self.robot.arm_and_claw.motor.turn_off()
def move_arm_to_position(self, speed, position):
print("Robot received Arm To")
# Arguments may arrive as strings over MQTT, so convert them like the other handlers do.
real_speed = int(speed)
position = int(position)
if position < self.robot.arm_and_claw.motor.get_position():
self.robot.arm_and_claw.motor.turn_on(-real_speed)
while True:
if self.robot.arm_and_claw.motor.get_position() == position:
self.robot.arm_and_claw.motor.turn_off()
break
if position > self.robot.arm_and_claw.motor.get_position():
self.robot.arm_and_claw.motor.turn_on(real_speed)
while True:
if self.robot.arm_and_claw.motor.get_position() == position:
self.robot.arm_and_claw.motor.turn_off()
break
def calibrate(self):
print("Calibrate")
self.robot.arm_and_claw.motor.reset_position()
def arm_down(self,speed):
print("Robot recieved Arm Down")
real_speed = -int(speed)
while True:
self.robot.arm_and_claw.motor.turn_on(real_speed)
if self.robot.arm_and_claw.motor.get_position()==0:
break
self.robot.arm_and_claw.motor.turn_off()
def goes_until_sees_color(self, speed, color):
# Drive forward until the color sensor reports the requested color.
# Hedged: assumes the rosebot color sensor exposes get_color(); adjust if the API differs.
print("Robot received Go Until Sees Color")
self.robot.drive_system.go(int(speed), int(speed))
while True:
if self.robot.sensor_system.color_sensor.get_color() == color:
self.robot.drive_system.stop()
break
def print_message_received(method_name, arguments=None):
print()
print("The robot's delegate has received a message")
print("for the ", method_name, " method, with arguments", arguments)
# TODO: Add functions here as needed.
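# Hedged wiring sketch (assumes the course MQTT framework's API; adjust names as needed):
# robot = rosebot.RoseBot()
# delegate = MyRobotDelegate(robot)
# client = mqtt.MqttClient(delegate)  # listener dispatches incoming messages to the delegate
# delegate.set_mqtt_sender(client)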
| 34.574713 | 79 | 0.636968 |
| 6e8eb55a752fbcafcfdb84ee5e056c3b1ca93073 | 6,980 | py | Python | src/CT_RNN.py | ptolmachev/FORCE_learning | 7515d23483600ac7b5ef80bb514aefdc915c57af | ["MIT"] | null | null | null | src/CT_RNN.py | ptolmachev/FORCE_learning | 7515d23483600ac7b5ef80bb514aefdc915c57af | ["MIT"] | null | null | null | src/CT_RNN.py | ptolmachev/FORCE_learning | 7515d23483600ac7b5ef80bb514aefdc915c57af | ["MIT"] | null | null | null |
'''
A script containing a continuous-time RNN with feedback loop
governed by the equations:
tau dv/dt = -v + W_rec * sigma(v) + W_inp * sigma(u) + W_fb * sigma(z) + b
z = W_out @ sigma(v)
# For now output z is a scalar!
sigma(h) function described in 'state_function.py'
'''
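# Note on the integration scheme used below (a sketch of what step() implements):
#   v_{t+1} = v_t + dt * (1/tau) * (-v_t + W_rec@sigma(v_t) + W_inp@sigma(u_t) + W_fb@sigma(z_t) + b)
# i.e. a plain forward-Euler update; a fourth-order Runge-Kutta variant is left
# commented out inside step().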
from matplotlib import pyplot as plt
from copy import deepcopy
from collections import deque
import numpy as np
from scipy.sparse import random
from scipy.sparse.linalg import eigs
from tqdm.auto import tqdm
from scipy.stats import uniform
def generate_recurrent_weights(N, density, sr):
A = (1.0/(density * np.sqrt(N))) * np.array(random(N, N, density, data_rvs=uniform(-1, 2).rvs).todense())
#get eigenvalues
w, v = eigs(A)
A = A * (sr/np.max(np.abs(w)))
return A
class CT_RNN():
def __init__(self, N, num_inps, num_outs, dt, tau=25, sr=0.9, maxlen=1000000, bias=False, sparcity_param = 0.1,
input_scaling=1, fb_scaling=1):
self.maxlen = maxlen
self.tau = tau
self.dt = dt
self.N = N
self.num_inps = num_inps
self.num_outs = num_outs
self.input_scaling = input_scaling
self.fb_scaling = fb_scaling
self.sr = sr
self.sparcity_param = sparcity_param
W_rec = generate_recurrent_weights(self.N, density=self.sparcity_param, sr=self.sr)
W_inp = self.input_scaling * (2 * np.random.rand(N, num_inps) - 1)
W_fb = self.fb_scaling * (2 * np.random.rand(N, num_outs) - 1)
W_out = 1 / (np.sqrt(self.N)) * np.random.rand(num_outs, N)
self.W_rec = W_rec
self.W_inp = W_inp
self.W_fb = W_fb
self.W_out = W_out
if bias == False:
self.b = np.zeros(self.N)
else:
self.b = 0.1 * np.random.randn(self.N)
self.v_init = 0.01 * np.random.randn(self.N)
self.v = self.v_init
self.t = 0
self.activation = lambda x: np.tanh(x)
self.v_history = deque(maxlen=maxlen)
def rhs(self, v, inp_vect, noise_amp):
z = self.W_out @ self.activation(v)
return (1.0/self.tau) * (-v +
(self.W_rec @ self.activation(v)
+ self.W_inp @ self.activation(inp_vect)
+ self.W_fb @ (self.activation(z) + noise_amp * np.random.randn(self.num_outs))
+ self.b))
def step(self, inp_vect, noise_amp):
# k1 = self.dt * self.rhs(self.v, inp_vect, noise_amp)
# k2 = self.dt * self.rhs(self.v + k1 / 2, inp_vect, noise_amp)
# k3 = self.dt * self.rhs(self.v + k2 / 2, inp_vect, noise_amp)
# k4 = self.dt * self.rhs(self.v + k3, inp_vect, noise_amp)
# self.v = self.v + (k1 + 2 * k2 + 2 * k3 + k4) / 6
self.v = self.v + self.dt * self.rhs(self.v, inp_vect, noise_amp)
return None
def update_history(self):
self.v_history.append(deepcopy(self.v))
self.t += self.dt
return None
def clear_history(self):
self.v_history = deque(maxlen=self.maxlen)
return None
def reset_v(self):
self.v = self.v_init
return None
def run(self, T, input_array, noise_amp=0):
#input array must be the length of int(np.ceil(T/self.dt))!
N_steps = int(np.ceil(T/self.dt))
for i in (range(N_steps)):
inp_vect = input_array[:, i]
self.step(inp_vect, noise_amp=noise_amp)
self.update_history()
return None
def train(self, T, input_array, target_array, noise_amp):
# both input_array and target_array must be the length of int(np.ceil(T/self.dt))!
N_steps = int(np.ceil(T / self.dt))
# initialize estimate of inverse hessian matrix
self.P = np.eye(self.N)
# initialize buffers for useful statistics
self.error_buffer = []
self.dw_norm_buffer = []
self.z_buffer = []
for i in tqdm(range(N_steps)):
inp_vect = input_array[:, i]
target = target_array[i] #scalar for now
self.step(inp_vect, noise_amp=noise_amp)
z, e, dW = self.get_weight_update(target)
self.W_out = deepcopy(self.W_out + dW)
self.error_buffer.append(e**2)
self.dw_norm_buffer.append(np.linalg.norm(dW))
self.z_buffer.append(z)
self.update_history()
return self.z_buffer, self.error_buffer, self.dw_norm_buffer
def get_weight_update(self, target):
# update the error for the linear readout: y_t - r_t^T w_{out}_{t-1}
# where y_t is the target at time t,
# r_t is the vector of neural firing rates at time t,
# and the \w_{out}_{t-1} - readout weights
# WARNING
# WORKS ONLY WITH num_outs = 1 for now!
r = self.activation(self.v)
z = self.W_out @ r # output signal
e = (target - z)
# # update an estimate of (X^T X)^{-1} matrix:
# P_{t} = P_{t-1} - ((P_{t-1} r_t) (r_t^T P_{t-1})) / (1 + r_t^T P_{t-1} r_t)
Pr = (self.P @ r.reshape(-1, 1)).flatten()
rPr = np.sum(r * Pr)
c = 1.0 / (1 + rPr)
self.P = self.P - c * (Pr.reshape(-1, 1) @ Pr.reshape(1, -1))
# update the output weights
dw = e * Pr * c
return z, e, dw
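# Recursive least-squares (FORCE) update implemented above, written out for reference:
#   P_t = P_{t-1} - (P_{t-1} r_t r_t^T P_{t-1}) / (1 + r_t^T P_{t-1} r_t)
#   W_out_t = W_out_{t-1} + e_t * (P_t r_t)^T
# with r_t = tanh(v_t) and e_t = y_t - W_out_{t-1} r_t.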
def get_history(self):
v_array = np.array(self.v_history)
return v_array.T
def plot_history(self, list_of_neurons=None):
transients = 100
if list_of_neurons is None:
v_array = self.get_history()[:,transients:]
else:
v_array = self.get_history()[list_of_neurons,transients:]
num_neurons = v_array.shape[0]
fig, ax = plt.subplots(num_neurons, 1, figsize=(15, num_neurons*1))
t_array = np.arange(v_array.shape[-1]) * self.dt
for i in range(num_neurons):
ax[i].plot(t_array, v_array[i, :], linewidth=2, color='k')
ax[i].set_yticks([])
if (i == num_neurons//2):
ax[i].set_ylabel(f'v', fontsize=24, rotation=0)
ax[-1].set_xlabel('t', fontsize=24)
plt.subplots_adjust(hspace=0)
plt.suptitle(f"Trajectory of a neural network, N={self.N}, tau={self.tau}, dt={self.dt}", fontsize=24)
return fig, ax
if __name__ == '__main__':
N = 100
tau = 25 #ms
dt = 1 #ms
num_inputs = 2
num_outs = 1
T = 4000 #ms
rnn = CT_RNN(N, num_inps=num_inputs, num_outs=num_outs, dt=dt, tau=tau, sr=1.2, input_scaling=3)
#periodic input
period = 200 #ms
phi = 2 * np.pi * np.random.rand()
input_array = np.zeros((num_inputs, int(np.ceil(T/dt))))
input_array[0, :] = np.sin(2 * np.pi * np.arange(int(np.ceil(T / dt))) / (period))
input_array[1, :] = np.sin(2 * np.pi * np.arange(int(np.ceil(T / dt))) / (2 * period) + phi)
rnn.run(T, input_array)
fig, ax = rnn.plot_history(list_of_neurons=np.arange(5))
plt.show(block=True)
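# Hedged sketch of FORCE training with the same network (the target below is purely
# illustrative: teach the readout to reproduce input channel 0):
# target_array = input_array[0, :]
# z_buf, err_buf, dw_buf = rnn.train(T, input_array, target_array, noise_amp=0.01)
# plt.plot(err_buf); plt.show(block=True)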
| 36.931217 | 115 | 0.579656 |
| b195a238bb574340fe36d03ce93504d9b63bf2ea | 13,835 | py | Python | updatedb.py | FuelRats/EDDB_JsonAPI | 454de1b1b376de70419ffd71cdc7fc2aa2506154 | ["BSD-3-Clause"] | 3 | 2017-06-28T05:46:05.000Z | 2019-05-28T16:10:06.000Z | updatedb.py | FuelRats/EDDB_JsonAPI | 454de1b1b376de70419ffd71cdc7fc2aa2506154 | ["BSD-3-Clause"] | 2 | 2018-04-23T20:14:22.000Z | 2019-12-11T20:00:43.000Z | updatedb.py | FuelRats/EDDB_JsonAPI | 454de1b1b376de70419ffd71cdc7fc2aa2506154 | ["BSD-3-Clause"] | 2 | 2019-12-11T05:26:25.000Z | 2019-12-11T23:38:23.000Z |
import os, sys, transaction, requests
from odo import odo, dshape
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from sqlalchemy import engine_from_config, sql, orm, schema
from zope.sqlalchemy import mark_changed
from eddb_jsonapi.mymodels import (
DBSession,
System,
Body,
Base,
PopulatedSystem,
Faction,
Station,
Listing)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
print("Beginning update.")
PopulatedSystem.__table__.drop(engine)
Listing.__table__.drop(engine)
Station.__table__.drop(engine)
Faction.__table__.drop(engine)
Body.__table__.drop(engine)
Faction.__table__.create(engine)
PopulatedSystem.__table__.create(engine)
Body.__table__.create(engine)
Station.__table__.create(engine)
Listing.__table__.create(engine)
mark_changed(DBSession())
transaction.commit()
#
# Factions
#
print("Updating factions...")
print("Downloading factions.jsonl from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/factions.jsonl", stream=True)
with open('factions.json', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved factions.json. Updating...")
url = str(engine.url) + "::" + Faction.__tablename__
ds = dshape("var *{ id: ?int64, name: ?string, updated_at: ?int64, government_id: ?int64, "
"government: ?string, allegiance_id: ?int64, allegiance: ?string, "
"state_id: ?int64, state: ?string, home_system_id: ?int64, "
"is_player_faction: ?bool }")
t = odo('jsonlines://factions.json', url, dshape=ds)
print("Done! Creating index...")
DBSession.execute("CREATE INDEX factions_idx ON factions(id)")
mark_changed(DBSession())
transaction.commit()
print("Completed processing factions.")
#
# Systems
#
print("Downloading systems_recently.csv from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/systems_recently.csv", stream=True)
with open('systems_recently.csv', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved systems_recently.csv. Creating temporary table and importing...")
DBSession.execute("CREATE TEMP TABLE systems_tmp (LIKE systems)")
url = str(engine.url) + "::systems_tmp"
ds = dshape("var *{ id: ?int64, edsm_id: ?int64, name: ?string, x: ?float64, y: ?float64, "
"z: ?float64, population: ?int64, is_populated: ?bool, government_id: ?int64, "
"government: ?string, allegiance_id: ?int64, allegiance: ?string, "
"state_id: ?int64, state: ?string, security_id: ?float64, security: ?string, "
"primary_economy_id: ?float64, primary_economy: ?string, power: ?string, "
"power_state: ?string, power_state_id: ?string, needs_permit: ?bool, "
"updated_at: ?int64, simbad_ref: ?string, controlling_minor_faction_id: ?string, "
"controlling_minor_faction: ?string, reserve_type_id: ?float64, reserve_type: ?string }")
t = odo('systems_recently.csv', url, dshape=ds)
print("Updating systems...")
DBSession.execute("INSERT INTO systems(id, edsm_id, name, x, y, z, population, is_populated, government_id, "
"government, allegiance_id, allegiance, state_id, state, security_id, security, "
"primary_economy_id, primary_economy, power, power_state, power_state_id, needs_permit, "
"updated_at, simbad_ref, controlling_minor_faction_id, controlling_minor_faction, "
"reserve_type_id, reserve_type) SELECT id, edsm_id, name, x, y, z, population, is_populated, "
"government_id, government, allegiance_id, allegiance, state_id, state, security_id, security, "
"primary_economy_id, primary_economy, power, power_state, power_state_id, needs_permit, "
"updated_at, simbad_ref, controlling_minor_faction_id, controlling_minor_faction, "
"reserve_type_id, reserve_type from systems_tmp ON CONFLICT DO UPDATE "
"SET edsm_id = EXCLUDED.edsm_id, name = EXCLUDED.name, x = EXCLUDED.x, "
"y = EXCLUDED.y, z = EXCLUDED.z, population = EXCLUDED.population, "
"is_populated = EXCLUDED.population, government_id = EXCLUDED.government_id, "
"government = EXCLUDED.government, allegiance_id = EXCLUDED.allegiance_id, "
"allegiance = EXCLUDED.allegiance, state_id = EXCLUDED.state_id, "
"state = EXCLUDED.state, security_id = EXCLUDED.security_id, security = EXCLUDED.security, "
"primary_economy_id = EXCLUDED.primary_economy_id, primary_economy = EXCLUDED.primary_economy, "
"power = EXCLUDED.power, power_state = EXCLUDED.power_state, power_state_id = "
"EXCLUDED.power_state_id, needs_permit = EXCLUDED.needs_permit, updated_at = "
"EXCLUDED.updated_at, simbad_ref = EXCLUDED.simbad_ref,"
"controlling_minor_faction_id = EXCLUDED.controlling_minor_faction_id, "
"reserve_type_id = EXCLUDED.reserve_type_id, reserve_type = EXCLUDED.reserve_type")
mark_changed(DBSession())
transaction.commit()
print("Done!")
#
# Bodies
#
print("Downloading bodies.jsonl from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/bodies.jsonl", stream=True)
with open('bodies.json', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved bodies.jsonl. Converting JSONL to SQL.")
DBSession.execute("CREATE TEMP TABLE bodies_tmp (LIKE bodies)")
url = str(engine.url) + "::bodies_tmp"
ds = dshape("var *{ id: ?int64, created_at: ?int64, updated_at: ?int64, name: ?string, "
"system_id: ?int64, group_id: ?int64, group_name: ?string, type_id: ?int64, "
"type_name: ?string, distance_to_arrival: ?int64, full_spectral_class: ?string, "
"spectral_class: ?string, spectral_sub_class: ?string, luminosity_class: ?string, "
"luminosity_sub_class: ?string, surface_temperature: ?int64, is_main_star: ?bool, "
"age: ?int64, solar_masses: ?float64, solar_radius: ?float64, catalogue_gliese_id : ?string, "
"catalogue_hipp_id: ?string, catalogue_hd_id: ?string, volcanism_type_id: ?int64, "
"volcanism_type_name: ?string, atmosphere_type_id: ?int64, atmosphere_type_name: ?string, "
"terraforming_state_id: ?int64, terraforming_state_name: ?string, earth_masses: ?float64, "
"radius: ?int64, gravity: ?float64, surface_pressure: ?int64, orbital_period: ?float64, "
"semi_major_axis: ?float64, orbital_eccentricity: ?float64, orbital_inclination: ?float64, "
"arg_of_periapsis: ?float64, rotational_period: ?float64, "
"is_rotational_period_tidally_locked: ?bool, axis_tilt: ?float64, eg_id: ?int64, "
"belt_moon_masses: ?float64, ring_type_id: ?int64, ring_type_name: ?string, "
"ring_mass: ?int64, ring_inner_radius: ?float64, ring_outer_radius: ?float64, "
"rings: ?json, atmosphere_composition: ?json, solid_composition: ?json, "
"materials: ?json, is_landable: ?bool}")
#url = str(engine.url) + "::" + Body.__tablename__
t = odo('jsonlines://bodies.json', url, dshape=ds)
print("Creating indexes...")
DBSession.execute("CREATE INDEX bodies_idx ON bodies(name text_pattern_ops)")
mark_changed(DBSession())
transaction.commit()
DBSession.execute("CREATE INDEX systemid_idx ON bodies(system_id)")
mark_changed(DBSession())
transaction.commit()
print("Done!")
#
# Populated systems
#
print("Downloading systems_populated.jsonl from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/systems_populated.jsonl", stream=True)
with open('systems_populated.json', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved systems_populated.json. Updating...")
url = str(engine.url) + "::" + PopulatedSystem.__tablename__
ds = dshape("var *{ id: ?int64, edsm_id: ?int64, name: ?string, x: ?float64, y: ?float64, "
"z: ?float64, population: ?int64, is_populated: ?bool, government_id: ?int64, "
"government: ?string, allegiance_id: ?int64, allegiance: ?string, "
"state_id: ?int64, state: ?string, security_id: ?float64, security: ?string, "
"primary_economy_id: ?float64, primary_economy: ?string, power: ?string, "
"power_state: ?string, power_state_id: ?string, needs_permit: ?int64, "
"updated_at: ?int64, simbad_ref: ?string, controlling_minor_faction_id: ?string, "
"controlling_minor_faction: ?string, reserve_type_id: ?float64, reserve_type: ?string,"
"minor_faction_presences: ?json }")
t = odo('jsonlines://systems_populated.json', url, dshape=ds)
print("Done! Uppercasing system names...")
DBSession.execute("UPDATE populated_systems SET name = UPPER(name)")
mark_changed(DBSession())
transaction.commit()
print("Creating indexes...")
DBSession.execute("CREATE INDEX index_populated_system_names_trigram ON populated_systems "
"USING GIN(name gin_trgm_ops)")
mark_changed(DBSession())
transaction.commit()
DBSession.execute("CREATE INDEX index_populated_system_names_btree ON populated_systems (name)")
mark_changed(DBSession())
transaction.commit()
print("Completed processing populated systems.")
#
# Stations
#
print("Downloading stations.jsonl from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/stations.jsonl", stream=True)
with open('stations.json', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved stations.json. Updating...")
DBSession.execute("CREATE TEMP TABLE stations_tmp (LIKE stations)")
url = str(engine.url) + "::stations_tmp"
#url = str(engine.url) + "::" + Station.__tablename__
ds = dshape("var *{ id: ?int64, name: ?string, system_id: ?int64, updated_at: ?int64, "
"max_landing_pad_size: ?string, distance_to_star: ?int64, government_id: ?int64, "
"government: ?string, allegiance_id: ?int64, allegiance: ?string, "
"state_id: ?int64, state: ?string, type_id: ?int64, type: ?string, "
"has_blackmarket: ?bool, has_market: ?bool, has_refuel: ?bool, "
"has_repair: ?bool, has_rearm: ?bool, has_outfitting: ?bool, "
"has_shipyard: ?bool, has_docking: ?bool, has_commodities: ?bool, "
"import_commodities: ?json, export_commodities: ?json, prohibited_commodities: ?json, "
"economies: ?json, shipyard_updated_at: ?int64, outfitting_updated_at: ?int64, "
"market_updated_at: ?int64, is_planetary: ?bool, selling_ships: ?json, "
"selling_modules: ?json, settlement_size_id: ?string, settlement_size: ?int64, "
"settlement_security_id: ?int64, settlement_security: ?string, body_id: ?int64,"
"controlling_minor_faction_id: ?int64 }")
t = odo('jsonlines://stations.json', url, dshape=ds)
print("Done! Cleaning stations without body references...")
DBSession.execute("DELETE FROM stations_tmp WHERE body_id NOT IN (SELECT b.id from bodies b)")
mark_changed(DBSession())
transaction.commit()
DBSession.execute("UPDATE stations SET id=t.id, name=t.name, system_id=t.system_id, updated_at=t.updated_at, "
"max_landing_pad_size=t.max_landing_pad_size, ")
DBSession.execute("CREATE INDEX index_stations_systemid_btree ON stations(system_id)")
mark_changed(DBSession())
transaction.commit()
DBSession.execute("CREATE INDEX index_stations_btree ON stations(id)")
mark_changed(DBSession())
transaction.commit()
print("Completed processing stations.")
#
# Listings
#
print("Downloading listings.csv from EDDB.io...")
r = requests.get("https://eddb.io/archive/v5/listings.csv", stream=True)
with open('listings.csv', 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
f.write(chunk)
print("Saved listings.csv. Updating...")
url = str(engine.url) + "::" + Listing.__tablename__
ds = dshape("var *{ id: ?int64, station_id: ?int64, commodity: ?int64, supply: ?int64, "
"buy_price: ?int64, sell_price: ?int64, demand: ?int64, collected_at: ?int64 }")
t = odo('listings.csv', url, dshape=ds)
print("Creating indexes...")
DBSession.execute("CREATE INDEX index_listings_stationid_btree ON listings(station_id)")
mark_changed(DBSession())
transaction.commit()
print("Updates complete.")
if __name__ == '__main__':
    main()
| 53.007663 | 118 | 0.642429 |