hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8eefa185a49def0174464251e27beb1238c9c88 | 93,179 | py | Python | src/west/manifest.py | Mierunski/west | 38e656b05ea8f4c8d80b953f6d88b1ed604d11f8 | [
"Apache-2.0"
] | null | null | null | src/west/manifest.py | Mierunski/west | 38e656b05ea8f4c8d80b953f6d88b1ed604d11f8 | [
"Apache-2.0"
] | null | null | null | src/west/manifest.py | Mierunski/west | 38e656b05ea8f4c8d80b953f6d88b1ed604d11f8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, 2019, 2020 Nordic Semiconductor ASA
# Copyright 2018, 2019 Foundries.io Ltd
#
# SPDX-License-Identifier: Apache-2.0
'''
Parser and abstract data types for west manifests.
'''
import configparser
import enum
import errno
import logging
import os
from pathlib import PurePosixPath, Path
import re
import shlex
import subprocess
import sys
from typing import Any, Callable, Dict, Iterable, List, NoReturn, \
NamedTuple, Optional, Set, Tuple, TYPE_CHECKING, Union
from packaging.version import parse as parse_version
import pykwalify.core
import yaml
from west import util
from west.util import PathType
import west.configuration as cfg
#
# Public constants
#

#: Index in a Manifest.projects attribute where the `ManifestProject`
#: instance for the workspace is stored.
MANIFEST_PROJECT_INDEX = 0

#: A git revision which points to the most recent `Project` update.
MANIFEST_REV_BRANCH = 'manifest-rev'

#: A fully qualified reference to `MANIFEST_REV_BRANCH`.
QUAL_MANIFEST_REV_BRANCH = 'refs/heads/' + MANIFEST_REV_BRANCH

#: Git ref space used by west for internal purposes.
QUAL_REFS_WEST = 'refs/west/'

#: The latest manifest schema version supported by this west program.
#:
#: This value changes when a new version of west includes new manifest
#: file features not supported by earlier versions of west.
SCHEMA_VERSION = '0.10'
# MAINTAINERS:
#
# If you want to update the schema version, you need to make sure that
# it has the exact same value as west.version.__version__ when the
# next release is cut.

#
# Internal helpers
#

# Type aliases

# The value of a west-commands as passed around during manifest
# resolution. It can become a list due to resolving imports, even
# though it's just a str in each individual file right now.
WestCommandsType = Union[str, List[str]]

# Type for the importer callback passed to the manifest constructor.
# (ImportedContentType is just an alias for what it gives back.)
ImportedContentType = Optional[Union[str, List[str]]]
ImporterType = Callable[['Project', str], ImportedContentType]

# Type for an import map filter function, which takes a Project and
# returns a bool. The various allowlists and blocklists are used to
# create these filter functions. A None value is treated as a function
# which always returns True.
ImapFilterFnType = Optional[Callable[['Project'], bool]]

# A list of group names to enable and disable, like ['+foo', '-bar'].
GroupFilterType = List[str]

# A list of group names belonging to a project, like ['foo', 'bar']
GroupsType = List[str]

# The parsed contents of a manifest YAML file as returned by _load(),
# after sanitychecking with validate().
ManifestDataType = Union[str, Dict]

# Logging

# Module-level logger; no handlers are configured in this module, so
# log output destination is decided by the application.
_logger = logging.getLogger(__name__)
# Type for the submodule value passed through the manifest file.
class Submodule(NamedTuple):
    '''Represents a Git submodule within a project.'''
    # Relative path to the submodule within the project repository.
    path: str
    # Submodule name, or None when the manifest does not give one.
    name: Optional[str] = None
# Submodules may be a list of values or a bool.
SubmodulesType = Union[List[Submodule], bool]

# Manifest locating, parsing, loading, etc.

# Default revision to fetch for a project when the manifest doesn't
# specify one (see Project.__init__).
_DEFAULT_REV = 'master'
# Conventional manifest file name within the manifest repository.
_WEST_YML = 'west.yml'
# pykwalify schema file shipped alongside this module.
_SCHEMA_PATH = os.path.join(os.path.dirname(__file__), "manifest-schema.yml")
_SCHEMA_VER = parse_version(SCHEMA_VERSION)
_EARLIEST_VER_STR = '0.6.99'  # we introduced the version feature after 0.6
_VALID_SCHEMA_VERS = [_EARLIEST_VER_STR, '0.7', '0.8', '0.9', SCHEMA_VERSION]

# Manifest import handling

# Matches characters that make a group name invalid: a leading "+" or
# "-", or any whitespace, comma, or colon. See is_group() for why.
_RESERVED_GROUP_RE = re.compile(r'(^[+-]|[\s,:])')
# Matches path separators, which presumably make a project name
# invalid -- the usage site is not in this chunk; verify against it.
_INVALID_PROJECT_NAME_RE = re.compile(r'([/\\])')
#
# Public functions
#
def manifest_path() -> str:
    '''Absolute path of the manifest file in the current workspace.

    Exceptions raised:

    - `west.util.WestNotFound` if called from outside of a west
      workspace

    - `MalformedConfig` if the configuration file has no
      ``manifest.path`` key

    - ``FileNotFoundError`` if no manifest file exists as determined by
      ``manifest.path`` and ``manifest.file``
    '''
    mpath, mname = _mpath()
    path = os.path.join(util.west_topdir(), mpath, mname)
    if not os.path.isfile(path):
        # There's no tidy way to build a FileNotFoundError by hand;
        # raising OSError with ENOENT produces exactly that type.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
    return path
def validate(data: Any) -> None:
    '''Validate manifest data

    Raises an exception if the manifest data is not valid for loading
    by this version of west. (Actually attempting to load the data may
    still fail if it contains imports which cannot be resolved.)

    :param data: YAML manifest data as a string or object
    '''
    if isinstance(data, str):
        raw = data
        data = _load(raw)
        if not isinstance(data, dict):
            raise MalformedManifest(f'{raw} is not a YAML dictionary')
    elif not isinstance(data, dict):
        raise TypeError(f'{data} has type {type(data)}, '
                        'expected valid manifest data')

    if 'manifest' not in data:
        raise MalformedManifest('manifest data contains no "manifest" key')
    data = data['manifest']

    # Make sure this version of west can load this manifest data.
    # This has to happen before the schema check -- later schemas
    # may incompatibly extend this one.
    if 'version' in data:
        version = data['version']
        # As a convenience for the user, accept "version: 0.8" (a
        # YAML float) as well as the quoted string "0.8".
        was_number = not isinstance(version, str)
        version_str = str(version) if was_number else version
        if parse_version(version_str) > _SCHEMA_VER:
            raise ManifestVersionError(version_str)
        if version_str not in _VALID_SCHEMA_VERS:
            msg = (f'invalid version {version_str}; must be one of: ' +
                   ', '.join(_VALID_SCHEMA_VERS))
            if was_number:
                msg += ('. Do you need to quote the value '
                        '(e.g. "0.10" instead of 0.10)?')
            raise MalformedManifest(msg)

    try:
        pykwalify.core.Core(source_data=data,
                            schema_files=[_SCHEMA_PATH]).validate()
    except pykwalify.errors.SchemaError as se:
        raise MalformedManifest(se.msg) from se
# A 'raw' element in a project 'groups:' or manifest 'group-filter:' list,
# as it is parsed from YAML, before conversion to string.
RawGroupType = Union[str, int, float]

def is_group(raw_group: RawGroupType) -> bool:
    '''Check whether a 'raw' project group value 'raw_group' is valid.

    Valid groups are strings that don't contain whitespace, commas
    (","), or colons (":"), and do not start with "-" or "+".

    As a special case, groups may also be nonnegative numbers, to
    avoid forcing users to quote these values in YAML files.

    :param raw_group: the group value to check
    '''
    # Why these characters are reserved:
    #
    # - leading "-"/"+": "-foo" means "disable group foo" and "+foo"
    #   means "enable group foo"
    # - ",": separator character in manifest.group-filter and
    #   'west update --group-filter'
    # - whitespace: guarantees that comma-separated group lists won't
    #   cause 'word' breaks in 'west list' pipelines to cut(1) etc.
    # - ":": reserved namespace for potential future use, e.g.
    #   "--group-filter=path-prefix:foo" style logical groups
    if isinstance(raw_group, (float, int)):
        return raw_group >= 0
    # The regex (same pattern as the module's reserved-group regex)
    # matches exactly the reserved characters described above; re
    # caches the compiled pattern, so this is cheap.
    return bool(raw_group) and re.search(r'(^[+-]|[\s,:])', raw_group) is None
#
# Exception types
#
class MalformedManifest(Exception):
    '''Manifest parsing failed due to invalid data.

    Raised by validate() (and code which uses it) when manifest data
    is not a YAML dictionary, lacks a "manifest" key, has an invalid
    "version", or fails schema validation.
    '''
class MalformedConfig(Exception):
    '''The west configuration was malformed in a way that made a
    manifest operation fail.

    For example, manifest_path() raises this when the configuration
    file has no ``manifest.path`` key.
    '''
class ManifestImportFailed(Exception):
    '''An operation required to resolve a manifest failed.

    Attributes:

    - ``project``: the Project instance with the missing manifest data
    - ``filename``: the missing file, as a str
    '''

    def __init__(self, project: 'Project', filename: str):
        '''
        :param project: the project whose manifest data is missing
        :param filename: the missing file
        '''
        # Pass both through to Exception so args/pickling behave.
        super().__init__(project, filename)
        self.project = project
        self.filename = filename

    def __str__(self):
        return (f'ManifestImportFailed: project {self.project} '
                f'file {self.filename}')
class ManifestVersionError(Exception):
    '''The manifest required a version of west more recent than the
    current version.
    '''

    def __init__(self, version: str, file: Any = None):
        '''
        :param version: the minimum version of west that was required
        :param file: the file that required it, if known
        '''
        super().__init__()
        # The minimum version of west that was required.
        self.version = version
        # The file that required this version of west, or None.
        self.file = file
#
# The main Manifest class and its public helper types, like Project
# and ImportFlag.
#
class ImportFlag(enum.IntFlag):
    '''Bit flags for handling imports when resolving a manifest.

    Note that any "path-prefix:" values set in an "import:" still take
    effect for the project itself even when IGNORE or IGNORE_PROJECTS are
    given. For example, in this manifest::

        manifest:
          projects:
            - name: foo
              import:
                path-prefix: bar

    Project 'foo' has path 'bar/foo' regardless of whether IGNORE or
    IGNORE_PROJECTS is given. This ensures the Project has the same path
    attribute as it normally would if imported projects weren't being
    ignored.
    '''

    # Members are bit flags: values may be combined with "|".

    #: The default value, 0, reads the file system to resolve
    #: "self: import:", and runs git to resolve a "projects:" import.
    DEFAULT = 0

    #: Ignore projects added via "import:" in "self:" and "projects:"
    IGNORE = 1

    #: Always invoke importer callback for "projects:" imports
    FORCE_PROJECTS = 2

    #: Ignore projects added via "import:" : in "projects:" only;
    #: including any projects added via "import:" : in "self:"
    IGNORE_PROJECTS = 4
class Project:
    '''Represents a project defined in a west manifest.

    Attributes:

    - ``name``: project's unique name
    - ``url``: project fetch URL
    - ``revision``: revision to fetch from ``url`` when the
      project is updated
    - ``path``: relative path to the project within the workspace
      (i.e. from ``topdir`` if that is set)
    - ``abspath``: absolute path to the project in the native path name
      format (or ``None`` if ``topdir`` is)
    - ``posixpath``: like ``abspath``, but with slashes (``/``) as
      path separators
    - ``clone_depth``: clone depth to fetch when first cloning the
      project, or ``None`` (the revision should not be a SHA
      if this is used)
    - ``west_commands``: list of YAML files where extension commands in
      the project are declared
    - ``topdir``: the top level directory of the west workspace
      the project is part of, or ``None``
    - ``remote_name``: the name of the remote which should be set up
      when the project is being cloned (default: 'origin')
    - ``groups``: the project's groups (as a list) as given in the manifest.
      If the manifest data contains no groups for the project, this is
      an empty list.
    - ``submodules``: the project's submodules configuration; either
      a list of Submodule objects, or a boolean.
    - ``userdata``: the parsed 'userdata' field in the manifest, or None
    '''

    def __init__(self, name: str, url: str,
                 revision: Optional[str] = None,
                 path: Optional[PathType] = None,
                 submodules: SubmodulesType = False,
                 clone_depth: Optional[int] = None,
                 west_commands: Optional[WestCommandsType] = None,
                 topdir: Optional[PathType] = None,
                 remote_name: Optional[str] = None,
                 groups: Optional[GroupsType] = None,
                 userdata: Optional[Any] = None):
        '''Project constructor.

        If *topdir* is ``None``, then absolute path attributes
        (``abspath`` and ``posixpath``) will also be ``None``.

        :param name: project's ``name:`` attribute in the manifest
        :param url: fetch URL
        :param revision: fetch revision
        :param path: path (relative to topdir), or None for *name*
        :param submodules: submodules to pull within the project
        :param clone_depth: depth to use for initial clone
        :param west_commands: path to a west commands specification YAML
            file in the project, relative to its base directory,
            or list of these
        :param topdir: the west workspace's top level directory
        :param remote_name: the name of the remote which should be
            set up if the project is being cloned (default: 'origin')
        :param groups: a list of groups found in the manifest data for
            the project, after conversion to str and validation.
        '''
        self.name = name
        self.url = url
        self.submodules = submodules
        self.revision = revision or _DEFAULT_REV
        self.clone_depth = clone_depth
        # This assignment goes through the 'path' property setter,
        # which also initializes the cached _abspath/_posixpath.
        self.path = os.fspath(path or name)
        self.west_commands = _west_commands_list(west_commands)
        self.topdir = os.fspath(topdir) if topdir else None
        self.remote_name = remote_name or 'origin'
        self.groups: GroupsType = groups or []
        self.userdata: Any = userdata

    @property
    def path(self) -> str:
        # Relative path to the project within the workspace.
        return self._path

    @path.setter
    def path(self, path: PathType) -> None:
        self._path = os.fspath(path)
        # Invalidate the cached absolute path attributes. They will
        # be recomputed lazily on the next access.
        self._abspath: Optional[str] = None
        self._posixpath: Optional[str] = None

    @property
    def abspath(self) -> Optional[str]:
        # Computed lazily from topdir and path; None without a topdir.
        if self._abspath is None and self.topdir:
            self._abspath = os.path.realpath(os.path.join(self.topdir,
                                                          self.path))
        return self._abspath

    @property
    def posixpath(self) -> Optional[str]:
        # Like abspath, but with forward slashes as separators.
        if self._posixpath is None and self.abspath is not None:
            self._posixpath = Path(self.abspath).as_posix()
        return self._posixpath

    @property
    def name_and_path(self) -> str:
        # Convenience string for log/error messages.
        return f'{self.name} ({self.path})'

    def as_dict(self) -> Dict:
        '''Return a representation of this object as a dict, as it
        would be parsed from an equivalent YAML manifest.
        '''
        ret: Dict = {}
        ret['name'] = self.name
        ret['url'] = self.url
        ret['revision'] = self.revision
        if self.path != self.name:
            ret['path'] = self.path
        if self.clone_depth:
            ret['clone-depth'] = self.clone_depth
        if self.west_commands:
            ret['west-commands'] = \
                _west_commands_maybe_delist(self.west_commands)
        if self.groups:
            ret['groups'] = self.groups
        if self.userdata:
            ret['userdata'] = self.userdata
        return ret

    #
    # Git helpers
    #

    def git(self, cmd: Union[str, List[str]],
            extra_args: Iterable[str] = (),
            capture_stdout: bool = False,
            capture_stderr: bool = False,
            check: bool = True,
            cwd: Optional[PathType] = None) -> subprocess.CompletedProcess:
        '''Run a git command in the project repository.

        :param cmd: git command as a string (or list of strings)
        :param extra_args: sequence of additional arguments to pass to
            the git command (useful mostly if *cmd* is a string).
        :param capture_stdout: if True, git's standard output is
            captured in the ``CompletedProcess`` instead of being
            printed.
        :param capture_stderr: Like *capture_stdout*, but for standard
            error. Use with caution: this may prevent error messages
            from being shown to the user.
        :param check: if given, ``subprocess.CalledProcessError`` is
            raised if git finishes with a non-zero return code
        :param cwd: directory to run git in (default: ``self.abspath``)
        '''
        if isinstance(cmd, str):
            cmd_list = shlex.split(cmd)
        else:
            cmd_list = list(cmd)

        extra_args = list(extra_args)

        if cwd is None:
            if self.abspath is not None:
                cwd = self.abspath
            else:
                raise ValueError('no abspath; cwd must be given')
        elif sys.version_info < (3, 6, 1) and not isinstance(cwd, str):
            # Popen didn't accept a PathLike cwd on Windows until
            # python v3.7; this was backported onto cpython v3.6.1,
            # though. West currently supports "python 3.6", though, so
            # in the unlikely event someone is running 3.6.0 on
            # Windows, do the right thing.
            cwd = os.fspath(cwd)

        args = ['git'] + cmd_list + extra_args
        cmd_str = util.quote_sh_list(args)

        _logger.debug(f"running '{cmd_str}' in {cwd}")
        popen = subprocess.Popen(
            args, cwd=cwd,
            stdout=subprocess.PIPE if capture_stdout else None,
            stderr=subprocess.PIPE if capture_stderr else None)

        stdout, stderr = popen.communicate()

        # We use logger style % formatting here to avoid the
        # potentially expensive overhead of formatting long
        # stdout/stderr strings if the current log level isn't DEBUG,
        # which is the usual case.
        _logger.debug('"%s" exit code: %d stdout: %r stderr: %r',
                      cmd_str, popen.returncode, stdout, stderr)

        if check and popen.returncode:
            raise subprocess.CalledProcessError(popen.returncode, cmd_list,
                                                output=stdout, stderr=stderr)
        else:
            return subprocess.CompletedProcess(popen.args, popen.returncode,
                                               stdout, stderr)

    def sha(self, rev: str, cwd: Optional[PathType] = None) -> str:
        '''Get the SHA for a project revision.

        :param rev: git revision (HEAD, v2.0.0, etc.) as a string
        :param cwd: directory to run command in (default:
            self.abspath)
        '''
        # Though we capture stderr, it will be available as the stderr
        # attribute in the CalledProcessError raised by git() in
        # Python 3.5 and above if this call fails.
        cp = self.git(f'rev-parse {rev}^{{commit}}', capture_stdout=True,
                      cwd=cwd, capture_stderr=True)
        # Assumption: SHAs are hex values and thus safe to decode in ASCII.
        # It'll be fun when we find out that was wrong and how...
        return cp.stdout.decode('ascii').strip()

    def is_ancestor_of(self, rev1: str, rev2: str,
                       cwd: Optional[PathType] = None) -> bool:
        '''Check if 'rev1' is an ancestor of 'rev2' in this project.

        Returns True if rev1 is an ancestor commit of rev2 in the
        given project; rev1 and rev2 can be anything that resolves to
        a commit. (If rev1 and rev2 refer to the same commit, the
        return value is True, i.e. a commit is considered an ancestor
        of itself.) Returns False otherwise.

        :param rev1: commit that could be the ancestor of *rev2*
        :param rev2: commit that could be a descendant or *rev1*
        :param cwd: directory to run command in (default:
            ``self.abspath``)
        '''
        rc = self.git(f'merge-base --is-ancestor {rev1} {rev2}',
                      check=False, cwd=cwd).returncode

        # git exits 0 for "is an ancestor", 1 for "is not"; anything
        # else indicates an error (e.g. unknown revision).
        if rc == 0:
            return True
        elif rc == 1:
            return False
        else:
            raise RuntimeError(f'unexpected git merge-base result {rc}')

    def is_up_to_date_with(self, rev: str,
                           cwd: Optional[PathType] = None) -> bool:
        '''Check if the project is up to date with *rev*, returning
        ``True`` if so.

        This is equivalent to ``is_ancestor_of(rev, 'HEAD',
        cwd=cwd)``.

        :param rev: base revision to check if project is up to date
            with.
        :param cwd: directory to run command in (default:
            ``self.abspath``)
        '''
        return self.is_ancestor_of(rev, 'HEAD', cwd=cwd)

    def is_up_to_date(self, cwd: Optional[PathType] = None) -> bool:
        '''Check if the project HEAD is up to date with the manifest.

        This is equivalent to ``is_up_to_date_with(self.revision,
        cwd=cwd)``.

        :param cwd: directory to run command in (default:
            ``self.abspath``)
        '''
        return self.is_up_to_date_with(self.revision, cwd=cwd)

    def is_cloned(self, cwd: Optional[PathType] = None) -> bool:
        '''Returns ``True`` if ``self.abspath`` looks like a git
        repository's top-level directory, and ``False`` otherwise.

        :param cwd: directory to run command in (default:
            ``self.abspath``)
        '''
        if not self.abspath or not os.path.isdir(self.abspath):
            return False

        # --is-inside-work-tree doesn't require that the directory is
        # the top-level directory of a Git repository. Use --show-cdup
        # instead, which prints an empty string (i.e., just a newline,
        # which we strip) for the top-level directory.
        _logger.debug(f'{self.name}: checking if cloned')
        res = self.git('rev-parse --show-cdup', check=False, cwd=cwd,
                       capture_stderr=True, capture_stdout=True)

        return not (res.returncode or res.stdout.strip())

    def read_at(self, path: PathType, rev: Optional[str] = None,
                cwd: Optional[PathType] = None) -> bytes:
        '''Read file contents in the project at a specific revision.

        :param path: relative path to file in this project
        :param rev: revision to read *path* from (default: ``self.revision``)
        :param cwd: directory to run command in (default: ``self.abspath``)
        '''
        if rev is None:
            rev = self.revision
        cp = self.git(['show', f'{rev}:{os.fspath(path)}'],
                      capture_stdout=True, capture_stderr=True, cwd=cwd)
        return cp.stdout

    def listdir_at(self, path: PathType, rev: Optional[str] = None,
                   cwd: Optional[PathType] = None,
                   encoding: Optional[str] = None) -> List[str]:
        '''List of directory contents in the project at a specific revision.

        The return value is the directory contents as a list of files and
        subdirectories.

        :param path: relative path to file in this project
        :param rev: revision to read *path* from (default: ``self.revision``)
        :param cwd: directory to run command in (default: ``self.abspath``)
        :param encoding: directory contents encoding (default: 'utf-8')
        '''
        if rev is None:
            rev = self.revision
        if encoding is None:
            encoding = 'utf-8'

        # git-ls-tree -z means we get NUL-separated output with no quoting
        # of the file names. Using 'git-show' or 'git-cat-file -p'
        # wouldn't work for files with special characters in their names.
        out = self.git(['ls-tree', '-z', f'{rev}:{os.fspath(path)}'], cwd=cwd,
                       capture_stdout=True, capture_stderr=True).stdout

        # A tab character separates the SHA from the file name in each
        # NUL-separated entry.
        return [f.decode(encoding).split('\t', 1)[1]
                for f in out.split(b'\x00') if f]
# FIXME: this whole class should just go away. See #327.
class ManifestProject(Project):
    '''Represents the manifest repository as a `Project`.

    Meaningful attributes:

    - ``name``: the string ``"manifest"``
    - ``topdir``: the top level directory of the west workspace
      the manifest project controls, or ``None``
    - ``path``: relative path to the manifest repository within the
      workspace, or ``None`` (i.e. from ``topdir`` if that is set)
    - ``abspath``: absolute path to the manifest repository in the
      native path name format (or ``None`` if ``topdir`` is)
    - ``posixpath``: like ``abspath``, but with slashes (``/``) as
      path separators
    - ``west_commands``:``west_commands:`` key in the manifest's
      ``self:`` map. This may be a list of such if the self
      section imports multiple additional files with west commands.

    Other readable attributes included for Project compatibility:

    - ``url``: the empty string; the west manifest is not
      version-controlled by west itself, even though 'west init'
      can fetch a manifest repository from a Git remote
    - ``revision``: ``"HEAD"``
    - ``clone_depth``: ``None``, because there's no URL
    - ``groups``: the empty list
    '''

    def __init__(self, path: Optional[PathType] = None,
                 west_commands: Optional[WestCommandsType] = None,
                 topdir: Optional[PathType] = None):
        '''
        :param path: Relative path to the manifest repository in the
            west workspace, if known.
        :param west_commands: path to a west commands specification YAML
            file in the project, relative to its base directory,
            or list of these
        :param topdir: Root of the west workspace the manifest
            project is inside. If not given, all absolute path
            attributes (abspath and posixpath) will be None.
        '''
        self.name: str = 'manifest'

        # Pretending that this is a Project, even though it's not (#327)
        self.url: str = ''
        self.submodules = False
        self.revision: str = 'HEAD'
        self.clone_depth: Optional[int] = None
        self.groups = []

        # The following type: ignore is necessary since every Project
        # actually has a non-None _path attribute, so the parent class
        # defines its type as 'str', where here we need it to be
        # an Optional[str].
        self._path = os.fspath(path) if path else None  # type: ignore

        # Path related attributes
        self.topdir: Optional[str] = os.fspath(topdir) if topdir else None
        self._abspath: Optional[str] = None
        self._posixpath: Optional[str] = None

        # Extension commands.
        self.west_commands = _west_commands_list(west_commands)

    @property
    def path(self) -> Optional[str]:  # type: ignore
        # Unlike a regular Project, the manifest repository's relative
        # path may be unknown, in which case this is None.
        return self._path

    @path.setter
    def path(self, path: PathType) -> None:
        self._path = os.fspath(path)
        # Invalidate cached absolute paths; they are recomputed
        # lazily on the next access.
        self._abspath = None
        self._posixpath = None

    @property
    def abspath(self) -> Optional[str]:
        # self.path may be None here, so guard before joining.
        if self._abspath is None and self.topdir and self.path:
            self._abspath = os.path.realpath(os.path.join(self.topdir,
                                                          self.path))
        return self._abspath

    @property
    def posixpath(self) -> Optional[str]:
        if self._posixpath is None and self.abspath is not None:
            self._posixpath = Path(self.abspath).as_posix()
        return self._posixpath

    def as_dict(self) -> Dict:
        '''Return a representation of this object as a dict, as it would be
        parsed from an equivalent YAML manifest.'''
        ret: Dict = {}
        if self.path:
            ret['path'] = self.path
        if self.west_commands:
            ret['west-commands'] = \
                _west_commands_maybe_delist(self.west_commands)
        return ret
class Manifest:
'''The parsed contents of a west manifest file.
'''
@staticmethod
def from_file(source_file: Optional[PathType] = None,
**kwargs) -> 'Manifest':
'''Manifest object factory given a source YAML file.
The default behavior is to find the current west workspace's
manifest file and resolve it.
Results depend on the keyword arguments given in *kwargs*:
- If both *source_file* and *topdir* are given, the
returned Manifest object is based on the data in
*source_file*, rooted at *topdir*. The configuration
variable ``manifest.path`` is ignored in this case, though
``manifest.group-filter`` will still be read if it exists.
This allows parsing a manifest file "as if" its project
hierarchy were rooted at another location in the system.
- If neither *source_file* nor *topdir* is given, the file
system is searched for *topdir*. That workspace's
``manifest.path`` configuration option is used to find
*source_file*, ``topdir/<manifest.path>/<manifest.file>``.
- If only *source_file* is given, *topdir* is found
starting there. The directory containing *source_file*
doesn't have to be ``manifest.path`` in this case.
- If only *topdir* is given, that workspace's
``manifest.path`` is used to find *source_file*.
Exceptions raised:
- `west.util.WestNotFound` if no *topdir* can be found
- `MalformedManifest` if *source_file* contains invalid
data
- `ManifestVersionError` if this version of west is too
old to parse the manifest.
- `MalformedConfig` if ``manifest.path`` is needed and
can't be read
- ``ValueError`` if *topdir* is given but is not a west
workspace root
:param source_file: source file to load
:param kwargs: Manifest.__init__ keyword arguments
'''
topdir = kwargs.get('topdir')
if topdir is None:
if source_file is None:
# neither source_file nor topdir: search the filesystem
# for the workspace and use its manifest.path.
topdir = util.west_topdir()
(mpath, mname) = _mpath(topdir=topdir)
kwargs.update({
'topdir': topdir,
'source_file': os.path.join(topdir, mpath, mname),
'manifest_path': mpath
})
else:
# Just source_file: find topdir starting there.
# We need source_file in kwargs as that's what gets used below.
kwargs.update({
'source_file': source_file,
'topdir':
util.west_topdir(start=os.path.dirname(source_file))
})
elif source_file is None:
# Just topdir.
# Verify topdir is a real west workspace root.
msg = f'topdir {topdir} is not a west workspace root'
try:
real_topdir = util.west_topdir(start=topdir, fall_back=False)
except util.WestNotFound:
raise ValueError(msg)
if Path(topdir) != Path(real_topdir):
raise ValueError(f'{msg}; but {real_topdir} is')
# Read manifest.path from topdir/.west/config, and use it
# to locate source_file.
(mpath, mname) = _mpath(topdir=topdir)
source_file = os.path.join(topdir, mpath, mname)
kwargs.update({
'source_file': source_file,
'manifest_path': mpath,
})
else:
# Both source_file and topdir.
kwargs['source_file'] = source_file
return Manifest(**kwargs)
@staticmethod
def from_data(source_data: ManifestDataType, **kwargs) -> 'Manifest':
'''Manifest object factory given parsed YAML data.
This factory does not read any configuration files.
Letting the return value be ``m``. Results then depend on
keyword arguments in *kwargs*:
- Unless *topdir* is given, all absolute paths in ``m``,
like ``m.projects[1].abspath``, are ``None``.
- Relative paths, like ``m.projects[1].path``, are taken
from *source_data*.
- If ``source_data['manifest']['self']['path']`` is not
set, then ``m.projects[MANIFEST_PROJECT_INDEX].abspath``
will be set to *manifest_path* if given.
Returns the same exceptions as the Manifest constructor.
:param source_data: parsed YAML data as a Python object, or a
string with unparsed YAML data
:param kwargs: Manifest.__init__ keyword arguments
'''
kwargs.update({'source_data': source_data})
return Manifest(**kwargs)
    def __init__(self, source_file: Optional[PathType] = None,
                 source_data: Optional[ManifestDataType] = None,
                 manifest_path: Optional[PathType] = None,
                 topdir: Optional[PathType] = None,
                 importer: Optional[ImporterType] = None,
                 import_flags: ImportFlag = ImportFlag.DEFAULT,
                 **kwargs: Dict[str, Any]):
        '''
        Using `from_file` or `from_data` is usually easier than direct
        instantiation.

        Instance attributes:

        - ``projects``: sequence of `Project`

        - ``topdir``: west workspace top level directory, or
          None

        - ``path``: path to the manifest file itself, or None

        - ``has_imports``: bool, True if the manifest contains
          an "import:" attribute in "self:" or "projects:"; False
          otherwise

        - ``group_filter``: a group filter value equivalent to
          the resolved manifest's "group-filter:", along with any
          values from imported manifests. This value may be simpler
          than the actual input data.

        Exactly one of *source_file* and *source_data* must be given.

        If *source_file* is given:

        - If *topdir* is too, ``projects`` is rooted there.

        - Otherwise, *topdir* is found starting at *source_file*.

        If *source_data* is given:

        - If *topdir* is too, ``projects`` is rooted there.

        - Otherwise, there is no root: ``projects[i].abspath`` and
          other absolute path attributes are ``None``.

        - If ``source_data['manifest']['self']['path']`` is unset,
          *manifest_path* is used as a fallback.

        The *importer* kwarg, if given, is a callable. It is called
        when *source_file* requires importing manifest data that
        aren't found locally. It will be called as:

        ``importer(project, file)``

        where ``project`` is a `Project` and ``file`` is the missing
        file. The file's contents at refs/heads/manifest-rev should
        usually be returned, potentially after fetching the project's
        revision from its remote URL and updating that ref.

        The return value should be a string containing manifest data,
        or a list of strings if ``file`` is a directory containing
        YAML files. A return value of None will cause the import to be
        ignored.

        Exceptions raised:

        - `MalformedManifest`: if the manifest data is invalid

        - `ManifestImportFailed`: if the manifest could not be
          resolved due to import errors

        - `ManifestVersionError`: if this version of west is too
          old to parse the manifest

        - `WestNotFound`: if *topdir* was needed and not found

        - ``ValueError``: for other invalid arguments

        :param source_file: YAML file containing manifest data
        :param source_data: parsed YAML data as a Python object, or a
            string containing unparsed YAML data
        :param manifest_path: fallback `ManifestProject` ``path``
            attribute
        :param topdir: used as the west workspace top level
            directory
        :param importer: callback to resolve missing manifest import
            data
        :param import_flags: bit mask, controls import resolution
        '''
        if source_file and source_data:
            raise ValueError('both source_file and source_data were given')
        if not _flags_ok(import_flags):
            raise ValueError(f'bad import_flags {import_flags:x}')

        self.path: Optional[str] = None
        '''Path to the file containing the manifest, or None if
        created from data rather than the file system.
        '''
        # If we were given a file, read its text and remember its
        # absolute location for error messages and relative lookups.
        if source_file:
            source_file = Path(source_file)
            source_data = source_file.read_text()
            self.path = os.path.abspath(source_file)

        if not source_data:
            self._malformed('manifest contains no data')

        if isinstance(source_data, str):
            source_data = _load(source_data)

        # Validate the manifest. Wrap a couple of the exceptions with
        # extra context about the problematic file in case of errors,
        # to help debugging.
        try:
            validate(source_data)
        except ManifestVersionError as mv:
            raise ManifestVersionError(mv.version, file=source_file) from mv
        except MalformedManifest as mm:
            self._malformed(mm.args[0], parent=mm)
        except TypeError as te:
            self._malformed(te.args[0], parent=te)

        # The above validate() and exception handling block's job is
        # to ensure this, but pacify the type checker in a way that
        # crashes if something goes wrong with that.
        assert isinstance(source_data, dict)

        self._projects: List[Project] = []
        '''Sequence of `Project` objects representing manifest
        projects.

        Index 0 (`MANIFEST_PROJECT_INDEX`) contains a
        `ManifestProject` representing the manifest repository. The
        rest of the sequence contains projects in manifest file order
        (or resolution order if the manifest contains imports).
        '''

        self.topdir: Optional[str] = None
        '''The west workspace's top level directory, or None.'''
        if topdir:
            self.topdir = os.fspath(topdir)

        # Set to True by _load() if an "import:" key is encountered.
        self.has_imports: bool = False

        # This will be overwritten in _load() as needed.
        self.group_filter: GroupFilterType = []

        # Private state which backs self.group_filter. This also
        # gets overwritten as needed.
        self._disabled_groups: Set[str] = set()

        # Stash the importer and flags in instance attributes. These
        # don't change as we recurse, so they don't belong in _import_ctx.
        self._importer: ImporterType = importer or _default_importer
        self._import_flags = import_flags

        # NOTE(review): 'import-context' appears to be an internal
        # kwarg passed when Manifest recurses while resolving imports
        # (see _import_ctx); confirm against the _load()/import code.
        ctx: Optional[_import_ctx] = \
            kwargs.get('import-context')  # type: ignore
        if ctx is None:
            ctx = _import_ctx(projects={},
                              group_filter=[],
                              imap_filter=None,
                              path_prefix=Path('.'))
        else:
            assert isinstance(ctx, _import_ctx)

        if manifest_path:
            mpath: Optional[Path] = Path(manifest_path)
        else:
            mpath = None

        # Resolve the manifest data, filling in self._projects etc.
        self._load(source_data['manifest'], mpath, ctx)
def get_projects(self,
                 # any str name is also a PathType
                 project_ids: Iterable[PathType],
                 allow_paths: bool = True,
                 only_cloned: bool = False) -> List[Project]:
    '''Resolve *project_ids* to a list of `Project` objects.

    An empty *project_ids* yields a copy of ``self.projects`` as a
    list. Otherwise, the returned list contains one project per ID,
    in the same order as *project_ids*.

    ``ValueError`` is raised if:

    - *project_ids* contains unknown project IDs

    - (with *only_cloned*) an uncloned project was found

    The ``ValueError`` *args* attribute is a 2-tuple with a list
    of unknown *project_ids* at index 0, and a list of uncloned
    `Project` objects at index 1.

    :param project_ids: a sequence of projects, identified by name
        or (absolute or relative) path. Names are matched first; path
        checking can be disabled with *allow_paths*.
    :param allow_paths: if false, *project_ids* is assumed to contain
        names only, not paths
    :param only_cloned: raise an exception for uncloned projects
    '''
    all_projects = list(self.projects)
    not_found: List[PathType] = []     # IDs that match no project
    missing_clone: List[Project] = []  # uncloned projects (only_cloned)
    resolved: List[Project] = []       # successfully resolved projects

    # No IDs at all means "every project in the manifest".
    if not project_ids:
        if only_cloned:
            missing_clone = [p for p in all_projects
                             if not p.is_cloned()]
            if missing_clone:
                raise ValueError(not_found, missing_clone)
        return all_projects

    # Match each ID by name first, then (optionally) by resolved path.
    for pid in project_ids:
        match: Optional[Project] = None
        if isinstance(pid, str):
            match = self._projects_by_name.get(pid)
        if match is None and allow_paths:
            match = self._projects_by_rpath.get(Path(pid).resolve())
        if match is None:
            not_found.append(pid)
            continue
        resolved.append(match)
        if only_cloned and not match.is_cloned():
            missing_clone.append(match)

    if not_found or (only_cloned and missing_clone):
        raise ValueError(not_found, missing_clone)
    return resolved
def as_dict(self) -> Dict:
    '''Return a fully resolved dict representation of self.

    "Resolved" means the result looks as though every project had
    been declared in a single manifest file, with no import
    attributes anywhere.
    '''
    return self._as_dict_helper()
def as_frozen_dict(self) -> Dict:
    '''Returns a dict representing self, but frozen.

    The value is "frozen" in that all project revisions are the
    full SHAs pointed to by `QUAL_MANIFEST_REV_BRANCH` references.

    Raises ``RuntimeError`` if a project SHA can't be resolved.
    '''
    # Bug fix: the previous body referenced an undefined name
    # 'pdict', raising NameError on every call. Define the
    # project-to-dict conversion which freezes each revision to the
    # SHA its manifest-rev ref points at.
    def pdict(p):
        if not p.is_cloned():
            raise RuntimeError(f'cannot freeze; project {p.name} '
                               'is uncloned')
        try:
            sha = p.sha(QUAL_MANIFEST_REV_BRANCH)
        except subprocess.CalledProcessError as cpe:
            raise RuntimeError(f'cannot freeze; project {p.name} '
                               f'ref {QUAL_MANIFEST_REV_BRANCH} '
                               'cannot be resolved to a SHA') from cpe
        d = p.as_dict()
        d['revision'] = sha
        return d

    return self._as_dict_helper(pdict=pdict)
def as_yaml(self, **kwargs) -> str:
    '''Return a fully resolved YAML representation of self.

    The output is as if every project had been defined in a single
    manifest file without any import attributes.

    :param kwargs: passed to yaml.safe_dump()
    '''
    resolved = self.as_dict()
    return yaml.safe_dump(resolved, **kwargs)
def as_frozen_yaml(self, **kwargs) -> str:
    '''Return a frozen YAML representation of self.

    "Frozen" means every project revision is the full SHA pointed
    to by its `QUAL_MANIFEST_REV_BRANCH` reference.

    Raises ``RuntimeError`` if a project SHA can't be resolved.

    :param kwargs: passed to yaml.safe_dump()
    '''
    frozen = self.as_frozen_dict()
    return yaml.safe_dump(frozen, **kwargs)
def is_active(self, project: Project,
              extra_filter: Optional[Iterable[str]] = None) -> bool:
    '''Is a project active?

    Projects with empty 'project.groups' lists are always active.

    Otherwise, if any group in 'project.groups' is enabled by this
    manifest's 'group-filter:' list (and the
    'manifest.group-filter' local configuration option, if we have
    a workspace), returns True.

    Otherwise, i.e. if all of the project's groups are disabled,
    this returns False.

    "Inactive" projects should generally be considered absent from
    the workspace for purposes like updating it, listing projects,
    etc.

    :param project: project to check
    :param extra_filter: an optional additional group filter
    '''
    # Bug fix: this was erroneously decorated with @property. A
    # property getter is called with only 'self', so attribute
    # access always raised TypeError (missing 'project'), and
    # callers had no way to pass arguments. It must be an ordinary
    # method.
    if not project.groups:
        # Projects without any groups are always active, so just
        # exit early. Note that this happens to treat the
        # ManifestProject as though it's always active. This is
        # important for keeping it in the 'west list' output for
        # now.
        return True

    # Load manifest.group-filter from the configuration file if we
    # haven't already. Only do this once so we don't hit the file
    # system for every project when looping over the manifest.
    cfg_gf = self._config_group_filter

    # Figure out what the disabled groups are. Skip reallocation
    # if possible.
    if cfg_gf or extra_filter is not None:
        disabled_groups = set(self._disabled_groups)
        if cfg_gf:
            _update_disabled_groups(disabled_groups, cfg_gf)
        if extra_filter is not None:
            extra_filter = self._validated_group_filter(None,
                                                        list(extra_filter))
            _update_disabled_groups(disabled_groups, extra_filter)
    else:
        disabled_groups = self._disabled_groups

    return any(group not in disabled_groups for group in project.groups)
@property
# Copyright (c) 2018, 2019, 2020 Nordic Semiconductor ASA
# Copyright 2018, 2019 Foundries.io Ltd
#
# SPDX-License-Identifier: Apache-2.0
'''
Parser and abstract data types for west manifests.
'''
import configparser
import enum
import errno
import logging
import os
from pathlib import PurePosixPath, Path
import re
import shlex
import subprocess
import sys
from typing import Any, Callable, Dict, Iterable, List, NoReturn, \
NamedTuple, Optional, Set, Tuple, TYPE_CHECKING, Union
from packaging.version import parse as parse_version
import pykwalify.core
import yaml
from west import util
from west.util import PathType
import west.configuration as cfg
#
# Public constants
#
#: Index in a Manifest.projects attribute where the `ManifestProject`
#: instance for the workspace is stored.
MANIFEST_PROJECT_INDEX = 0
#: A git revision which points to the most recent `Project` update.
MANIFEST_REV_BRANCH = 'manifest-rev'
#: A fully qualified reference to `MANIFEST_REV_BRANCH`.
QUAL_MANIFEST_REV_BRANCH = 'refs/heads/' + MANIFEST_REV_BRANCH
#: Git ref space used by west for internal purposes.
QUAL_REFS_WEST = 'refs/west/'
#: The latest manifest schema version supported by this west program.
#:
#: This value changes when a new version of west includes new manifest
#: file features not supported by earlier versions of west.
SCHEMA_VERSION = '0.10'
# MAINTAINERS:
#
# If you want to update the schema version, you need to make sure that
# it has the exact same value as west.version.__version__ when the
# next release is cut.
#
# Internal helpers
#
# Type aliases
# The value of a west-commands as passed around during manifest
# resolution. It can become a list due to resolving imports, even
# though it's just a str in each individual file right now.
WestCommandsType = Union[str, List[str]]
# Type for the importer callback passed to the manifest constructor.
# (ImportedContentType is just an alias for what it gives back.)
ImportedContentType = Optional[Union[str, List[str]]]
ImporterType = Callable[['Project', str], ImportedContentType]
# Type for an import map filter function, which takes a Project and
# returns a bool. The various allowlists and blocklists are used to
# create these filter functions. A None value is treated as a function
# which always returns True.
ImapFilterFnType = Optional[Callable[['Project'], bool]]
# A list of group names to enable and disable, like ['+foo', '-bar'].
GroupFilterType = List[str]
# A list of group names belonging to a project, like ['foo', 'bar']
GroupsType = List[str]
# The parsed contents of a manifest YAML file as returned by _load(),
# after sanitychecking with validate().
ManifestDataType = Union[str, Dict]
# Logging
_logger = logging.getLogger(__name__)
# Type for the submodule value passed through the manifest file.
class Submodule(NamedTuple):
    '''Represents a Git submodule within a project.'''
    # Path to the submodule, relative to the parent project repository.
    path: str
    # Submodule name as given in the manifest; None if not named.
    name: Optional[str] = None
# Submodules may be a list of values or a bool.
SubmodulesType = Union[List[Submodule], bool]
# Manifest locating, parsing, loading, etc.
class _defaults(NamedTuple):
    '''Default project attributes from a manifest 'defaults:' section.'''
    # Default remote name, or None if the manifest doesn't set one.
    remote: Optional[str]
    # Default revision to use for projects that don't specify one.
    revision: str
# Revision used when neither a project nor 'defaults:' gives one.
_DEFAULT_REV = 'master'
# Default manifest file name inside the manifest repository.
_WEST_YML = 'west.yml'
# pykwalify schema used by validate().
_SCHEMA_PATH = os.path.join(os.path.dirname(__file__), "manifest-schema.yml")
# SCHEMA_VERSION parsed once for version comparisons.
_SCHEMA_VER = parse_version(SCHEMA_VERSION)
_EARLIEST_VER_STR = '0.6.99'  # we introduced the version feature after 0.6
# All schema version strings this west accepts in 'manifest: version:'.
_VALID_SCHEMA_VERS = [_EARLIEST_VER_STR, '0.7', '0.8', '0.9', SCHEMA_VERSION]
def _is_yml(path: PathType) -> bool:
    # True if 'path' names a YAML file, judging only by its extension.
    return Path(path).suffix in ('.yml', '.yaml')
def _load(data: str) -> Any:
    # Parse YAML manifest data, converting low-level scanner errors
    # into MalformedManifest.
    try:
        return yaml.safe_load(data)
    except yaml.scanner.ScannerError as err:
        raise MalformedManifest(data) from err
def _west_commands_list(west_commands: Optional[WestCommandsType]) -> \
        List[str]:
    # Normalize raw manifest west-commands data to a list of
    # locations: None becomes [], a bare string becomes a
    # one-element list, and an existing list is copied defensively.
    if west_commands is None:
        return []
    if isinstance(west_commands, str):
        return [west_commands]
    return list(west_commands)
def _west_commands_maybe_delist(west_commands: List[str]) -> WestCommandsType:
    # Collapse a one-element west_commands list back to a plain
    # string; any other list is returned unchanged.
    if len(west_commands) == 1:
        return west_commands[0]
    return west_commands
def _west_commands_merge(wc1: List[str], wc2: List[str]) -> List[str]:
# Merge two west_commands lists, filtering out duplicates.
if wc1 and wc2:
return wc1 + [wc for wc in wc2 if wc not in wc1]
else:
return wc1 or wc2
def _mpath(cp: Optional[configparser.ConfigParser] = None,
           topdir: Optional[PathType] = None) -> Tuple[str, str]:
    # Look up where the manifest repository lives, returning
    # (manifest.path, manifest.file) from local configuration. When
    # *cp* isn't given, a fresh ConfigParser is created and local
    # configuration is loaded for the workspace rooted at *topdir*.
    #
    # TODO: write a cfg.get(section, key)
    # wrapper, with friends for update and delete, to avoid
    # requiring this boilerplate.
    if cp is None:
        cp = cfg._configparser()
        cfg.read_config(configfile=cfg.ConfigFile.LOCAL, config=cp, topdir=topdir)

    try:
        return (cp.get('manifest', 'path'),
                cp.get('manifest', 'file', fallback=_WEST_YML))
    except (configparser.NoOptionError, configparser.NoSectionError) as err:
        raise MalformedConfig('no "manifest.path" config option is set') from err
# Manifest import handling
def _default_importer(project: 'Project', file: str) -> NoReturn:
    # Fallback importer callback used when the Manifest constructor
    # isn't given one: any attempt to import manifest data fails.
    raise ManifestImportFailed(project, file)
def _manifest_content_at(project: 'Project', path: PathType,
                         rev: str = QUAL_MANIFEST_REV_BRANCH) \
                         -> ImportedContentType:
    # Get a list of manifest data from project at path
    #
    # The data are loaded from Git at ref QUAL_MANIFEST_REV_BRANCH,
    # *NOT* the file system.
    #
    # If path is a tree at that ref, the contents of the YAML files
    # inside path are returned, as strings. If it's a file at that
    # ref, it's a string with its contents.
    #
    # Though this module and the "west update" implementation share
    # this code, it's an implementation detail, not API.
    path = os.fspath(path)
    _logger.debug(f'{project.name}: looking up path {path} type at {rev}')

    # 'git ls-tree' prints '<mode> <type> <object>\t<file>' for path
    # at the revision if it exists; empty output means it doesn't.
    # The second field is 'blob', 'tree', etc.
    out = project.git(['ls-tree', rev, path], capture_stdout=True,
                      capture_stderr=True).stdout

    if not out:
        # It's a bit inaccurate to raise FileNotFoundError for
        # something that isn't actually file, but this is internal
        # API, and git is a content addressable file system, so close
        # enough!
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)

    ptype = out.decode('utf-8').split()[1]

    if ptype == 'blob':
        # Importing a file: just return its content.
        return project.read_at(path, rev=rev).decode('utf-8')
    elif ptype == 'tree':
        # Importing a tree: return the content of the YAML files inside it.
        ret = []
        # Use a PurePosixPath because that's the form git seems to
        # store internally, even on Windows.
        pathobj = PurePosixPath(path)
        for f in filter(_is_yml, project.listdir_at(path, rev=rev)):
            ret.append(project.read_at(pathobj / f, rev=rev).decode('utf-8'))
        return ret
    else:
        raise MalformedManifest(f"can't decipher project {project.name} "
                                f'path {path} revision {rev} '
                                f'(git type: {ptype})')
class _import_map(NamedTuple):
    '''Parsed 'import:' map value from a manifest file.'''
    # Manifest file (or directory of YAML files) to import.
    file: str
    # Allowlists and blocklists restricting which imported projects
    # are kept; each may be a single string or a list of strings.
    name_allowlist: List[str]
    path_allowlist: List[str]
    name_blocklist: List[str]
    path_blocklist: List[str]
    # Path prefix prepended to imported projects' paths.
    path_prefix: str
def _is_imap_list(value: Any) -> bool:
# Return True if the value is a valid import map 'blocklist' or
# 'allowlist'. Empty strings and lists are OK, and list nothing.
return (isinstance(value, str) or
(isinstance(value, list) and
all(isinstance(item, str) for item in value)))
def _imap_filter(imap: _import_map) -> ImapFilterFnType:
    # Build a filter function for 'imap', or None when the map has no
    # allowlists or blocklists at all (i.e. no filtering is needed).
    has_lists = (imap.name_allowlist or imap.path_allowlist or
                 imap.name_blocklist or imap.path_blocklist)
    if not has_lists:
        return None
    return lambda project: _is_imap_ok(imap, project)
def _ensure_list(item: Union[str, List[str]]) -> List[str]:
# Converts item to a list containing it if item is a string, or
# returns item.
if isinstance(item, str):
return [item]
return item
def _is_imap_ok(imap: _import_map, project: 'Project') -> bool:
    # Decide whether 'project' passes the import map's filters.
    # Blocklisted projects survive only if also allowlisted;
    # otherwise a project is kept when it's allowlisted or when the
    # map defines no allowlists at all.
    name_allow, path_allow, name_block, path_block = (
        _ensure_list(lst) for lst in (imap.name_allowlist,
                                      imap.path_allowlist,
                                      imap.name_blocklist,
                                      imap.path_blocklist))
    path = Path(project.path)
    blocked = (project.name in name_block or
               any(path.match(pattern) for pattern in path_block))
    allowed = (project.name in name_allow or
               any(path.match(pattern) for pattern in path_allow))
    if blocked:
        return allowed
    return allowed or not (name_allow or path_allow)
class _import_ctx(NamedTuple):
    '''State that changes as we recurse down the manifest import tree.'''

    # The current map from already-defined project names to Projects.
    #
    # This is shared, mutable state between Manifest() constructor
    # calls that happen during resolution. We mutate this directly
    # when handling 'manifest: projects:' lists. Manifests which are
    # imported earlier get higher precedence: if a 'projects:' list
    # contains a name which is already present here, we ignore that
    # element.
    projects: Dict[str, 'Project']

    # The current shared group filter. This is mutable state in the
    # same way 'projects' is. Manifests which are imported earlier get
    # higher precedence here too.
    #
    # This is done by prepending (NOT appending) any 'manifest:
    # group-filter:' lists we encounter during import resolution onto
    # this list. Since group-filter lists have "last entry wins"
    # semantics, earlier manifests take precedence.
    group_filter: GroupFilterType

    # The current restrictions on which projects the importing
    # manifest is interested in.
    #
    # These accumulate as we pick up additional allowlists and
    # blocklists in 'import: <map>' values. We handle this composition
    # using _compose_ctx_and_imap().
    imap_filter: ImapFilterFnType

    # The current prefix which should be added to any project paths
    # as defined by all the importing manifests up to this point.
    # These accumulate as we pick up 'import: path-prefix: ...' values,
    # also using _compose_ctx_and_imap().
    path_prefix: Path
def _compose_ctx_and_imap(ctx: _import_ctx, imap: _import_map) -> _import_ctx:
    # Fold the restrictions in "some-map" from a manifest's
    # "import: some-map" into an existing import context. The shared
    # 'projects' and 'group_filter' state carries over unchanged; the
    # filter functions AND together and the path prefixes join.
    combined_filter = _compose_imap_filters(ctx.imap_filter,
                                            _imap_filter(imap))
    return _import_ctx(projects=ctx.projects,
                       group_filter=ctx.group_filter,
                       imap_filter=combined_filter,
                       path_prefix=ctx.path_prefix / imap.path_prefix)
def _imap_filter_allows(imap_filter: ImapFilterFnType,
                        project: 'Project') -> bool:
    # A None filter allows every project; otherwise, ask the filter.
    if imap_filter is None:
        return True
    return imap_filter(project)
def _compose_imap_filters(imap_filter1: ImapFilterFnType,
                          imap_filter2: ImapFilterFnType) -> ImapFilterFnType:
    # Compose two import map filters into one which ANDs their
    # results. If either filter is None ("allow everything"), the
    # other one is the composition.
    if not (imap_filter1 and imap_filter2):
        return imap_filter1 or imap_filter2
    # Type annotated aliases; these silence mypy warnings.
    first: Callable[['Project'], bool] = imap_filter1
    second: Callable[['Project'], bool] = imap_filter2
    return lambda project: first(project) and second(project)
# Characters a group name may not contain: a leading '+' or '-'
# (reserved for enable/disable syntax), or any whitespace, comma, or
# colon anywhere. See is_group() for the rationale.
_RESERVED_GROUP_RE = re.compile(r'(^[+-]|[\s,:])')
# Path separators are not allowed in project names.
_INVALID_PROJECT_NAME_RE = re.compile(r'([/\\])')
def _update_disabled_groups(disabled_groups: Set[str],
                            group_filter: GroupFilterType):
    # Apply 'group_filter' to 'disabled_groups' in place:
    # '-group' disables a group and '+group' re-enables it, with
    # later entries overriding earlier ones.
    for element in group_filter:
        prefix, group = element[:1], element[1:]
        if prefix == '-':
            disabled_groups.add(group)
        elif prefix == '+':
            disabled_groups.discard(group)
        else:
            # We should never get here. This private helper is only
            # meant to be invoked on valid data.
            assert False, \
                (f"Unexpected group filter item {element}. "
                 "This is a west bug. Please report it to the developers "
                 "along with as much information as you can, such as the "
                 "stack trace that preceded this message.")
def _is_submodule_dict_ok(subm: Any) -> bool:
# Check whether subm is a dict that contains the expected
# submodule fields of proper types.
class _failed(Exception):
pass
def _assert(cond):
if not cond:
raise _failed()
try:
_assert(isinstance(subm, dict))
# Required key
_assert('path' in subm)
# Allowed keys
for k in subm:
_assert(k in ['path', 'name'])
_assert(isinstance(subm[k], str))
except _failed:
return False
return True
#
# Public functions
#
def manifest_path() -> str:
    '''Absolute path of the manifest file in the current workspace.

    Exceptions raised:

        - `west.util.WestNotFound` if called from outside of a west
          workspace

        - `MalformedConfig` if the configuration file has no
          ``manifest.path`` key

        - ``FileNotFoundError`` if no manifest file exists as determined by
          ``manifest.path`` and ``manifest.file``
    '''
    (path, filename) = _mpath()
    result = os.path.join(util.west_topdir(), path, filename)
    if not os.path.isfile(result):
        # Manually instantiating a FileNotFoundError is annoying;
        # raising OSError with ENOENT produces one.
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), result)
    return result
def validate(data: Any) -> None:
    '''Validate manifest data

    Raises an exception if the manifest data is not valid for loading
    by this version of west. (Actually attempting to load the data may
    still fail if the it contains imports which cannot be resolved.)

    :param data: YAML manifest data as a string or object
    '''
    if isinstance(data, str):
        as_str = data
        data = _load(data)
        if not isinstance(data, dict):
            raise MalformedManifest(f'{as_str} is not a YAML dictionary')
    elif not isinstance(data, dict):
        raise TypeError(f'{data} has type {type(data)}, '
                        'expected valid manifest data')

    if 'manifest' not in data:
        raise MalformedManifest('manifest data contains no "manifest" key')

    data = data['manifest']

    # The version check must happen before the schema check: later
    # schemas may incompatibly extend this one.
    if 'version' in data:
        _check_schema_version(data['version'])

    try:
        pykwalify.core.Core(source_data=data,
                            schema_files=[_SCHEMA_PATH]).validate()
    except pykwalify.errors.SchemaError as se:
        raise MalformedManifest(se.msg) from se

def _check_schema_version(raw_version: Any) -> None:
    # Verify that this west can load a manifest whose minimum schema
    # version is 'raw_version'. As a convenience for the user,
    # a float is accepted in place of a string, i.e. both of these
    # work:
    #
    #    version: "0.8"
    #    version: 0.8
    if isinstance(raw_version, str):
        version_str = raw_version
        casted_to_str = False
    else:
        version_str = str(raw_version)
        casted_to_str = True

    if parse_version(version_str) > _SCHEMA_VER:
        raise ManifestVersionError(version_str)
    if version_str not in _VALID_SCHEMA_VERS:
        msg = (f'invalid version {version_str}; must be one of: ' +
               ', '.join(_VALID_SCHEMA_VERS))
        if casted_to_str:
            msg += ('. Do you need to quote the value '
                    '(e.g. "0.10" instead of 0.10)?')
        raise MalformedManifest(msg)
# A 'raw' element in a project 'groups:' or manifest 'group-filter:' list,
# as it is parsed from YAML, before conversion to string.
RawGroupType = Union[str, int, float]
def is_group(raw_group: RawGroupType) -> bool:
    '''Is a 'raw' project group value 'raw_group' valid?

    Valid groups are strings that don't contain whitespace, commas
    (","), or colons (":"), and do not start with "-" or "+".

    As a special case, groups may also be nonnegative numbers, to
    avoid forcing users to quote these values in YAML files.

    :param raw_group: the group value to check
    '''
    # Why these restrictions:
    #
    # - a leading "-" or "+" is reserved: "-foo" means "disable group
    #   foo" and "+foo" means "enable group foo"
    #
    # - commas are the separator character in manifest.group-filter
    #   and 'west update --group-filter'
    #
    # - whitespace is banned mostly to guarantee that printing
    #   comma-separated lists of groups won't cause 'word' breaks
    #   in 'west list' pipelines to cut(1) or similar
    #
    # - colons are reserved namespace for potential future use; we
    #   might want something like "--group-filter=path-prefix:foo"
    #   to create additional logical groups based on the workspace
    #   layout or other metadata
    if isinstance(raw_group, (float, int)):
        return raw_group >= 0
    return bool(raw_group and not _RESERVED_GROUP_RE.search(raw_group))
#
# Exception types
#
class MalformedManifest(Exception):
    '''Raised when manifest parsing fails due to invalid data.'''
class MalformedConfig(Exception):
    '''Raised when a malformed west configuration causes a manifest
    operation to fail.
    '''
class ManifestImportFailed(Exception):
    '''An operation required to resolve a manifest failed.

    Attributes:

    - ``project``: the Project instance with the missing manifest data
    - ``filename``: the missing file, as a str
    '''

    def __init__(self, project: 'Project', filename: PathType):
        super().__init__(project, filename)
        # Keep the offending project and the (str-normalized) file
        # name available to exception handlers.
        self.project = project
        self.filename = os.fspath(filename)

    def __str__(self):
        return ('ManifestImportFailed: '
                f'project {self.project} file {self.filename}')
class ManifestVersionError(Exception):
    '''The manifest required a version of west more recent than the
    current version.
    '''

    def __init__(self, version: str, file: Optional[PathType] = None):
        super().__init__(version, file)
        # Minimum version of west the manifest demanded.
        self.version = version
        # File which declared that requirement, or None.
        self.file = os.fspath(file) if file else None
class _ManifestImportDepth(ManifestImportFailed):
    # A hack to signal to main.py what happened: the recursion depth
    # limit was hit while resolving manifest imports.
    pass
#
# The main Manifest class and its public helper types, like Project
# and ImportFlag.
#
class ImportFlag(enum.IntFlag):
    '''Bit flags for handling imports when resolving a manifest.

    Note that any "path-prefix:" values set in an "import:" still take
    effect for the project itself even when IGNORE or IGNORE_PROJECTS are
    given. For example, in this manifest::

        manifest:
          projects:
          - name: foo
            import:
              path-prefix: bar

    Project 'foo' has path 'bar/foo' regardless of whether IGNORE or
    IGNORE_PROJECTS is given. This ensures the Project has the same path
    attribute as it normally would if imported projects weren't being
    ignored.
    '''

    # Members are bit flags and may be combined with '|'; see
    # _flags_ok() for which combinations are valid.

    #: The default value, 0, reads the file system to resolve
    #: "self: import:", and runs git to resolve a "projects:" import.
    DEFAULT = 0

    #: Ignore projects added via "import:" in "self:" and "projects:"
    IGNORE = 1

    #: Always invoke importer callback for "projects:" imports
    FORCE_PROJECTS = 2

    #: Ignore projects added via "import:" : in "projects:" only;
    #: including any projects added via "import:" : in "self:"
    IGNORE_PROJECTS = 4
def _flags_ok(flags: ImportFlag) -> bool:
    # Sanity-check a flag combination: FORCE_PROJECTS is incompatible
    # with IGNORE and IGNORE_PROJECTS; every other combination is
    # allowed. (This is equivalent to the earlier branch-by-branch
    # check for all possible flag values.)
    ignoring = flags & (ImportFlag.IGNORE | ImportFlag.IGNORE_PROJECTS)
    if ignoring:
        return not (flags & ImportFlag.FORCE_PROJECTS)
    return True
class Project:
'''Represents a project defined in a west manifest.
Attributes:
- ``name``: project's unique name
- ``url``: project fetch URL
- ``revision``: revision to fetch from ``url`` when the
project is updated
- ``path``: relative path to the project within the workspace
(i.e. from ``topdir`` if that is set)
- ``abspath``: absolute path to the project in the native path name
format (or ``None`` if ``topdir`` is)
- ``posixpath``: like ``abspath``, but with slashes (``/``) as
path separators
- ``clone_depth``: clone depth to fetch when first cloning the
project, or ``None`` (the revision should not be a SHA
if this is used)
- ``west_commands``: list of YAML files where extension commands in
the project are declared
- ``topdir``: the top level directory of the west workspace
the project is part of, or ``None``
- ``remote_name``: the name of the remote which should be set up
when the project is being cloned (default: 'origin')
- ``groups``: the project's groups (as a list) as given in the manifest.
If the manifest data contains no groups for the project, this is
an empty list.
- ``submodules``: the project's submodules configuration; either
a list of Submodule objects, or a boolean.
- ``userdata``: the parsed 'userdata' field in the manifest, or None
'''
def __eq__(self, other):
    # Returning NotImplemented makes Python fall back to the default
    # identity comparison: two Project instances only compare equal
    # when they are the same object.
    return NotImplemented
def __repr__(self):
    # Constructor-style representation for debugging.
    fields = (f'"{self.name}", "{self.url}", '
              f'revision="{self.revision}", path={repr(self.path)}, '
              f'clone_depth={self.clone_depth}, '
              f'west_commands={self.west_commands}, '
              f'topdir={repr(self.topdir)}, '
              f'groups={repr(self.groups)}, '
              f'userdata={repr(self.userdata)}')
    return f'Project({fields})'
def __str__(self):
    # Prefer the absolute path when the workspace topdir is known.
    where = repr(self.abspath or self.path)
    return f'<Project {self.name} ({where}) at {self.revision}>'
def __init__(self, name: str, url: str,
             revision: Optional[str] = None,
             path: Optional[PathType] = None,
             submodules: SubmodulesType = False,
             clone_depth: Optional[int] = None,
             west_commands: Optional[WestCommandsType] = None,
             topdir: Optional[PathType] = None,
             remote_name: Optional[str] = None,
             groups: Optional[GroupsType] = None,
             userdata: Optional[Any] = None):
    '''Project constructor.

    If *topdir* is ``None``, then absolute path attributes
    (``abspath`` and ``posixpath``) will also be ``None``.

    :param name: project's ``name:`` attribute in the manifest
    :param url: fetch URL
    :param revision: fetch revision
    :param path: path (relative to topdir), or None for *name*
    :param submodules: submodules to pull within the project
    :param clone_depth: depth to use for initial clone
    :param west_commands: path to a west commands specification YAML
        file in the project, relative to its base directory,
        or list of these
    :param topdir: the west workspace's top level directory
    :param remote_name: the name of the remote which should be
        set up if the project is being cloned (default: 'origin')
    :param groups: a list of groups found in the manifest data for
        the project, after conversion to str and validation.
    '''
    self.name = name
    self.url = url
    self.revision = revision if revision else _DEFAULT_REV
    self.submodules = submodules
    self.clone_depth = clone_depth
    # Assigning 'path' goes through the property setter, which also
    # invalidates the cached absolute path attributes.
    self.path = os.fspath(path if path else name)
    self.west_commands = _west_commands_list(west_commands)
    self.topdir = os.fspath(topdir) if topdir else None
    self.remote_name = remote_name if remote_name else 'origin'
    self.groups: GroupsType = groups if groups else []
    self.userdata: Any = userdata
@property
def path(self) -> str:
    # Workspace-relative path, as last assigned through the setter.
    return self._path
@path.setter
def path(self, path: PathType) -> None:
    # Store the path as a str, whatever path-like value was given.
    self._path: str = os.fspath(path)

    # Invalidate the absolute path attributes. They'll get
    # computed again next time they're accessed.
    self._abspath: Optional[str] = None
    self._posixpath: Optional[str] = None
@property
def abspath(self) -> Optional[str]:
    # Lazily computed absolute path; stays None while no topdir is set.
    if self._abspath is None and self.topdir:
        self._abspath = os.path.abspath(Path(self.topdir) /
                                        self.path)
    return self._abspath
@property
def posixpath(self) -> Optional[str]:
    # Lazily computed 'abspath' with forward-slash separators;
    # None whenever abspath is None.
    if self._posixpath is None and self.abspath is not None:
        self._posixpath = Path(self.abspath).as_posix()
    return self._posixpath
@property
def name_and_path(self) -> str:
    # Convenience "name (path)" string for logs and error messages.
    return f'{self.name} ({self.path})'
def as_dict(self) -> Dict:
    '''Return a dict representing this project, as it would be
    parsed from an equivalent YAML manifest.
    '''
    ret: Dict = {'name': self.name,
                 'url': self.url,
                 'revision': self.revision}
    # Optional keys only appear when they carry information.
    if self.path != self.name:
        ret['path'] = self.path
    if self.clone_depth:
        ret['clone-depth'] = self.clone_depth
    if self.west_commands:
        ret['west-commands'] = \
            _west_commands_maybe_delist(self.west_commands)
    if self.groups:
        ret['groups'] = self.groups
    if self.userdata:
        ret['userdata'] = self.userdata

    return ret
#
# Git helpers
#
def git(self, cmd: Union[str, List[str]],
        extra_args: Iterable[str] = (),
        capture_stdout: bool = False,
        capture_stderr: bool = False,
        check: bool = True,
        cwd: Optional[PathType] = None) -> subprocess.CompletedProcess:
    '''Run a git command in the project repository.

    :param cmd: git command as a string (or list of strings)
    :param extra_args: sequence of additional arguments to pass to
        the git command (useful mostly if *cmd* is a string).
    :param capture_stdout: if True, git's standard output is
        captured in the ``CompletedProcess`` instead of being
        printed.
    :param capture_stderr: Like *capture_stdout*, but for standard
        error. Use with caution: this may prevent error messages
        from being shown to the user.
    :param check: if given, ``subprocess.CalledProcessError`` is
        raised if git finishes with a non-zero return code
    :param cwd: directory to run git in (default: ``self.abspath``)
    '''
    # Normalize 'cmd' into a list of arguments.
    if isinstance(cmd, str):
        cmd_list = shlex.split(cmd)
    else:
        cmd_list = list(cmd)

    extra_args = list(extra_args)

    if cwd is None:
        if self.abspath is not None:
            cwd = self.abspath
        else:
            raise ValueError('no abspath; cwd must be given')
    elif sys.version_info < (3, 6, 1) and not isinstance(cwd, str):
        # Popen didn't accept a PathLike cwd on Windows until
        # python v3.7; this was backported onto cpython v3.6.1,
        # though. West currently supports "python 3.6", though, so
        # in the unlikely event someone is running 3.6.0 on
        # Windows, do the right thing.
        cwd = os.fspath(cwd)

    args = ['git'] + cmd_list + extra_args
    cmd_str = util.quote_sh_list(args)

    _logger.debug(f"running '{cmd_str}' in {cwd}")
    popen = subprocess.Popen(
        args, cwd=cwd,
        stdout=subprocess.PIPE if capture_stdout else None,
        stderr=subprocess.PIPE if capture_stderr else None)

    stdout, stderr = popen.communicate()

    # We use logger style % formatting here to avoid the
    # potentially expensive overhead of formatting long
    # stdout/stderr strings if the current log level isn't DEBUG,
    # which is the usual case.
    _logger.debug('"%s" exit code: %d stdout: %r stderr: %r',
                  cmd_str, popen.returncode, stdout, stderr)

    if check and popen.returncode:
        raise subprocess.CalledProcessError(popen.returncode, cmd_list,
                                            output=stdout, stderr=stderr)
    else:
        return subprocess.CompletedProcess(popen.args, popen.returncode,
                                           stdout, stderr)
def sha(self, rev: str, cwd: Optional[PathType] = None) -> str:
    '''Get the SHA for a project revision.

    :param rev: git revision (HEAD, v2.0.0, etc.) as a string
    :param cwd: directory to run command in (default:
        self.abspath)
    '''
    # Capture stderr so that, if the command fails, it's available
    # as the stderr attribute of the CalledProcessError raised by
    # git() (Python 3.5 and above).
    completed = self.git(f'rev-parse {rev}^{{commit}}', capture_stdout=True,
                         cwd=cwd, capture_stderr=True)
    # Assumption: SHAs are hex values and thus safe to decode in ASCII.
    # It'll be fun when we find out that was wrong and how...
    return completed.stdout.decode('ascii').strip()
def is_ancestor_of(self, rev1: str, rev2: str,
                   cwd: Optional[PathType] = None) -> bool:
    '''Check if 'rev1' is an ancestor of 'rev2' in this project.

    Returns True if rev1 is an ancestor commit of rev2 in the
    given project; rev1 and rev2 can be anything that resolves to
    a commit. (If rev1 and rev2 refer to the same commit, the
    return value is True, i.e. a commit is considered an ancestor
    of itself.) Returns False otherwise.

    :param rev1: commit that could be the ancestor of *rev2*
    :param rev2: commit that could be a descendant or *rev1*
    :param cwd: directory to run command in (default:
        ``self.abspath``)
    '''
    # 'git merge-base --is-ancestor' exits 0 for "yes" and 1 for
    # "no"; any other status is an error.
    returncode = self.git(f'merge-base --is-ancestor {rev1} {rev2}',
                          check=False, cwd=cwd).returncode
    if returncode in (0, 1):
        return returncode == 0
    raise RuntimeError(f'unexpected git merge-base result {returncode}')
def is_up_to_date_with(self, rev: str,
cwd: Optional[PathType] = None) -> bool:
'''Check if the project is up to date with *rev*, returning
``True`` if so.
This is equivalent to ``is_ancestor_of(rev, 'HEAD',
cwd=cwd)``.
:param rev: base revision to check if project is up to date
with.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
return self.is_ancestor_of(rev, 'HEAD', cwd=cwd)
def is_up_to_date(self, cwd: Optional[PathType] = None) -> bool:
'''Check if the project HEAD is up to date with the manifest.
This is equivalent to ``is_up_to_date_with(self.revision,
cwd=cwd)``.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
return self.is_up_to_date_with(self.revision, cwd=cwd)
def is_cloned(self, cwd: Optional[PathType] = None) -> bool:
'''Returns ``True`` if ``self.abspath`` looks like a git
repository's top-level directory, and ``False`` otherwise.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
if not self.abspath or not os.path.isdir(self.abspath):
return False
# --is-inside-work-tree doesn't require that the directory is
# the top-level directory of a Git repository. Use --show-cdup
# instead, which prints an empty string (i.e., just a newline,
# which we strip) for the top-level directory.
_logger.debug(f'{self.name}: checking if cloned')
res = self.git('rev-parse --show-cdup', check=False, cwd=cwd,
capture_stderr=True, capture_stdout=True)
return not (res.returncode or res.stdout.strip())
def read_at(self, path: PathType, rev: Optional[str] = None,
cwd: Optional[PathType] = None) -> bytes:
'''Read file contents in the project at a specific revision.
:param path: relative path to file in this project
:param rev: revision to read *path* from (default: ``self.revision``)
:param cwd: directory to run command in (default: ``self.abspath``)
'''
if rev is None:
rev = self.revision
cp = self.git(['show', f'{rev}:{os.fspath(path)}'],
capture_stdout=True, capture_stderr=True, cwd=cwd)
return cp.stdout
def listdir_at(self, path: PathType, rev: Optional[str] = None,
cwd: Optional[PathType] = None,
encoding: Optional[str] = None) -> List[str]:
'''List of directory contents in the project at a specific revision.
The return value is the directory contents as a list of files and
subdirectories.
:param path: relative path to file in this project
:param rev: revision to read *path* from (default: ``self.revision``)
:param cwd: directory to run command in (default: ``self.abspath``)
:param encoding: directory contents encoding (default: 'utf-8')
'''
if rev is None:
rev = self.revision
if encoding is None:
encoding = 'utf-8'
# git-ls-tree -z means we get NUL-separated output with no quoting
# of the file names. Using 'git-show' or 'git-cat-file -p'
# wouldn't work for files with special characters in their names.
out = self.git(['ls-tree', '-z', f'{rev}:{os.fspath(path)}'], cwd=cwd,
capture_stdout=True, capture_stderr=True).stdout
# A tab character separates the SHA from the file name in each
# NUL-separated entry.
return [f.decode(encoding).split('\t', 1)[1]
for f in out.split(b'\x00') if f]
# FIXME: this whole class should just go away. See #327.
class ManifestProject(Project):
    '''The west manifest repository, wrapped in the `Project` interface.

    Meaningful attributes:

    - ``name``: the string ``"manifest"``
    - ``topdir``: the top level directory of the west workspace
      the manifest project controls, or ``None``
    - ``path``: relative path to the manifest repository within the
      workspace, or ``None`` (i.e. from ``topdir`` if that is set)
    - ``abspath``: absolute path to the manifest repository in the
      native path name format (or ``None`` if ``topdir`` is)
    - ``posixpath``: like ``abspath``, but with slashes (``/``) as
      path separators
    - ``west_commands``: ``west_commands:`` key in the manifest's
      ``self:`` map. This may be a list of such if the self
      section imports multiple additional files with west commands.

    Other readable attributes included for Project compatibility:

    - ``url``: the empty string; the west manifest is not
      version-controlled by west itself, even though 'west init'
      can fetch a manifest repository from a Git remote
    - ``revision``: ``"HEAD"``
    - ``clone_depth``: ``None``, because there's no URL
    - ``groups``: the empty list
    '''

    def __init__(self, path: Optional[PathType] = None,
                 west_commands: Optional[WestCommandsType] = None,
                 topdir: Optional[PathType] = None):
        '''
        :param path: relative path to the manifest repository in the
            west workspace, if known
        :param west_commands: path to a west commands specification YAML
            file in the project, relative to its base directory, or a
            list of these
        :param topdir: root of the west workspace the manifest project
            is inside; if not given, all absolute path attributes
            (abspath and posixpath) will be None
        '''
        self.name: str = 'manifest'

        # These exist only for Project compatibility (#327): the
        # manifest repository is not version-controlled by west.
        self.url: str = ''
        self.submodules = False
        self.revision: str = 'HEAD'
        self.clone_depth: Optional[int] = None
        self.groups = []

        # Project declares _path as 'str'; here it can also be None,
        # hence the type: ignore.
        self._path = os.fspath(path) if path else None  # type: ignore

        # Path related attributes.
        self.topdir: Optional[str] = os.fspath(topdir) if topdir else None
        self._abspath: Optional[str] = None
        self._posixpath: Optional[str] = None

        # Extension commands.
        self.west_commands = _west_commands_list(west_commands)

    def __repr__(self):
        return (f'ManifestProject({self.name}, path={repr(self.path)}, '
                f'west_commands={self.west_commands}, '
                f'topdir={repr(self.topdir)})')

    @property
    def abspath(self) -> Optional[str]:
        # Computed lazily and cached; requires both topdir and path.
        if self._abspath is None and self.topdir and self.path:
            self._abspath = os.path.abspath(
                os.path.join(self.topdir, self.path))
        return self._abspath

    def as_dict(self) -> Dict:
        '''Return a representation of this object as a dict, as it would be
        parsed from an equivalent YAML manifest.'''
        result: Dict = {}
        if self.path:
            result['path'] = self.path
        if self.west_commands:
            result['west-commands'] = _west_commands_maybe_delist(
                self.west_commands)
        return result
class Manifest:
'''The parsed contents of a west manifest file.
'''
@staticmethod
def from_file(source_file: Optional[PathType] = None,
              **kwargs) -> 'Manifest':
    '''Manifest object factory given a source YAML file.

    The default behavior is to find the current west workspace's
    manifest file and resolve it.

    Results depend on the keyword arguments given in *kwargs*:

    - If both *source_file* and *topdir* are given, the returned
      Manifest object is based on the data in *source_file*,
      rooted at *topdir*. The configuration variable
      ``manifest.path`` is ignored in this case, though
      ``manifest.group-filter`` will still be read if it exists.
      This allows parsing a manifest file "as if" its project
      hierarchy were rooted at another location in the system.

    - If neither *source_file* nor *topdir* is given, the file
      system is searched for *topdir*. That workspace's
      ``manifest.path`` configuration option is used to find
      *source_file*, ``topdir/<manifest.path>/<manifest.file>``.

    - If only *source_file* is given, *topdir* is found starting
      there. The directory containing *source_file* doesn't have
      to be ``manifest.path`` in this case.

    - If only *topdir* is given, that workspace's
      ``manifest.path`` is used to find *source_file*.

    Exceptions raised:

    - `west.util.WestNotFound` if no *topdir* can be found
    - `MalformedManifest` if *source_file* contains invalid data
    - `ManifestVersionError` if this version of west is too old
      to parse the manifest.
    - `MalformedConfig` if ``manifest.path`` is needed and can't
      be read
    - ``ValueError`` if *topdir* is given but is not a west
      workspace root

    :param source_file: source file to load
    :param kwargs: Manifest.__init__ keyword arguments
    '''
    topdir = kwargs.get('topdir')

    if topdir is None and source_file is None:
        # Neither given: search the file system for the workspace
        # and use its manifest.path.
        topdir = util.west_topdir()
        mpath, mname = _mpath(topdir=topdir)
        kwargs['topdir'] = topdir
        kwargs['source_file'] = os.path.join(topdir, mpath, mname)
        kwargs['manifest_path'] = mpath
    elif topdir is None:
        # Just source_file: find topdir starting there. We need
        # source_file in kwargs as that's what gets used below.
        kwargs['source_file'] = source_file
        kwargs['topdir'] = util.west_topdir(
            start=os.path.dirname(source_file))
    elif source_file is None:
        # Just topdir. First make sure it really is a west
        # workspace root.
        msg = f'topdir {topdir} is not a west workspace root'
        try:
            real_topdir = util.west_topdir(start=topdir, fall_back=False)
        except util.WestNotFound:
            raise ValueError(msg)
        if Path(topdir) != Path(real_topdir):
            raise ValueError(f'{msg}; but {real_topdir} is')
        # Locate source_file via manifest.path in topdir/.west/config.
        mpath, mname = _mpath(topdir=topdir)
        kwargs['source_file'] = os.path.join(topdir, mpath, mname)
        kwargs['manifest_path'] = mpath
    else:
        # Both source_file and topdir were given.
        kwargs['source_file'] = source_file

    return Manifest(**kwargs)
@staticmethod
def from_data(source_data: ManifestDataType, **kwargs) -> 'Manifest':
    '''Manifest object factory given parsed YAML data.

    This factory does not read any configuration files.

    Letting the return value be ``m``, results depend on keyword
    arguments in *kwargs*:

    - Unless *topdir* is given, all absolute paths in ``m``, like
      ``m.projects[1].abspath``, are ``None``.

    - Relative paths, like ``m.projects[1].path``, are taken from
      *source_data*.

    - If ``source_data['manifest']['self']['path']`` is not set,
      then ``m.projects[MANIFEST_PROJECT_INDEX].abspath`` will be
      set to *manifest_path* if given.

    Raises the same exceptions as the Manifest constructor.

    :param source_data: parsed YAML data as a Python object, or a
        string with unparsed YAML data
    :param kwargs: Manifest.__init__ keyword arguments
    '''
    kwargs['source_data'] = source_data
    return Manifest(**kwargs)
def __init__(self, source_file: Optional[PathType] = None,
             source_data: Optional[ManifestDataType] = None,
             manifest_path: Optional[PathType] = None,
             topdir: Optional[PathType] = None,
             importer: Optional[ImporterType] = None,
             import_flags: ImportFlag = ImportFlag.DEFAULT,
             **kwargs: Dict[str, Any]):
    '''
    Using `from_file` or `from_data` is usually easier than direct
    instantiation.

    Instance attributes:

    - ``projects``: sequence of `Project`

    - ``topdir``: west workspace top level directory, or None

    - ``path``: path to the manifest file itself, or None

    - ``has_imports``: bool, True if the manifest contains
      an "import:" attribute in "self:" or "projects:"; False
      otherwise

    - ``group_filter``: a group filter value equivalent to
      the resolved manifest's "group-filter:", along with any
      values from imported manifests. This value may be simpler
      than the actual input data.

    Exactly one of *source_file* and *source_data* must be given.

    If *source_file* is given:

    - If *topdir* is too, ``projects`` is rooted there.

    - Otherwise, *topdir* is found starting at *source_file*.

    If *source_data* is given:

    - If *topdir* is too, ``projects`` is rooted there.

    - Otherwise, there is no root: ``projects[i].abspath`` and
      other absolute path attributes are ``None``.

    - If ``source_data['manifest']['self']['path']`` is unset,
      *manifest_path* is used as a fallback.

    The *importer* kwarg, if given, is a callable. It is called
    when *source_file* requires importing manifest data that
    aren't found locally. It will be called as:

    ``importer(project, file)``

    where ``project`` is a `Project` and ``file`` is the missing
    file. The file's contents at refs/heads/manifest-rev should
    usually be returned, potentially after fetching the project's
    revision from its remote URL and updating that ref.

    The return value should be a string containing manifest data,
    or a list of strings if ``file`` is a directory containing
    YAML files. A return value of None will cause the import to be
    ignored.

    Exceptions raised:

    - `MalformedManifest`: if the manifest data is invalid

    - `ManifestImportFailed`: if the manifest could not be
      resolved due to import errors

    - `ManifestVersionError`: if this version of west is too
      old to parse the manifest

    - `WestNotFound`: if *topdir* was needed and not found

    - ``ValueError``: for other invalid arguments

    :param source_file: YAML file containing manifest data
    :param source_data: parsed YAML data as a Python object, or a
        string containing unparsed YAML data
    :param manifest_path: fallback `ManifestProject` ``path``
        attribute
    :param topdir: used as the west workspace top level
        directory
    :param importer: callback to resolve missing manifest import
        data
    :param import_flags: bit mask, controls import resolution
    '''
    if source_file and source_data:
        raise ValueError('both source_file and source_data were given')
    if not _flags_ok(import_flags):
        raise ValueError(f'bad import_flags {import_flags:x}')

    self.path: Optional[str] = None
    '''Path to the file containing the manifest, or None if
    created from data rather than the file system.
    '''
    # Reading the file here both loads the data and records where
    # it came from, for use in error messages later.
    if source_file:
        source_file = Path(source_file)
        source_data = source_file.read_text()
        self.path = os.path.abspath(source_file)

    if not source_data:
        self._malformed('manifest contains no data')

    if isinstance(source_data, str):
        source_data = _load(source_data)

    # Validate the manifest. Wrap a couple of the exceptions with
    # extra context about the problematic file in case of errors,
    # to help debugging.
    try:
        validate(source_data)
    except ManifestVersionError as mv:
        raise ManifestVersionError(mv.version, file=source_file) from mv
    except MalformedManifest as mm:
        self._malformed(mm.args[0], parent=mm)
    except TypeError as te:
        self._malformed(te.args[0], parent=te)

    # The above validate() and exception handling block's job is
    # to ensure this, but pacify the type checker in a way that
    # crashes if something goes wrong with that.
    assert isinstance(source_data, dict)

    self._projects: List[Project] = []
    '''Sequence of `Project` objects representing manifest
    projects.

    Index 0 (`MANIFEST_PROJECT_INDEX`) contains a
    `ManifestProject` representing the manifest repository. The
    rest of the sequence contains projects in manifest file order
    (or resolution order if the manifest contains imports).
    '''

    self.topdir: Optional[str] = None
    '''The west workspace's top level directory, or None.'''
    if topdir:
        self.topdir = os.fspath(topdir)

    self.has_imports: bool = False

    # This will be overwritten in _load() as needed.
    self.group_filter: GroupFilterType = []

    # Private state which backs self.group_filter. This also
    # gets overwritten as needed.
    self._disabled_groups: Set[str] = set()

    # Stash the importer and flags in instance attributes. These
    # don't change as we recurse, so they don't belong in _import_ctx.
    self._importer: ImporterType = importer or _default_importer
    self._import_flags = import_flags

    # 'import-context' is an internal kwarg used when this
    # constructor is invoked recursively to resolve imported
    # manifests (see _import_pathobj_from_self); external callers
    # don't pass it, so a fresh context is created here.
    ctx: Optional[_import_ctx] = \
        kwargs.get('import-context')  # type: ignore
    if ctx is None:
        ctx = _import_ctx(projects={},
                          group_filter=[],
                          imap_filter=None,
                          path_prefix=Path('.'))
    else:
        assert isinstance(ctx, _import_ctx)

    if manifest_path:
        mpath: Optional[Path] = Path(manifest_path)
    else:
        mpath = None

    self._load(source_data['manifest'], mpath, ctx)
def get_projects(self,
# any str name is also a PathType
project_ids: Iterable[PathType],
allow_paths: bool = True,
only_cloned: bool = False) -> List[Project]:
'''Get a list of `Project` objects in the manifest from
*project_ids*.
If *project_ids* is empty, a copy of ``self.projects``
attribute is returned as a list. Otherwise, the returned list
has projects in the same order as *project_ids*.
``ValueError`` is raised if:
- *project_ids* contains unknown project IDs
- (with *only_cloned*) an uncloned project was found
The ``ValueError`` *args* attribute is a 2-tuple with a list
of unknown *project_ids* at index 0, and a list of uncloned
`Project` objects at index 1.
:param project_ids: a sequence of projects, identified by name
or (absolute or relative) path. Names are matched first; path
checking can be disabled with *allow_paths*.
:param allow_paths: if false, *project_ids* is assumed to contain
names only, not paths
:param only_cloned: raise an exception for uncloned projects
'''
projects = list(self.projects)
unknown: List[PathType] = [] # project_ids with no Projects
uncloned: List[Project] = [] # if only_cloned, the uncloned Projects
ret: List[Project] = [] # result list of resolved Projects
# If no project_ids are specified, use all projects.
if not project_ids:
if only_cloned:
uncloned = [p for p in projects if not p.is_cloned()]
if uncloned:
raise ValueError(unknown, uncloned)
return projects
# Otherwise, resolve each of the project_ids to a project,
# returning the result or raising ValueError.
for pid in project_ids:
project: Optional[Project] = None
if isinstance(pid, str):
project = self._projects_by_name.get(pid)
if project is None and allow_paths:
project = self._projects_by_rpath.get(Path(pid).resolve())
if project is None:
unknown.append(pid)
continue
ret.append(project)
if only_cloned and not project.is_cloned():
uncloned.append(project)
if unknown or (only_cloned and uncloned):
raise ValueError(unknown, uncloned)
return ret
def _as_dict_helper(
        self, pdict: Optional[Callable[[Project], Dict]] = None) \
        -> Dict:
    # Build the dict representation of this manifest; pdict maps a
    # Project to its dict form (default: Project.as_dict).
    if pdict is None:
        pdict = Project.as_dict

    # Skip the ManifestProject; it is emitted under 'self:' below.
    contents = [p for i, p in enumerate(self.projects)
                if i != MANIFEST_PROJECT_INDEX]

    # Key order matters for output readability; this relies on
    # insertion-ordered dicts (a CPython 3.6 implementation detail
    # and a Python 3.7+ guarantee).
    manifest: Dict[str, Any] = {}
    if self.group_filter:
        manifest['group-filter'] = self.group_filter
    manifest['projects'] = [pdict(p) for p in contents]
    manifest['self'] = self.projects[MANIFEST_PROJECT_INDEX].as_dict()

    return {'manifest': manifest}
def as_dict(self) -> Dict:
    '''Returns a dict representing self, fully resolved.

    "Resolved" means the result is as if all projects had been
    defined in a single manifest without any import attributes.
    '''
    return self._as_dict_helper()
def as_frozen_dict(self) -> Dict:
    '''Returns a dict representing self, but frozen.

    "Frozen" means all project revisions are the full SHAs
    pointed to by `QUAL_MANIFEST_REV_BRANCH` references.

    Raises ``RuntimeError`` if a project SHA can't be resolved.
    '''
    def frozen_pdict(p):
        # Replace the project's revision with the exact SHA that
        # manifest-rev points at; requires a cloned project.
        if not p.is_cloned():
            raise RuntimeError(f'cannot freeze; project {p.name} '
                               'is uncloned')
        try:
            sha = p.sha(QUAL_MANIFEST_REV_BRANCH)
        except subprocess.CalledProcessError as e:
            raise RuntimeError(f'cannot freeze; project {p.name} '
                               f'ref {QUAL_MANIFEST_REV_BRANCH} '
                               'cannot be resolved to a SHA') from e
        d = p.as_dict()
        d['revision'] = sha
        return d

    return self._as_dict_helper(pdict=frozen_pdict)
def as_yaml(self, **kwargs) -> str:
    '''Returns a YAML representation for self, fully resolved.

    "Resolved" means the result is as if all projects had been
    defined in a single manifest without any import attributes.

    :param kwargs: passed to yaml.safe_dump()
    '''
    as_dict = self.as_dict()
    return yaml.safe_dump(as_dict, **kwargs)
def as_frozen_yaml(self, **kwargs) -> str:
    '''Returns a YAML representation for self, but frozen.

    "Frozen" means all project revisions are the full SHAs
    pointed to by `QUAL_MANIFEST_REV_BRANCH` references.

    Raises ``RuntimeError`` if a project SHA can't be resolved.

    :param kwargs: passed to yaml.safe_dump()
    '''
    frozen = self.as_frozen_dict()
    return yaml.safe_dump(frozen, **kwargs)
@property
def projects(self) -> List[Project]:
    '''Sequence of `Project` objects in the manifest (see the
    ``_projects`` documentation in ``_load()`` for ordering).'''
    return self._projects
def is_active(self, project: Project,
extra_filter: Optional[Iterable[str]] = None) -> bool:
'''Is a project active?
Projects with empty 'project.groups' lists are always active.
Otherwise, if any group in 'project.groups' is enabled by this
manifest's 'group-filter:' list (and the
'manifest.group-filter' local configuration option, if we have
a workspace), returns True.
Otherwise, i.e. if all of the project's groups are disabled,
this returns False.
"Inactive" projects should generally be considered absent from
the workspace for purposes like updating it, listing projects,
etc.
:param project: project to check
:param extra_filter: an optional additional group filter
'''
if not project.groups:
# Projects without any groups are always active, so just
# exit early. Note that this happens to treat the
# ManifestProject as though it's always active. This is
# important for keeping it in the 'west list' output for
# now.
return True
# Load manifest.group-filter from the configuration file if we
# haven't already. Only do this once so we don't hit the file
# system for every project when looping over the manifest.
cfg_gf = self._config_group_filter
# Figure out what the disabled groups are. Skip reallocation
# if possible.
if cfg_gf or extra_filter is not None:
disabled_groups = set(self._disabled_groups)
if cfg_gf:
_update_disabled_groups(disabled_groups, cfg_gf)
if extra_filter is not None:
extra_filter = self._validated_group_filter(None,
list(extra_filter))
_update_disabled_groups(disabled_groups, extra_filter)
else:
disabled_groups = self._disabled_groups
return any(group not in disabled_groups for group in project.groups)
@property
def _config_group_filter(self) -> GroupFilterType:
# Private property for loading the manifest.group-filter value
# in the local configuration file. Used by is_active.
if not hasattr(self, '_cfg_gf'):
self._cfg_gf = self._load_config_group_filter()
return self._cfg_gf
def _load_config_group_filter(self) -> GroupFilterType:
# Load and return manifest.group-filter (converted to a list
# of strings) from the local configuration file if there is
# one.
#
# Returns [] if manifest.group-filter is not set and when
# there is no workspace.
if not self.topdir:
# No workspace -> do not attempt to read config options.
return []
cp = cfg._configparser()
cfg.read_config(configfile=cfg.ConfigFile.LOCAL, config=cp,
topdir=self.topdir)
if 'manifest' not in cp:
# We may have been created from a partially set up
# workspace with an explicit source_file and topdir,
# but no manifest.path config option set.
return []
raw_filter: Optional[str] = cp['manifest'].get('group-filter', None)
if not raw_filter:
return []
# Be forgiving: allow empty strings and values with
# whitespace, and ignore (but emit warnings for) invalid
# values.
#
# Whitespace in between groups, like "foo ,bar", is removed,
# resulting in valid group names ['foo', 'bar'].
ret: GroupFilterType = []
for item in raw_filter.split(','):
stripped = item.strip()
if not stripped:
# Don't emit a warning here. This avoids warnings if
# the option is set to an empty string.
continue
if not stripped[0].startswith(('-', '+')):
_logger.warning(
f'ignoring invalid manifest.group-filter item {item}; '
'this must start with "-" or "+"')
continue
if not is_group(stripped[1:]):
_logger.warning(
f'ignoring invalid manifest.group-filter item {item}; '
f'"{stripped[1:]}" is not a group name')
continue
ret.append(stripped)
return ret
def _malformed(self, complaint: str,
               parent: Optional[Exception] = None) -> NoReturn:
    # Raise MalformedManifest, mentioning where the manifest data
    # came from, the schema file, and an optional hint; chain from
    # 'parent' when it caused the failure.
    context = (f'file: {self.path} ' if self.path else 'data')
    args = [f'Malformed manifest {context}',
            f'Schema file: {_SCHEMA_PATH}']
    if complaint:
        args.append('Hint: ' + complaint)
    exc = MalformedManifest(*args)
    if parent is None:
        raise exc
    raise exc from parent
def _load(self, manifest: Dict[str, Any],
          path_hint: Optional[Path],  # not PathType!
          ctx: _import_ctx) -> None:
    # Initialize this instance.
    #
    # - manifest: manifest data, parsed and validated
    # - path_hint: hint about where the manifest repo lives
    # - ctx: recursive import context

    # ctx.projects starts out empty for the outermost manifest
    # only; recursive loads for imported manifests arrive with a
    # shared, pre-populated map (see _import_pathobj_from_self).
    top_level = not bool(ctx.projects)

    if self.path:
        loading_what = self.path
    else:
        loading_what = 'data (no file)'

    _logger.debug(f'loading {loading_what}')

    schema_version = str(manifest.get('version', SCHEMA_VERSION))

    # We want to make an ordered map from project names to
    # corresponding Project instances. Insertion order into this
    # map should reflect the final project order including
    # manifest import resolution, which is:
    #
    # 1. Imported projects from "manifest: self: import:"
    # 2. "manifest: projects:"
    # 3. Imported projects from "manifest: projects: ... import:"

    # Create the ManifestProject, and import projects and
    # group-filter data from "self:".
    mp = self._load_self(manifest, path_hint, ctx)

    # Load "group-filter:" from this manifest.
    self_group_filter = self._load_group_filter(manifest, ctx)

    # Add this manifest's projects to the map, and handle imported
    # projects and group-filter values.
    url_bases = {r['name']: r['url-base'] for r in
                 manifest.get('remotes', [])}
    defaults = self._load_defaults(manifest.get('defaults', {}), url_bases)
    self._load_projects(manifest, url_bases, defaults, ctx)

    # The manifest is resolved. Make sure paths are unique.
    self._check_paths_are_unique(mp, ctx.projects, top_level)

    # Make sure that project names don't contain unsupported characters.
    self._check_names(mp, ctx.projects)

    # Save the resulting projects and initialize lookup tables.
    self._projects = list(ctx.projects.values())
    self._projects.insert(MANIFEST_PROJECT_INDEX, mp)
    self._projects_by_name: Dict[str, Project] = {'manifest': mp}
    self._projects_by_name.update(ctx.projects)
    self._projects_by_rpath: Dict[Path, Project] = {}  # resolved paths
    if self.topdir:
        for i, p in enumerate(self.projects):
            if i == MANIFEST_PROJECT_INDEX and not p.abspath:
                # When from_data() is called without a path hint, mp
                # can have a topdir but no path, and thus no abspath.
                continue
            if TYPE_CHECKING:
                # The typing module can't tell that self.topdir
                # being truthy guarantees p.abspath is a str, not None.
                assert p.abspath
            self._projects_by_rpath[Path(p.abspath).resolve()] = p

    # Update self.group_filter
    if top_level:
        # For schema version 0.10 or later, there's no point in
        # overwriting these attributes for anything except the top
        # level manifest: all the other ones we've loaded above
        # during import resolution are already garbage.
        #
        # For schema version 0.9, we only want to warn once, at the
        # top level, if the distinction actually matters.
        self._finalize_group_filter(self_group_filter, ctx,
                                    schema_version)

    _logger.debug(f'loaded {loading_what}')
def _load_group_filter(self, manifest_data: Dict[str, Any],
                       ctx: _import_ctx) -> GroupFilterType:
    # Validate this manifest's "group-filter:" (if any), update
    # ctx.group_filter with it, and return it ([] if unset).

    if 'group-filter' not in manifest_data:
        _logger.debug('group-filter: unset')
        return []

    raw_filter: List[RawGroupType] = manifest_data['group-filter']
    if not raw_filter:
        self._malformed('"manifest: group-filter:" may not be empty')

    group_filter = self._validated_group_filter('manifest', raw_filter)
    _logger.debug('group-filter: %s', group_filter)

    # Prepend to the shared context list.
    ctx.group_filter[:0] = group_filter

    return group_filter
def _validated_group_filter(
self, source: Optional[str], raw_filter: List[RawGroupType]
) -> GroupFilterType:
# Helper function for cleaning up nonempty manifest:
# group-filter: and manifest.group-filter values.
if source is not None:
source += ' '
else:
source = ''
ret: GroupFilterType = []
for item in raw_filter:
if not isinstance(item, str):
item = str(item)
if (not item) or (item[0] not in ('+', '-')):
self._malformed(
f'{source}group filter contains invalid item "{item}"; '
'this must begin with "+" or "-"')
group = item[1:]
if not is_group(group):
self._malformed(
f'{source}group filter contains invalid item "{item}"; '
f'"{group}" is an invalid group name')
ret.append(item)
return ret
def _load_self(self, manifest: Dict[str, Any],
               path_hint: Optional[Path],
               ctx: _import_ctx) -> ManifestProject:
    # Create the ManifestProject for this manifest's "self:"
    # section and resolve its import, if there is one.
    slf = manifest.get('self', {})

    if 'path' not in slf:
        path = path_hint
    else:
        path = slf['path']
        if path is None:
            self._malformed(f'self: path: is {path}; this value '
                            'must be nonempty if present')

    mp = ManifestProject(path=path, topdir=self.topdir,
                         west_commands=slf.get('west-commands'))

    imp = slf.get('import')
    if imp is None:
        return mp

    if self._import_flags & ImportFlag.IGNORE:
        _logger.debug('ignored self import')
    else:
        _logger.debug(f'resolving self import {imp}')
        self._import_from_self(mp, imp, ctx)
        _logger.debug('resolved self import')

    return mp
def _assert_imports_ok(self) -> None:
    # Sanity check: code paths that resolve imports must never be
    # reached when the IGNORE import flag is set.
    #
    # Could be deleted if this feature stabilizes and we never hit
    # this assertion.

    assert not self._import_flags & ImportFlag.IGNORE
def _import_from_self(self, mp: ManifestProject, imp: Any,
                      ctx: _import_ctx) -> None:
    # Recursive helper to import projects from the manifest
    # repository; 'imp' is the loaded value of "foo" in
    # "self: import: foo".
    #
    # All data is read from the file system. Requests to read
    # files which don't exist or aren't ordinary files/directories
    # raise MalformedManifest.
    #
    # This is unlike importing from projects -- for projects, data
    # are read from Git (treating it as a content-addressable file
    # system) with a fallback on self._importer.
    self._assert_imports_ok()
    self.has_imports = True

    imptype = type(imp)
    if imptype is bool:
        self._malformed(f'got "self: import: {imp}" of boolean')
    elif imptype is str:
        # A single file or directory path.
        self._import_path_from_self(mp, imp, ctx)
    elif imptype is list:
        # Recurse on every element in order.
        for subimp in imp:
            self._import_from_self(mp, subimp, ctx)
    elif imptype is dict:
        imap = self._load_imap(imp, f'manifest file {mp.abspath}')
        # The map may tighten the current context (e.g. a stricter
        # imap_filter or a longer path_prefix), so compose the two
        # before recursing.
        self._import_path_from_self(mp, imap.file,
                                    _compose_ctx_and_imap(ctx, imap))
    else:
        self._malformed(f'{mp.abspath}: "self: import: {imp}" '
                        f'has invalid type {imptype}')
def _import_path_from_self(self, mp: ManifestProject, imp: Any,
                           ctx: _import_ctx) -> None:
    # Import the file or directory named by 'imp', relative to the
    # manifest repository root.
    if mp.abspath:
        # Fast path, when we're working inside a fully initialized
        # topdir.
        repo_root = Path(mp.abspath)
    else:
        # Fallback path, which is needed by at least west init. If
        # this happens too often, something may be wrong with how
        # we've implemented this. We'd like to avoid too many git
        # commands, as subprocesses are slow on windows.
        assert self.path is not None  # to ensure and satisfy type checker
        start = Path(self.path).parent
        _logger.debug(
            f'searching for manifest repository root from {start}')
        out = mp.git('rev-parse --show-toplevel',
                     capture_stdout=True, cwd=start).stdout
        # Chop off the trailing newline; hopefully decoding as
        # utf-8 is safe.
        repo_root = Path(out[:-1].decode('utf-8'))

    p = repo_root / imp
    if p.is_file():
        _logger.debug(f'found submanifest file: {p}')
        self._import_pathobj_from_self(mp, p, ctx)
    elif p.is_dir():
        # Import every YAML file in the directory, sorted by name.
        _logger.debug(f'found submanifest directory: {p}')
        for yml in filter(_is_yml, sorted(p.iterdir())):
            self._import_pathobj_from_self(mp, p / yml, ctx)
    else:
        # This also happens for special files like character
        # devices, but it doesn't seem worth handling that error
        # separately. Who would call mknod in their manifest repo?
        self._malformed(f'{mp.abspath}: "self: import: {imp}": '
                        f'file {p} not found')
def _import_pathobj_from_self(self, mp: ManifestProject, pathobj: Path,
                              ctx: _import_ctx) -> None:
    # Import one manifest file ('pathobj') living in the manifest
    # repository whose ManifestProject is 'mp'.
    #
    # The imported content is added destructively into our shared
    # 'ctx.projects' map; the intermediate Manifest object is then
    # thrown away (we're basically using __init__ as a function).
    # The only part we keep is any west commands the submanifest
    # defines, which get merged into mp's.
    try:
        kwargs: Dict[str, Any] = {'import-context': ctx}
        submanifest = Manifest(source_file=pathobj,
                               manifest_path=mp.path,
                               topdir=self.topdir,
                               importer=self._importer,
                               import_flags=self._import_flags,
                               **kwargs)
    except RecursionError as e:
        raise _ManifestImportDepth(mp, pathobj) from e

    submp = submanifest.projects[MANIFEST_PROJECT_INDEX]
    # submp.west_commands comes first because imports from self are
    # logically treated as if they were defined before the contents
    # of the higher level manifest.
    mp.west_commands = _west_commands_merge(submp.west_commands,
                                            mp.west_commands)
def _load_defaults(self, md: Dict, url_bases: Dict[str, str]) -> _defaults:
# md = manifest defaults (dictionary with values parsed from
# the manifest)
mdrem: Optional[str] = md.get('remote')
if mdrem:
# The default remote name, if provided, must refer to a
# well-defined remote.
if mdrem not in url_bases:
self._malformed(f'default remote {mdrem} is not defined')
return _defaults(mdrem, md.get('revision', _DEFAULT_REV))
    def _load_projects(self, manifest: Dict[str, Any],
                       url_bases: Dict[str, str],
                       defaults: _defaults,
                       ctx: _import_ctx) -> None:
        # Load the manifest's 'projects:' section into ctx.projects,
        # then recursively resolve any 'import:' keys found on
        # projects which this manifest newly added.
        #
        # manifest: parsed manifest data
        # url_bases: remote name -> URL base map for resolving URLs
        # defaults: _defaults from the manifest's 'defaults:' section
        # ctx: recursive import context; ctx.projects is mutated
        if 'projects' not in manifest:
            return
        have_imports = []
        names = set()
        for pd in manifest['projects']:
            project = self._load_project(pd, url_bases, defaults, ctx)
            name = project.name
            # Skip projects blocked by an importing manifest's filters.
            if not _imap_filter_allows(ctx.imap_filter, project):
                _logger.debug(f'project {name} in file {self.path} ' +
                              'ignored: an importing manifest blocked or '
                              'did not allow it')
                continue
            if name in names:
                # Project names must be unique within a manifest.
                self._malformed(f'project name {name} used twice in ' +
                                (self.path or 'the same manifest'))
            names.add(name)
            # Add the project to the map if it's new. _add_project()
            # only inserts unknown names, so a project defined by an
            # earlier (higher priority) manifest wins over this one.
            added = self._add_project(project, ctx.projects)
            if added:
                # Track project imports unless we are ignoring those.
                imp = pd.get('import')
                if imp:
                    if self._import_flags & (ImportFlag.IGNORE |
                                             ImportFlag.IGNORE_PROJECTS):
                        _logger.debug(
                            f'project {project}: ignored import ({imp})')
                    else:
                        have_imports.append((project, imp))
        # Handle imports from new projects in our "projects:" section.
        for project, imp in have_imports:
            self._import_from_project(project, imp, ctx)
    def _load_project(self, pd: Dict, url_bases: Dict[str, str],
                      defaults: _defaults, ctx: _import_ctx) -> Project:
        # Build and validate a single Project from one element of the
        # manifest's 'projects:' list. Invalid data is reported via
        # self._malformed().
        #
        # pd = project data (dictionary with values parsed from the
        # manifest)
        # url_bases: remote name -> URL base map
        # defaults: manifest-wide default remote/revision
        # ctx: recursive import context; ctx.path_prefix is prepended
        # to the project's path
        name = pd['name']
        # The name "manifest" cannot be used as a project name; it
        # is reserved to refer to the manifest repository itself
        # (e.g. from "west list"). Note that this has not always
        # been enforced, but it is part of the documentation.
        if name == 'manifest':
            self._malformed('no project can be named "manifest"')
        # Figure out the project's fetch URL:
        #
        # - url is tested first (and can't be used with remote or repo-path)
        # - remote is tested next (and must be defined if present)
        # - default remote is tested last, if there is one
        url = pd.get('url')
        remote = pd.get('remote')
        repo_path = pd.get('repo-path')
        if remote and url:
            self._malformed(f'project {name} has both "remote: {remote}" '
                            f'and "url: {url}"')
        if defaults.remote and not (remote or url):
            remote = defaults.remote
        if url:
            if repo_path:
                self._malformed(f'project {name} has "repo_path: {repo_path}" '
                                f'and "url: {url}"')
        elif remote:
            if remote not in url_bases:
                self._malformed(f'project {name} remote {remote} '
                                'is not defined')
            url = url_bases[remote] + '/' + (repo_path or name)
        else:
            self._malformed(
                f'project {name} '
                'has no remote or url and no default remote is set')
        # The project's path needs to respect any import: path-prefix,
        # regardless of self._import_flags. The 'ignore' type flags
        # just mean ignore the imported data. The path-prefix in this
        # manifest affects the project no matter what.
        imp = pd.get('import', None)
        if isinstance(imp, dict):
            pfx = self._load_imap(imp, f'project {name}').path_prefix
        else:
            pfx = ''
        # Historically, path attributes came directly from the manifest data
        # itself and were passed along to the Project constructor unmodified.
        # When we added path-prefix support, we needed to introduce pathlib
        # wrappers around the pd['path'] value as is done here.
        #
        # Since west is a git wrapper and git prefers to work with
        # POSIX paths in general, we've decided for now to force paths
        # to POSIX style in all circumstances. If this breaks
        # anything, we can always revisit, maybe adding a 'nativepath'
        # attribute or something like that.
        path = (ctx.path_prefix / pfx / pd.get('path', name)).as_posix()
        raw_groups = pd.get('groups')
        if raw_groups:
            self._validate_project_groups(name, raw_groups)
            groups: GroupsType = [str(group) for group in raw_groups]
        else:
            groups = []
        if imp and groups:
            # Maybe there is a sensible way to combine the two of these.
            # but it's not clear what it is. Let's avoid weird edge cases
            # like "what do I do about a project whose group is disabled
            # that I need to import data from?".
            self._malformed(
                f'project {name}: "groups" cannot be combined with "import"')
        userdata = pd.get('userdata')
        ret = Project(name, url, pd.get('revision', defaults.revision), path,
                      submodules=self._load_submodules(pd.get('submodules'),
                                                       f'project {name}'),
                      clone_depth=pd.get('clone-depth'),
                      west_commands=pd.get('west-commands'),
                      topdir=self.topdir, remote_name=remote,
                      groups=groups,
                      userdata=userdata)
        # Make sure the return Project's path does not escape the
        # workspace. We can't use escapes_directory() as that
        # resolves paths, which has proven to break some existing
        # users who use symlinks to existing project repositories
        # outside the workspace as a cache.
        #
        # Instead, normalize the path and make sure it's neither
        # absolute nor starts with a '..'. This is intended to be
        # a purely lexical operation which should therefore ignore
        # symbolic links.
        ret_norm = os.path.normpath(ret.path)
        if os.path.isabs(ret_norm):
            self._malformed(f'project "{ret.name}" has absolute path '
                            f'{ret.path}; this must be relative to the '
                            f'workspace topdir' +
                            (f' ({self.topdir})' if self.topdir else ''))
        if ret_norm.startswith('..'):
            self._malformed(f'project "{name}" path {ret.path} '
                            f'normalizes to {ret_norm}, which escapes '
                            f'the workspace topdir')
        return ret
def _validate_project_groups(self, project_name: str,
raw_groups: List[RawGroupType]):
for raw_group in raw_groups:
if not is_group(raw_group):
self._malformed(f'project {project_name}: '
f'invalid group "{raw_group}"')
def _load_submodules(self, submodules: Any, src: str) -> SubmodulesType:
# Gets a list of Submodules objects or boolean from the manifest
# *submodules* value.
#
# If submodules is a list[dict], checks the format of elements
# and converts the list to a List[Submodule].
#
# If submodules is a bool, returns its value (True means that
# all project submodules should be considered and False means
# all submodules should be ignored).
#
# If submodules is None, returns False.
#
# All errors raise MalformedManifest.
#
# :param submodules: content of the manifest submodules value.
# :param src: human readable source of the submodules data
# A missing 'submodules' is the same thing as False.
if submodules is None:
return False
# A bool should be returned as-is.
if isinstance(submodules, bool):
return submodules
# Convert lists[dict] to list[Submodules].
if isinstance(submodules, list):
ret = []
for index, value in enumerate(submodules):
if _is_submodule_dict_ok(value):
ret.append(Submodule(**value))
else:
self._malformed(f'{src}: invalid submodule element '
f'{value} at index {index}')
return ret
self._malformed(f'{src}: invalid submodules: {submodules} '
f'has type {type(submodules)}; '
'expected a list or boolean')
    def _import_from_project(self, project: Project, imp: Any,
                             ctx: _import_ctx):
        # Recursively resolve a manifest import from 'project'.
        #
        # - project: Project instance to import from
        # - imp: the parsed value of project's import key (string, list, etc.)
        # - ctx: recursive import context
        #
        # The import key's type selects the behavior: a true bool
        # imports the default manifest file (_WEST_YML), a str names
        # the file to import, a list is handled element by element,
        # and a dict may carry a file name plus filters/path-prefix.
        self._assert_imports_ok()
        self.has_imports = True
        imptype = type(imp)
        if imptype == bool:
            # We should not have been called unless the import was truthy.
            assert imp
            self._import_path_from_project(project, _WEST_YML, ctx)
        elif imptype == str:
            self._import_path_from_project(project, imp, ctx)
        elif imptype == list:
            # Each list element may itself be any of the above forms.
            for subimp in imp:
                self._import_from_project(project, subimp, ctx)
        elif imptype == dict:
            imap = self._load_imap(imp, f'project {project.name}')
            # Similar comments about composing ctx and imap apply here as
            # they do in _import_from_self().
            new_ctx = _compose_ctx_and_imap(ctx, imap)
            self._import_path_from_project(project, imap.file, new_ctx)
        else:
            self._malformed(f'{project.name_and_path}: invalid import {imp} '
                            f'type: {imptype}')
    def _import_path_from_project(self, project: Project, path: str,
                                  ctx: _import_ctx) -> None:
        # Import data from git at the given path at revision manifest-rev.
        # Fall back on self._importer if that fails.
        #
        # project: Project to read the manifest file from
        # path: manifest file path within the project
        # ctx: recursive import context, forwarded to the submanifest
        _logger.debug(f'resolving import {path} for {project}')
        imported = self._import_content_from_project(project, path)
        if imported is None:
            # This can happen if self._importer returns None.
            # It means there's nothing to do.
            return
        # Each element is either raw YAML text or already-parsed data.
        for data in imported:
            if isinstance(data, str):
                data = _load(data)
            validate(data)
            try:
                # Force a fallback onto manifest_path=project.path.
                # The subpath to the manifest file itself will not be
                # available, so that's the best we can do.
                #
                # Perhaps there's a cleaner way to convince mypy that
                # the validate() postcondition is that we've got a
                # real manifest and this is safe, but maybe just
                # fixing this hack would be best. For now, silence the
                # type checker on this line.
                del data['manifest']['self']['path']  # type: ignore
            except KeyError:
                pass
            # Destructively add the imported content into our 'projects'
            # map, passing along our context.
            try:
                kwargs: Dict[str, Any] = {'import-context': ctx}
                submp = Manifest(source_data=data,
                                 manifest_path=project.path,
                                 topdir=self.topdir,
                                 importer=self._importer,
                                 import_flags=self._import_flags,
                                 **kwargs).projects[MANIFEST_PROJECT_INDEX]
            # Too-deep (likely circular) import chains surface here as
            # RecursionError from the nested Manifest constructor.
            except RecursionError as e:
                raise _ManifestImportDepth(project, path) from e
            # If the submanifest has west commands, merge them
            # into project's.
            project.west_commands = _west_commands_merge(
                project.west_commands, submp.west_commands)
        _logger.debug(f'done resolving import {path} for {project}')
    def _import_content_from_project(self, project: Project,
                                     path: str) -> ImportedContentType:
        # Fetch the content of manifest file 'path' within 'project',
        # always returning it wrapped in a list (or None, if
        # self._importer returned None).
        #
        # When the project is cloned locally and FORCE_PROJECTS is not
        # set, the content is read via _manifest_content_at();
        # otherwise, or whenever that read fails in a recoverable way,
        # we delegate to self._importer.
        if not (self._import_flags & ImportFlag.FORCE_PROJECTS) and \
           project.is_cloned():
            try:
                content = _manifest_content_at(project, path)
            except MalformedManifest as mm:
                # Re-report the nested error as our own.
                self._malformed(mm.args[0])
            except FileNotFoundError:
                # We may need to fetch a new manifest-rev, e.g. if
                # revision is a branch that didn't used to have a
                # manifest, but now does.
                content = self._importer(project, path)
            except subprocess.CalledProcessError:
                # We may need a new manifest-rev, e.g. if revision is
                # a SHA we don't have yet.
                content = self._importer(project, path)
        else:
            # We need to clone this project, or we were specifically
            # asked to use the importer.
            content = self._importer(project, path)
        # Normalize a single string to a one-element list.
        if isinstance(content, str):
            content = [content]
        return content
    def _load_imap(self, imp: Dict, src: str) -> _import_map:
        # Convert a parsed self or project import value from YAML into
        # an _import_map namedtuple.
        #
        # imp: the parsed dict value of the import key
        # src: human-readable description of where 'imp' came from,
        # used in error messages
        # Work on a copy in case the caller needs the full value.
        copy = dict(imp)
        # Preserve deprecated whitelist/blacklist terms.
        #
        # Note that the inner pop() runs unconditionally, so a
        # deprecated key is always consumed from 'copy' (and its value
        # serves as the default); when both spellings are present, the
        # new-style key's value wins.
        name_allowlist = copy.pop(
            'name-allowlist', copy.pop('name-whitelist', [])
        )
        path_allowlist = copy.pop(
            'path-allowlist', copy.pop('path-whitelist', [])
        )
        name_blocklist = copy.pop(
            'name-blocklist', copy.pop('name-blacklist', [])
        )
        path_blocklist = copy.pop(
            'path-blocklist', copy.pop('path-blacklist', [])
        )
        ret = _import_map(copy.pop('file', _WEST_YML),
                          name_allowlist,
                          path_allowlist,
                          name_blocklist,
                          path_blocklist,
                          copy.pop('path-prefix', ''))
        # Check that the value is OK.
        if copy:
            # We popped out all of the valid keys already.
            self._malformed(f'{src}: invalid import contents: {copy}')
        elif not _is_imap_list(ret.name_allowlist):
            self._malformed(f'{src}: bad import name-allowlist '
                            f'{ret.name_allowlist}')
        elif not _is_imap_list(ret.path_allowlist):
            self._malformed(f'{src}: bad import path-allowlist '
                            f'{ret.path_allowlist}')
        elif not _is_imap_list(ret.name_blocklist):
            self._malformed(f'{src}: bad import name-blocklist '
                            f'{ret.name_blocklist}')
        elif not _is_imap_list(ret.path_blocklist):
            self._malformed(f'{src}: bad import path-blocklist '
                            f'{ret.path_blocklist}')
        elif not isinstance(ret.path_prefix, str):
            self._malformed(f'{src}: bad import path-prefix '
                            f'{ret.path_prefix}; expected str, not '
                            f'{type(ret.path_prefix)}')
        return ret
def _add_project(self, project: Project,
projects: Dict[str, Project]) -> bool:
# Add the project to our map if we don't already know about it.
# Return the result.
if project.name not in projects:
projects[project.name] = project
_logger.debug('added project %s path %s revision %s%s%s',
project.name, project.path, project.revision,
(f' from {self.path}' if self.path else ''),
(f' groups {project.groups}' if project.groups
else ''))
return True
else:
return False
def _check_paths_are_unique(self, mp: ManifestProject,
projects: Dict[str, Project],
top_level: bool) -> None:
# TODO: top_level can probably go away when #327 is done.
ppaths: Dict[Path, Project] = {}
if mp.path:
mppath: Optional[Path] = Path(mp.path)
else:
mppath = None
for name, project in projects.items():
pp = Path(project.path)
if top_level and pp == mppath:
self._malformed(f'project {name} path "{project.path}" '
'is taken by the manifest repository')
other = ppaths.get(pp)
if other:
self._malformed(f'project {name} path "{project.path}" '
f'is taken by project {other.name}')
ppaths[pp] = project
def _check_names(self, mp: ManifestProject,
projects: Dict[str, Project]) -> None:
for name, project in projects.items():
if _INVALID_PROJECT_NAME_RE.search(name):
self._malformed(f'Invalid project name: {name}')
    def _finalize_group_filter(self, self_group_filter: GroupFilterType,
                               ctx: _import_ctx, schema_version: str):
        # Update self.group_filter based on the schema version.
        #
        # self_group_filter: the group filter from this manifest
        # ctx: recursive import context; ctx.group_filter carries the
        # filters accumulated during imports
        # schema_version: the manifest's declared schema version
        if schema_version == '0.9':
            # If the user requested v0.9.x group-filter semantics,
            # provide them, but emit a warning that can't be silenced
            # if group filters were used anywhere.
            #
            # Hopefully no users ever actually see this warning.
            if self_group_filter or ctx.group_filter:
                _logger.warning(
                    "providing deprecated group-filter semantics "
                    "due to explicit 'manifest: version: 0.9'; "
                    "for the new semantics, use "
                    "'manifest: version: \"0.10\"' or later")
                # Set attribute for white-box testing the above warning.
                self._legacy_group_filter_warned = True
            # v0.9 semantics: use only this manifest's own filter.
            _update_disabled_groups(self._disabled_groups, self_group_filter)
            self.group_filter = self_group_filter
        else:
            # v0.10+ semantics: fold the imported group filters into
            # self._disabled_groups and expose the result as a
            # normalized list of '-group' entries.
            _update_disabled_groups(self._disabled_groups, ctx.group_filter)
            self.group_filter = [f'-{g}' for g in self._disabled_groups]
| 45,903 | 1,896 | 1,581 |
b4b64e30d993a2d468f74e6ffc31129a9833f440 | 876 | py | Python | iohandler/writer.py | isaiahnields/business-classifier | b43eb8ae429487309e40e533943a0c7e76d37e02 | [
"MIT"
] | null | null | null | iohandler/writer.py | isaiahnields/business-classifier | b43eb8ae429487309e40e533943a0c7e76d37e02 | [
"MIT"
] | null | null | null | iohandler/writer.py | isaiahnields/business-classifier | b43eb8ae429487309e40e533943a0c7e76d37e02 | [
"MIT"
] | null | null | null | import csv
| 24.333333 | 85 | 0.594749 | import csv
class FileWriter:
    """Appends rows to a CSV file, flushing after every write."""
    def __init__(self, file_location):
        """
        Open the file at file_location and attach a csv writer to it.
        :param file_location: path of the file that will be written to
        """
        # open for writing (truncates any existing content)
        self.file = open(file_location, 'w')
        # csv writer bound to the open file, using '\n' line endings
        self.writer = csv.writer(self.file, lineterminator='\n')
    def write(self, row):
        """
        Append one row of values to the csv file and flush it to disk.
        :param row: sequence of values written as a single csv row
        """
        self.writer.writerow(row)
        # flush so consumers see each row immediately
        self.file.flush()
    def close(self):
        """
        Close the underlying file.
        """
        self.file.close()
| 0 | 841 | 23 |
0f63d7b1ac543a3111be7023ea9f14388beef221 | 589 | py | Python | dcmrtstruct2nii/cli/liststructs.py | thomas-albrecht/dcmrtstruct2nii | 5759fdf8672dc991e4e3fe57296cdc59d901c285 | [
"Apache-2.0"
] | null | null | null | dcmrtstruct2nii/cli/liststructs.py | thomas-albrecht/dcmrtstruct2nii | 5759fdf8672dc991e4e3fe57296cdc59d901c285 | [
"Apache-2.0"
] | null | null | null | dcmrtstruct2nii/cli/liststructs.py | thomas-albrecht/dcmrtstruct2nii | 5759fdf8672dc991e4e3fe57296cdc59d901c285 | [
"Apache-2.0"
] | null | null | null | from cleo import Command
import logging
from dcmrtstruct2nii.facade.dcmrtstruct2nii import list_rt_structs
class ListStructs(Command):
"""
List structures in RT Struct
list
{--r|rtstruct= : Path to DICOM RT Struct file}
"""
| 21.035714 | 66 | 0.626486 | from cleo import Command
import logging
from dcmrtstruct2nii.facade.dcmrtstruct2nii import list_rt_structs
class ListStructs(Command):
    """
    List structures in RT Struct
    list
        {--r|rtstruct= : Path to DICOM RT Struct file}
    """
    def handle(self):
        # NOTE: the docstring above is parsed by cleo as the command
        # definition, so it must not be reworded.
        rtstruct_path = self.option('rtstruct')
        if not rtstruct_path:
            logging.error('dcmrtstruct2nii list --rtstruct <..>')
            return -1
        found = list_rt_structs(rtstruct_path)
        for struct_name in found:
            print(struct_name)
        print(f'Found {len(found)} structures')
| 310 | 0 | 26 |
3ef495b179a0baa8a6d9d01ab07b1bb3069f3b75 | 1,680 | py | Python | menus/migrations/0005_auto_20200715_0348.py | theNegativeEntropy/digitalmenu | ae28932bbf00607e484a965ca90043250708f32c | [
"MIT"
] | null | null | null | menus/migrations/0005_auto_20200715_0348.py | theNegativeEntropy/digitalmenu | ae28932bbf00607e484a965ca90043250708f32c | [
"MIT"
] | 5 | 2021-03-30T13:48:07.000Z | 2021-09-22T19:14:57.000Z | menus/migrations/0005_auto_20200715_0348.py | theNegativeEntropy/digitalmenu | ae28932bbf00607e484a965ca90043250708f32c | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-15 03:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 35 | 122 | 0.618452 | # Generated by Django 3.0.7 on 2020-07-15 03:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds an 'owner' foreign key (to the
    # configured user model) on Menu, MenuCategory and MenuItem, and a
    # 'shop' foreign key (to shops.Shop) on MenuCategory and MenuItem.
    #
    # default='' is a one-off value used only to populate existing
    # rows; preserve_default=False keeps it out of the model state.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shops', '0005_shop_owner'),
        ('menus', '0004_auto_20200715_0255'),
    ]
    operations = [
        migrations.AddField(
            model_name='menu',
            name='owner',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menucategory',
            name='owner',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menucategory',
            name='shop',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shops.Shop'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menuitem',
            name='owner',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menuitem',
            name='shop',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='shops.Shop'),
            preserve_default=False,
        ),
    ]
96101e6dfb54b9aad38ea32b1d50d5f0cdb04c34 | 3,570 | py | Python | examples/005_evaluation_new_oversampler.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 271 | 2020-01-18T09:04:35.000Z | 2022-03-31T11:49:12.000Z | examples/005_evaluation_new_oversampler.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 19 | 2020-05-04T18:24:03.000Z | 2022-03-21T23:44:43.000Z | examples/005_evaluation_new_oversampler.py | szghlm/smote_variants | 9066ddbd526b18bb273746c1b989e8e07a35abd2 | [
"MIT"
] | 70 | 2020-01-18T15:01:43.000Z | 2022-03-28T15:10:19.000Z |
# coding: utf-8
# # Evaluation of the new oversampler on the standard database foldings
#
# In this notebook we give an example evaluating a new oversampler on the standard 104 imbalanced datasets. The evaluation is highly similar to that illustrated in the notebook ```002_evaluation_multiple_datasets``` with the difference that in this case some predefined dataset foldings are used to make the results comparable to those reported in the ranking page of the documentation. The database foldings need to be downloaded from the github repository and placed in the 'smote_foldings' directory.
# In[1]:
import os, pickle, itertools
# import classifiers
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote_variants import MLPClassifierWrapper
# import SMOTE variants
import smote_variants as sv
# itertools to derive imbalanced databases
import imbalanced_databases as imbd
# In[2]:
# setting global parameters
# cache directory for the dataset foldings, created under the user's
# home if it does not exist yet
folding_path= os.path.join(os.path.expanduser('~'), 'smote_foldings')
if not os.path.exists(folding_path):
    os.makedirs(folding_path)
# cap on the number of oversampler parameter combinations evaluated
max_sampler_parameter_combinations= 35
# number of parallel jobs used by the evaluation
n_jobs= 5
# In[3]:
# instantiate classifiers (6 + 6 + 18 + 6 = 36 configurations in total)
# Support Vector Classifiers with 6 parameter combinations
sv_classifiers= [CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l1', loss= 'squared_hinge', dual= False)),
                 CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l2', loss= 'hinge', dual= True)),
                 CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l2', loss= 'squared_hinge', dual= False)),
                 CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l1', loss= 'squared_hinge', dual= False)),
                 CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l2', loss= 'hinge', dual= True)),
                 CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l2', loss= 'squared_hinge', dual= False))]
# Multilayer Perceptron Classifiers with 6 parameter combinations
# (2 activations x 3 hidden layer fractions)
mlp_classifiers= []
for x in itertools.product(['relu', 'logistic'], [1.0, 0.5, 0.1]):
    mlp_classifiers.append(MLPClassifierWrapper(activation= x[0], hidden_layer_fraction= x[1]))
# Nearest Neighbor Classifiers with 18 parameter combinations
# (3 neighbor counts x 2 weightings x 3 Minkowski p values)
nn_classifiers= []
for x in itertools.product([3, 5, 7], ['uniform', 'distance'], [1, 2, 3]):
    nn_classifiers.append(KNeighborsClassifier(n_neighbors= x[0], weights= x[1], p= x[2]))
# Decision Tree Classifiers with 6 parameter combinations
# (2 split criteria x 3 depth limits)
dt_classifiers= []
for x in itertools.product(['gini', 'entropy'], [None, 3, 5]):
    dt_classifiers.append(DecisionTreeClassifier(criterion= x[0], max_depth= x[1]))
# collect all classifier configurations into a single list
classifiers= []
classifiers.extend(sv_classifiers)
classifiers.extend(mlp_classifiers)
classifiers.extend(nn_classifiers)
classifiers.extend(dt_classifiers)
# In[4]:
# querying datasets for the evaluation
datasets= imbd.get_data_loaders('study')
# In[ ]:
# executing the evaluation over all oversamplers and classifier
# configurations, caching intermediate results in folding_path
results= sv.evaluate_oversamplers(datasets,
                                  samplers= sv.get_all_oversamplers(),
                                  classifiers= classifiers,
                                  cache_path= folding_path,
                                  n_jobs= n_jobs,
                                  remove_sampling_cache= True,
                                  max_samp_par_comb= max_sampler_parameter_combinations)
# In[ ]:
# The evaluation results are available in the results dataframe for further analysis.
print(results)
| 34 | 503 | 0.715126 |
# coding: utf-8
# # Evaluation of the new oversampler on the standard database foldings
#
# In this notebook we give an example evaluating a new oversampler on the standard 104 imbalanced datasets. The evaluation is highly similar to that illustrated in the notebook ```002_evaluation_multiple_datasets``` with the difference that in this case some predefined dataset foldings are used to make the results comparable to those reported in the ranking page of the documentation. The database foldings need to be downloaded from the github repository and placed in the 'smote_foldings' directory.
# In[1]:
import os, pickle, itertools
# import classifiers
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote_variants import MLPClassifierWrapper
# import SMOTE variants
import smote_variants as sv
# itertools to derive imbalanced databases
import imbalanced_databases as imbd
# In[2]:
# setting global parameters
folding_path= os.path.join(os.path.expanduser('~'), 'smote_foldings')
if not os.path.exists(folding_path):
os.makedirs(folding_path)
max_sampler_parameter_combinations= 35
n_jobs= 5
# In[3]:
# instantiate classifiers
# Support Vector Classifiers with 6 parameter combinations
sv_classifiers= [CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l1', loss= 'squared_hinge', dual= False)),
CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l2', loss= 'hinge', dual= True)),
CalibratedClassifierCV(LinearSVC(C=1.0, penalty='l2', loss= 'squared_hinge', dual= False)),
CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l1', loss= 'squared_hinge', dual= False)),
CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l2', loss= 'hinge', dual= True)),
CalibratedClassifierCV(LinearSVC(C=10.0, penalty='l2', loss= 'squared_hinge', dual= False))]
# Multilayer Perceptron Classifiers with 6 parameter combinations
mlp_classifiers= []
for x in itertools.product(['relu', 'logistic'], [1.0, 0.5, 0.1]):
mlp_classifiers.append(MLPClassifierWrapper(activation= x[0], hidden_layer_fraction= x[1]))
# Nearest Neighbor Classifiers with 18 parameter combinations
nn_classifiers= []
for x in itertools.product([3, 5, 7], ['uniform', 'distance'], [1, 2, 3]):
nn_classifiers.append(KNeighborsClassifier(n_neighbors= x[0], weights= x[1], p= x[2]))
# Decision Tree Classifiers with 6 parameter combinations
dt_classifiers= []
for x in itertools.product(['gini', 'entropy'], [None, 3, 5]):
dt_classifiers.append(DecisionTreeClassifier(criterion= x[0], max_depth= x[1]))
classifiers= []
classifiers.extend(sv_classifiers)
classifiers.extend(mlp_classifiers)
classifiers.extend(nn_classifiers)
classifiers.extend(dt_classifiers)
# In[4]:
# querying datasets for the evaluation
datasets= imbd.get_data_loaders('study')
# In[ ]:
# executing the evaluation
results= sv.evaluate_oversamplers(datasets,
samplers= sv.get_all_oversamplers(),
classifiers= classifiers,
cache_path= folding_path,
n_jobs= n_jobs,
remove_sampling_cache= True,
max_samp_par_comb= max_sampler_parameter_combinations)
# In[ ]:
# The evaluation results are available in the results dataframe for further analysis.
print(results)
| 0 | 0 | 0 |
ac7310b49e080a4f14ed384393fb879b330580a7 | 94 | py | Python | graph_sage.py | ejhill24/compound-pcfg | f871541d4a462d4bf37d3349f4746a139411a6e1 | [
"MIT"
] | 1 | 2021-01-08T20:16:16.000Z | 2021-01-08T20:16:16.000Z | graph_sage.py | ejhill24/compound-pcfg | f871541d4a462d4bf37d3349f4746a139411a6e1 | [
"MIT"
] | null | null | null | graph_sage.py | ejhill24/compound-pcfg | f871541d4a462d4bf37d3349f4746a139411a6e1 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
# Sanity check: print the active TensorFlow version.
print(tf.__version__)
| 18.8 | 28 | 0.829787 | import numpy as np
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
| 0 | 0 | 0 |
389c4cf76abd71174620e665c96ceb58dd5d4a51 | 3,601 | py | Python | recipes/Python/579031_Simple_way_find_number_perfect_square_numbers/recipe-579031.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/579031_Simple_way_find_number_perfect_square_numbers/recipe-579031.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/579031_Simple_way_find_number_perfect_square_numbers/recipe-579031.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #
# Question: given a range a,b, find the total number of perfect square numbers in the given range.
#
import cmath
def find_perfect_square_count(a, b, verbose=False):
    '''
    Count the perfect-square slots in the range (a, b).

    The strategy here is not to iterate through the set of possible integer
    values and check for is_perfect_square() each time, but to translate the
    upper and lower values to either complex or real space of square numbers
    and count the integer values between the two square roots. Both signed
    roots are counted, which is why the all-positive case yields
    2*(ceil(sqrt(max)) - ceil(sqrt(min))) - 2.

    If either a or b is negative, the square roots are complex and the
    count walks the imaginary axis instead; when both are negative,
    min(a, b) is used so the same imaginary-axis integers are not
    counted twice.

    a : float
        represents either start or end of range
    b : float
        represents either start or end of range

    return : integer or NaN
        total number of perfect square slots between (a, b), or
        float('nan') when the inputs are unusable.
    '''
    # Local import keeps this fix self-contained; the original code used
    # 'ceil', 'math', 'np', 'NaN' and Python 2 print statements without
    # ever importing/defining them, so it could not run under Python 3.
    import math

    # protect against silly type errors: anything not coercible to
    # float yields NaN instead of propagating an exception
    try:
        float(a)
        float(b)
    except (TypeError, ValueError):
        return float('nan')

    # protect against something that might be nothing; the original
    # '(a or b) in [None]' missed a=None whenever b was truthy (this
    # check is belt-and-braces: float(None) already raised above)
    if a is None or b is None:
        return float('nan')

    # nothing to do here, fail quickly: an empty range has no squares
    if b == a:
        return 0

    # do we need to handle complex numbers?
    if a < 0 or b < 0:
        if verbose:
            print('complex')
        # case when a imaginary and b real
        if a < 0 and b >= 0:
            return (math.ceil(cmath.sqrt(complex(a, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(b, 0)).real) - 2)
        # case when a real and b imaginary
        if a >= 0 and b < 0:
            return (math.ceil(cmath.sqrt(complex(b, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(a, 0)).real) - 2)
        # special case when both negative: both roots lie on the
        # imaginary axis, so only min(a, b) is used to avoid double
        # counting the same integer values
        if a < 0 and b < 0:
            return math.ceil(cmath.sqrt(complex(min(a, b), 0)).imag) - 1

    # real-space handling; works for either argument order, and the
    # 'count > 0' guard keeps the -2 adjustment from going negative
    if a >= b:
        count = 2 * (math.ceil(math.sqrt(abs(a))) -
                     math.ceil(math.sqrt(abs(b))))
        return count - 2 if count > 0 else 0
    if b >= a:
        count = 2 * (math.ceil(math.sqrt(abs(b))) -
                     math.ceil(math.sqrt(abs(a))))
        return count - 2 if count > 0 else 0

    # unreachable; kept for parity with the original control flow
    return float('nan')
# some preflight checks (math.isnan replaces the undefined bare isnan)
import math
assert(find_perfect_square_count(0, 100) == 18)
assert(math.isnan(find_perfect_square_count('ff', 1.2)))
assert(math.isnan(find_perfect_square_count('ff', None)))

# lets fully test with a handful of random ranges
import random

number_of_tests = 5
# random.randint requires ints in Python 3; the original 1e4 was a float
value = 10 ** 4
# range() replaces arange(), which came from an implicit numpy star-import
for a, b in zip([random.randint(-value, value) for _ in range(number_of_tests)],
                [random.randint(-value, value) for _ in range(number_of_tests)]):
    print('%d\t between \t[%d, %d]' % (find_perfect_square_count(a, b), a, b))
| 36.744898 | 120 | 0.609275 | #
# Question: given a range a,b, find the total number of perfect square numbers in the given range.
#
import cmath
def find_perfect_square_count(a, b, verbose=False):
    '''
    Count the integers whose square lies strictly between ``a`` and ``b``.

    Instead of iterating over the range and testing every value (O(n)), the
    bounds are mapped through sqrt() and the integer values between the two
    roots are counted.  Because x**2 == (-x)**2, both the positive and the
    negative root of each square are counted -- which is why (0, 100) yields
    18 (x in +-1 .. +-9).  Negative bounds are mapped through cmath.sqrt()
    and counted along the imaginary axis instead.

    a : float
        one end of the range (order of a and b does not matter)
    b : float
        the other end of the range
    verbose : bool
        when True, prints a note when the complex-plane branch is taken
    return : int or NaN
        the count, or NaN when the inputs are not usable numbers
    '''
    import math
    # Fixes the original code, where the bare name ``NaN`` was never defined
    # and every failure path would have raised NameError instead.
    nan = float('nan')
    # Reject inputs that cannot be interpreted as numbers.
    try:
        float(a)
        float(b)
    except (TypeError, ValueError):
        return nan
    if a is None or b is None:
        return nan
    # An empty range contains no squares (even if a itself is a square).
    if b == a:
        return 0
    # A negative endpoint is handled on the complex plane: sqrt of a negative
    # bound is purely imaginary, so integer steps are counted along the
    # imaginary axis instead of the real axis.
    if a < 0 or b < 0:
        if verbose:
            print('complex')
        if a < 0 and b >= 0:
            return (math.ceil(cmath.sqrt(complex(a, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(b, 0)).real) - 2)
        if a >= 0 and b < 0:
            return (math.ceil(cmath.sqrt(complex(b, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(a, 0)).real) - 2)
        # Both negative: both roots lie on the imaginary axis, so only the
        # larger-magnitude bound contributes (otherwise we double count).
        if a < 0 and b < 0:
            return math.ceil(cmath.sqrt(complex(min(a, b), 0)).imag) - 1
    # Purely real case: twice the number of integers between the two square
    # roots (one positive and one negative root each), excluding endpoints.
    if a >= b:
        count = 2 * (math.ceil(math.sqrt(abs(a))) - math.ceil(math.sqrt(abs(b))))
        return count - 2 if count > 0 else 0
    if b >= a:
        count = 2 * (math.ceil(math.sqrt(abs(b))) - math.ceil(math.sqrt(abs(a))))
        return count - 2 if count > 0 else 0
    # NaN inputs fail every comparison above and fall through to here.
    return nan
# some preflight checks
# NOTE(review): these module-level checks run at import time.  They rely on
# names not defined anywhere in this file (`isnan`, `arange`, and `NaN`
# inside the function) and on Python 2 `print` statements -- this looks like
# it targets a pylab-style interactive session; verify before running.
assert(find_perfect_square_count(0, 100) == 18)
assert(isnan(find_perfect_square_count('ff', 1.2)))
assert(isnan(find_perfect_square_count('ff', None)))
# lets fully test
import random
number_of_tests = 5
value = 1e4
# Draw random (a, b) pairs in [-1e4, 1e4] and print the count for each range.
for (a,b) in zip([random.randint(-value,value) for x in arange(number_of_tests)]
                 , [random.randint(-value,value) for x in arange(number_of_tests)]):
    print '%d\t between \t[%d, %d]'%(find_perfect_square_count(a,b), a, b)
| 0 | 0 | 0 |
4f983a891333bfd8abc2163e6fb14ef469fc9b6e | 1,598 | py | Python | src/beanmachine/ppl/experimental/tests/mala/single_site_metropolis_adjusted_langevin_algorithm_conjugate_test_nightly.py | facebookresearch/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 | [
"MIT"
] | 177 | 2021-12-12T14:19:05.000Z | 2022-03-24T05:48:10.000Z | src/beanmachine/ppl/experimental/tests/mala/single_site_metropolis_adjusted_langevin_algorithm_conjugate_test_nightly.py | facebookresearch/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 | [
"MIT"
] | 171 | 2021-12-11T06:12:05.000Z | 2022-03-31T20:26:29.000Z | src/beanmachine/ppl/experimental/tests/mala/single_site_metropolis_adjusted_langevin_algorithm_conjugate_test_nightly.py | facebookresearch/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 | [
"MIT"
] | 31 | 2021-12-11T06:27:19.000Z | 2022-03-25T13:31:56.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.experimental.mala import (
SingleSiteMetropolisAdapatedLangevinAlgorithm,
)
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
| 38.047619 | 88 | 0.770964 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.experimental.mala import (
SingleSiteMetropolisAdapatedLangevinAlgorithm,
)
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteMetropolisAdapatedLangevinAlgorithmConjugateTest(
    unittest.TestCase, AbstractConjugateTests
):
    """Conjugate-model smoke tests for the single-site MALA proposer.

    Each test builds the inference method with a fixed step size and hands it
    to the matching conjugate-run helper from AbstractConjugateTests, using
    the same number of adaptive and regular samples.
    """

    def _conjugate_run(self, run, step_size, n):
        # Shared driver: construct the proposer, then delegate to the helper.
        mala = SingleSiteMetropolisAdapatedLangevinAlgorithm(step_size)
        run(mala, num_samples=n, num_adaptive_samples=n)

    def test_beta_binomial_conjugate_run(self):
        self._conjugate_run(self.beta_binomial_conjugate_run, 0.05, 500)

    def test_gamma_gamma_conjugate_run(self):
        self._conjugate_run(self.gamma_gamma_conjugate_run, 0.05, 500)

    def test_gamma_normal_conjugate_run(self):
        self._conjugate_run(self.gamma_normal_conjugate_run, 0.05, 500)

    def test_normal_normal_conjugate_run(self):
        self._conjugate_run(self.normal_normal_conjugate_run, 0.05, 500)

    def test_dirichlet_categorical_conjugate_run(self):
        self._conjugate_run(self.dirichlet_categorical_conjugate_run, 0.01, 700)
| 971 | 93 | 157 |
74fab2d630a219aeb732e879cee862c7eb741a42 | 256 | py | Python | src/dashboard/__init__.py | onubot/caterbot | 6dfe807292bc39a46c3e3b0bab30e0fc80e7f40c | [
"MIT"
] | null | null | null | src/dashboard/__init__.py | onubot/caterbot | 6dfe807292bc39a46c3e3b0bab30e0fc80e7f40c | [
"MIT"
] | null | null | null | src/dashboard/__init__.py | onubot/caterbot | 6dfe807292bc39a46c3e3b0bab30e0fc80e7f40c | [
"MIT"
] | null | null | null | # Dashboard blueprint
from flask.blueprints import Blueprint
from config import api_version
# Blueprint for the dashboard section of the app; routes attach under
# /api/<api_version>/dashboard and templates resolve from templates/dashboard.
dash = Blueprint(
    "dashboard",
    __name__,
    url_prefix=f"/api/{api_version}/dashboard",
    template_folder="templates/dashboard",
)
# Imported for its side effects only, after ``dash`` exists -- presumably
# the views module registers its routes on this blueprint (common Flask
# pattern to avoid a circular import); verify in views.py.
from . import views
| 19.692308 | 47 | 0.746094 | # Dashboard blueprint
from flask.blueprints import Blueprint
from config import api_version
# Blueprint for the dashboard section of the app; routes attach under
# /api/<api_version>/dashboard and templates resolve from templates/dashboard.
dash = Blueprint(
    "dashboard",
    __name__,
    url_prefix=f"/api/{api_version}/dashboard",
    template_folder="templates/dashboard",
)
# Imported for its side effects only, after ``dash`` exists -- presumably
# the views module registers its routes on this blueprint (common Flask
# pattern to avoid a circular import); verify in views.py.
from . import views
| 0 | 0 | 0 |
3f4591585d913e9b03db0e770ca385d819aed200 | 204 | py | Python | setup.py | qianjing2020/twitoff | a223e8f4a3dfecd582c18c92f8ac9212a01d4570 | [
"MIT"
] | null | null | null | setup.py | qianjing2020/twitoff | a223e8f4a3dfecd582c18c92f8ac9212a01d4570 | [
"MIT"
] | null | null | null | setup.py | qianjing2020/twitoff | a223e8f4a3dfecd582c18c92f8ac9212a01d4570 | [
"MIT"
] | null | null | null | from setuptools import _install_setup_requires, setup
# Packaging metadata for the twitoff_app Flask application.
# NOTE(review): the file's import line also pulls in the private setuptools
# name ``_install_setup_requires``, which is unused here and may not exist in
# newer setuptools releases -- only ``setup`` is needed.
setup(
    name="twitoff_app",
    packages=['twitoff_app'],
    # Ship non-Python data files declared in MANIFEST.in as well.
    include_package_data=True,
    install_requires=[
        'flask',
    ],
) | 20.4 | 53 | 0.647059 | from setuptools import _install_setup_requires, setup
# Packaging metadata for the twitoff_app Flask application.
# NOTE(review): the file's import line also pulls in the private setuptools
# name ``_install_setup_requires``, which is unused here and may not exist in
# newer setuptools releases -- only ``setup`` is needed.
setup(
    name="twitoff_app",
    packages=['twitoff_app'],
    # Ship non-Python data files declared in MANIFEST.in as well.
    include_package_data=True,
    install_requires=[
        'flask',
    ],
) | 0 | 0 | 0 |
cb41ee7d3e036aa5ad0ab1e0ff5773c0ce2b3d49 | 387 | py | Python | svn-multilog.py | jonathancone/svn-utils | 1d9715db3299d26cca802de63719abec34f478f5 | [
"Apache-2.0"
] | 25 | 2016-03-07T13:45:33.000Z | 2022-01-31T03:49:45.000Z | svn-multilog.py | jonathancone/svn-utils | 1d9715db3299d26cca802de63719abec34f478f5 | [
"Apache-2.0"
] | 1 | 2017-08-14T08:25:03.000Z | 2017-08-14T08:25:03.000Z | svn-multilog.py | jonathancone/svn-utils | 1d9715db3299d26cca802de63719abec34f478f5 | [
"Apache-2.0"
] | 13 | 2017-09-29T15:38:36.000Z | 2022-03-21T19:13:03.000Z | from subprocess import call
import sys
# CLI: svn-multilog <projects-file> <svn-base-url> <oldest-revision>
# The projects file lists one repository path per line; each is prefixed
# with the SVN base URL.
projects_file = sys.argv[1]
svn_path = sys.argv[2]
oldest_rev = sys.argv[3]
with open(projects_file) as f:
    repo_paths = f.readlines()
# Python 2 print statement; line[:-1] strips the trailing newline.  The same
# comprehension is rebuilt on the next line -- redundant but harmless.
print [svn_path + line[:-1] for line in repo_paths]
paths = [svn_path + line[:-1] for line in repo_paths]
# Run `svn log` from HEAD back to the oldest revision for every repo path.
for path in paths:
call(['svn', 'log', path, '-v', '--stop-on-copy', '-r', 'HEAD:' + oldest_rev]) | 24.1875 | 79 | 0.671835 | from subprocess import call
import sys
projects_file = sys.argv[1]
svn_path = sys.argv[2]
oldest_rev = sys.argv[3]
with open(projects_file) as f:
repo_paths = f.readlines()
print [svn_path + line[:-1] for line in repo_paths]
paths = [svn_path + line[:-1] for line in repo_paths]
for path in paths:
call(['svn', 'log', path, '-v', '--stop-on-copy', '-r', 'HEAD:' + oldest_rev]) | 0 | 0 | 0 |
3ba0d2c32c0376fb690dd41868b71959bbf7546b | 399 | py | Python | FUNDASTORE/APPS/PRINCIPAL/migrations/0004_auto_20210212_1545.py | rm-claudio22/VENV00-FUNDASTORE | 6764cf40fb44b5306e8d89f667fba87303824101 | [
"CC0-1.0"
] | null | null | null | FUNDASTORE/APPS/PRINCIPAL/migrations/0004_auto_20210212_1545.py | rm-claudio22/VENV00-FUNDASTORE | 6764cf40fb44b5306e8d89f667fba87303824101 | [
"CC0-1.0"
] | null | null | null | FUNDASTORE/APPS/PRINCIPAL/migrations/0004_auto_20210212_1545.py | rm-claudio22/VENV00-FUNDASTORE | 6764cf40fb44b5306e8d89f667fba87303824101 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-12 20:45
from django.db import migrations, models
| 21 | 54 | 0.611529 | # Generated by Django 3.1.5 on 2021-02-12 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.5).

    Alters ``blog.blg_fecha_act`` to ``DateTimeField(auto_now=True)`` so the
    timestamp is refreshed automatically on every save.
    """

    # Must be applied after 0003_blog_blg_descripcion in the PRINCIPAL app.
    dependencies = [
        ('PRINCIPAL', '0003_blog_blg_descripcion'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blog',
            name='blg_fecha_act',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 0 | 285 | 23 |
226547bc8a9448ea4c0ccefbeac4eeb16bdbdebc | 535 | py | Python | gdksite/base/migrations/0032_auto_20191027_0009.py | vgrivtsov/moygdk | 74d63299c7326bcae92a17b61d978ad91f0d5552 | [
"MIT"
] | null | null | null | gdksite/base/migrations/0032_auto_20191027_0009.py | vgrivtsov/moygdk | 74d63299c7326bcae92a17b61d978ad91f0d5552 | [
"MIT"
] | 17 | 2020-02-11T23:35:05.000Z | 2022-03-11T23:39:52.000Z | gdksite/base/migrations/0032_auto_20191027_0009.py | vgrivtsov/moygdk | 74d63299c7326bcae92a17b61d978ad91f0d5552 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-26 14:09
from django.db import migrations, models
| 28.157895 | 191 | 0.635514 | # Generated by Django 2.2.6 on 2019-10-26 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.6).

    Redefines ``collectivepage.club_paid`` as a CharField with paid/free
    choices (Russian-language labels) defaulting to ``paid_no``.
    """

    # Must be applied after 0031_auto_20191027_0008 in the base app.
    dependencies = [
        ('base', '0031_auto_20191027_0008'),
    ]
    operations = [
        migrations.AlterField(
            model_name='collectivepage',
            name='club_paid',
            field=models.CharField(choices=[('paid_yes', 'Платные'), ('paid_no', 'Бесплатные')], default='paid_no', help_text='Платный кружок', max_length=255, verbose_name='Платный кружок'),
        ),
    ]
| 0 | 464 | 23 |
5730081bea00d705475d9337e15311d3a32ef259 | 2,285 | py | Python | examples/voice_record.py | LonelyGuy12/disnake-ext-audiorec | 81a52db6d54d36635e01a674340e94c55a408261 | [
"MIT"
] | null | null | null | examples/voice_record.py | LonelyGuy12/disnake-ext-audiorec | 81a52db6d54d36635e01a674340e94c55a408261 | [
"MIT"
] | null | null | null | examples/voice_record.py | LonelyGuy12/disnake-ext-audiorec | 81a52db6d54d36635e01a674340e94c55a408261 | [
"MIT"
] | null | null | null | import asyncio
import os
import io
import disnake
import logging
from disnake.ext import commands
from disnake.ext.audiorec import NativeVoiceClient
# Configure root logging before the bot starts emitting records.
logging.basicConfig(level=logging.INFO)
bot = commands.Bot(command_prefix=commands.when_mentioned_or("+"),
                   description='Relatively simple recording bot example')
# NOTE(review): this copy of the example is truncated -- the decorator below
# has no function under it, and ``Recorder`` is referenced without being
# defined in this excerpt; see the full example for the missing pieces.
@bot.event
bot.add_cog(Recorder(bot))
bot.run(os.environ['TOKEN'])
| 30.466667 | 93 | 0.633698 | import asyncio
import os
import io
import disnake
import logging
from disnake.ext import commands
from disnake.ext.audiorec import NativeVoiceClient
logging.basicConfig(level=logging.INFO)
class Recorder(commands.Cog):
    """Cog exposing voice-recording commands: join / rec / stop.

    ``rec`` records the caller's voice channel for a fixed 30 seconds and then
    invokes ``stop``, which sends the captured audio back as a WAV attachment.
    (Command docstrings below double as the bot's help text, so they are kept
    as written.)
    """

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command()
    async def join(self, ctx: commands.Context):
        """Joins a voice channel"""
        channel: disnake.VoiceChannel = ctx.author.voice.channel  # type: ignore
        # Already connected somewhere: just move to the caller's channel.
        if ctx.voice_client is not None:
            return await ctx.voice_client.move_to(channel)
        await channel.connect(cls=NativeVoiceClient)

    @commands.command()
    async def rec(self, ctx: commands.Context):
        """Start recording"""
        # Errors raised inside the recorder are only reported to stdout.
        ctx.voice_client.record(lambda e: print(f"Exception: {e}"))
        # Plain literals below: the original f-prefixes had no placeholders
        # and were unnecessary; the runtime strings are unchanged.
        await ctx.send('Start Recording')
        await asyncio.sleep(30)
        await ctx.invoke(self.bot.get_command('stop'))

    @commands.command()
    async def stop(self, ctx: commands.Context):
        """Stops and disconnects the bot from voice"""
        if not ctx.voice_client.is_recording():
            return
        await ctx.send('Stop Recording')
        wav_bytes = await ctx.voice_client.stop_record()
        wav_file = disnake.File(io.BytesIO(wav_bytes), filename="Recorded.wav")
        await ctx.send(file=wav_file)

    @rec.before_invoke
    async def ensure_voice(self, ctx: commands.Context):
        # Runs before ``rec``: ensure the bot shares a voice channel with the
        # caller and is not already playing audio.
        if ctx.voice_client is None:
            if ctx.author.voice:  # type: ignore
                await ctx.author.voice.channel.connect(cls=NativeVoiceClient)  # type: ignore
            else:
                await ctx.send("You are not connected to a voice channel.")
                raise commands.CommandError("Author not connected to a voice channel.")
        elif ctx.voice_client.is_playing():
            ctx.voice_client.stop()
# Module-level wiring: build the bot, register the Recorder cog, and run.
bot = commands.Bot(command_prefix=commands.when_mentioned_or("+"),
                   description='Relatively simple recording bot example')
@bot.event
async def on_ready():
    # Simple startup banner once the gateway connection is established.
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('------')
bot.add_cog(Recorder(bot))
# Blocks until the process exits; the token is read from the environment.
bot.run(os.environ['TOKEN'])
| 618 | 1,198 | 47 |
dcd53b89abf8828ecc57c2431b9d8699bf94cd04 | 3,075 | py | Python | utility/functions.py | mfschmidt/PyGEST | 3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b | [
"MIT"
] | null | null | null | utility/functions.py | mfschmidt/PyGEST | 3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b | [
"MIT"
] | 1 | 2020-09-20T03:20:14.000Z | 2020-09-20T03:20:14.000Z | utility/functions.py | mfschmidt/PyGEST | 3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
The following functions are a miscellaneous collection of utility functions
that can be useful in making code shorter and easier to read.
"""
def shortened_hash(s, n):
    """Abbreviate a hash-like string to roughly ``n`` characters.

    Strings no longer than ``n`` are returned unchanged; longer ones keep
    their first and last few characters joined by an ellipsis ("...").

    :param s: the full string to shorten
    :param n: the desired length of the returned string
    :return: ``s`` itself, or its two ends joined by "..."
    """
    if len(s) <= n:
        return s
    keep = int((n - 3) / 2)
    return "...".join([s[:keep], s[-keep:]])
def hash_file(filename, block_size=32768):
    """Compute the sha256 digest of a file's contents.

    The file is consumed in fixed-size chunks so memory use stays flat for
    large files.

    :param filename: path of the file to hash
    :param block_size: number of bytes read per chunk
    :return: 64-character hex digest, or None if the file cannot be read
    """
    import hashlib
    digest = hashlib.sha256()
    try:
        with open(filename, 'rb') as fh:
            for chunk in iter(lambda: fh.read(block_size), b''):
                digest.update(chunk)
    except IOError:
        return None
    return digest.hexdigest()
def tree(base_dir, padding=' ', print_files=True, is_last=False, is_first=False):
    """ Return a list of strings that can be combined to form ASCII-art-style
    directory listing
    :param base_dir: the path to explore
    :param padding: a string to prepend to each line
    :param print_files: True to print directories and files, False for just directories
    :param is_last: only used recursively
    :param is_first: only used recursively
    """
    import os
    out_lines = []
    # The root entry is printed bare; every nested entry gets a connector
    # (└─ for the last sibling, ├─ otherwise) spliced into its padding.
    if is_first:
        out_lines.append(base_dir)
    else:
        if is_last:
            out_lines.append(padding[:-2] + '└─ ' + os.path.basename(os.path.abspath(base_dir)))
        else:
            out_lines.append(padding[:-2] + '├─ ' + os.path.basename(os.path.abspath(base_dir)))
    if print_files:
        files = os.listdir(base_dir)
    else:
        files = [x for x in os.listdir(base_dir) if os.path.isdir(base_dir + os.sep + x)]
    if not is_first:
        padding = padding + ' '
    # Case-insensitive sort keeps the listing order stable across platforms.
    files = sorted(files, key=lambda s: s.lower())
    count = 0
    last = len(files) - 1
    for i, file in enumerate(files):
        count += 1
        path = base_dir + os.sep + file
        is_last = i == last
        if os.path.isdir(path):
            # NOTE(review): the two branches below look textually identical,
            # making the ``is_first`` test here appear redundant -- possibly a
            # leftover from an earlier padding scheme; confirm before cleanup.
            if count == len(files):
                if is_first:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
                else:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
            else:
                # A non-last directory keeps a vertical rule in the padding so
                # later siblings stay visually connected.
                out_lines = out_lines + tree(path, padding + '│ ', print_files, is_last, False)
        else:
            if is_last:
                out_lines.append(padding + '└─ ' + file)
            else:
                out_lines.append(padding + '├─ ' + file)
    return out_lines
| 34.943182 | 99 | 0.59252 | #!/usr/bin/env python3
"""
The following functions are a miscellaneous collection of utility functions
that can be useful in making code shorter and easier to read.
"""
def shortened_hash(s, n):
    """Abbreviate a hash-like string to roughly ``n`` characters.

    Strings no longer than ``n`` are returned unchanged; longer ones keep
    their first and last few characters joined by an ellipsis ("...").

    :param s: the full string to shorten
    :param n: the desired length of the returned string
    :return: ``s`` itself, or its two ends joined by "..."
    """
    if len(s) <= n:
        return s
    keep = int((n - 3) / 2)
    return "...".join([s[:keep], s[-keep:]])
def hash_file(filename, block_size=32768):
    """Compute the sha256 digest of a file's contents.

    The file is consumed in fixed-size chunks so memory use stays flat for
    large files.

    :param filename: path of the file to hash
    :param block_size: number of bytes read per chunk
    :return: 64-character hex digest, or None if the file cannot be read
    """
    import hashlib
    digest = hashlib.sha256()
    try:
        with open(filename, 'rb') as fh:
            for chunk in iter(lambda: fh.read(block_size), b''):
                digest.update(chunk)
    except IOError:
        return None
    return digest.hexdigest()
def tree(base_dir, padding=' ', print_files=True, is_last=False, is_first=False):
    """ Return a list of strings that can be combined to form ASCII-art-style
    directory listing
    :param base_dir: the path to explore
    :param padding: a string to prepend to each line
    :param print_files: True to print directories and files, False for just directories
    :param is_last: only used recursively
    :param is_first: only used recursively
    """
    import os
    out_lines = []
    # The root entry is printed bare; every nested entry gets a connector
    # (└─ for the last sibling, ├─ otherwise) spliced into its padding.
    if is_first:
        out_lines.append(base_dir)
    else:
        if is_last:
            out_lines.append(padding[:-2] + '└─ ' + os.path.basename(os.path.abspath(base_dir)))
        else:
            out_lines.append(padding[:-2] + '├─ ' + os.path.basename(os.path.abspath(base_dir)))
    if print_files:
        files = os.listdir(base_dir)
    else:
        files = [x for x in os.listdir(base_dir) if os.path.isdir(base_dir + os.sep + x)]
    if not is_first:
        padding = padding + ' '
    # Case-insensitive sort keeps the listing order stable across platforms.
    files = sorted(files, key=lambda s: s.lower())
    count = 0
    last = len(files) - 1
    for i, file in enumerate(files):
        count += 1
        path = base_dir + os.sep + file
        is_last = i == last
        if os.path.isdir(path):
            # NOTE(review): the two branches below look textually identical,
            # making the ``is_first`` test here appear redundant -- possibly a
            # leftover from an earlier padding scheme; confirm before cleanup.
            if count == len(files):
                if is_first:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
                else:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
            else:
                # A non-last directory keeps a vertical rule in the padding so
                # later siblings stay visually connected.
                out_lines = out_lines + tree(path, padding + '│ ', print_files, is_last, False)
        else:
            if is_last:
                out_lines.append(padding + '└─ ' + file)
            else:
                out_lines.append(padding + '├─ ' + file)
    return out_lines
| 0 | 0 | 0 |
32c7d714838071b24865cbd45577385baa9961ff | 316 | py | Python | Chapter01/05_triple_quotes.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 116 | 2018-12-21T01:05:47.000Z | 2022-03-23T21:41:41.000Z | Chapter01/05_triple_quotes.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 2 | 2021-03-31T19:36:19.000Z | 2021-06-10T22:29:26.000Z | Chapter01/05_triple_quotes.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 147 | 2018-12-19T14:10:32.000Z | 2022-03-20T11:03:20.000Z | #!/usr/bin/python3
# Demonstrates a triple-quoted (multi-line) string literal; the embedded \t
# and \n escapes are interpreted when the string is printed.
para_str = """ Python is a scripting language which was created by
Guido van Rossum in 1991, \t which is used in various sectors such as \n Game Development, GIS Programming, Software Development, web development,
Data Analytics and Machine learning, System Scripting etc.
"""
print (para_str)
| 35.111111 | 147 | 0.768987 | #!/usr/bin/python3
# Demonstrates a triple-quoted (multi-line) string literal; the embedded \t
# and \n escapes are interpreted when the string is printed.
para_str = """ Python is a scripting language which was created by
Guido van Rossum in 1991, \t which is used in various sectors such as \n Game Development, GIS Programming, Software Development, web development,
Data Analytics and Machine learning, System Scripting etc.
"""
print (para_str)
| 0 | 0 | 0 |
c4adcf6b8e6adab89e31fe3c85f893680eacd1b0 | 4,216 | py | Python | I18nLibrary/__init__.py | qahive/robotframework-i18n | 7f0e86ff14dcc7ba7030380c34dff82a95a8ec78 | [
"Apache-2.0"
] | 3 | 2020-01-16T00:52:48.000Z | 2021-11-15T04:55:02.000Z | I18nLibrary/__init__.py | qahive/robotframework-i18n | 7f0e86ff14dcc7ba7030380c34dff82a95a8ec78 | [
"Apache-2.0"
] | 1 | 2020-06-10T04:46:24.000Z | 2020-06-15T11:16:09.000Z | I18nLibrary/__init__.py | qahive/robotframework-i18n | 7f0e86ff14dcc7ba7030380c34dff82a95a8ec78 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Atthaboon S.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import i18n
from robot.libraries.BuiltIn import BuiltIn
__version__ = '0.1.4'
class I18nLibrary:
    """
    I18nLibrary translator library for support in robotframework
    """
    def load_path_append(self, append_path):
        """
        Auto load language from specific path
        :param append_path: directory (searched recursively) with translation files
        :return: None
        """
        i18n.load_path.append(append_path)
        # Load lang files to memory
        # NOTE(review): this excerpt omits the helper methods the loop below
        # calls (_load_directory, _get_list_of_sub_folders); they are defined
        # in the full class.
        for lang in i18n.config.get('pre_load_langs'):
            self._load_directory(append_path, lang)
            subfolders = self._get_list_of_sub_folders(append_path)
            for folder_path in subfolders:
                self._load_directory(folder_path, lang)
| 36.66087 | 95 | 0.650617 | # Copyright 2020 Atthaboon S.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import i18n
from robot.libraries.BuiltIn import BuiltIn
__version__ = '0.1.4'
class I18nLibrary:
    """
    I18nLibrary translator library for support in robotframework
    """
    def __init__(self):
        # Configure python-i18n defaults once: enable memoization, start with
        # no preloaded languages, clear prefer mode, and register the YAML
        # resource loader used by load_path_append().
        i18n.config.set('enable_memoization', True)
        i18n.config.set('pre_load_langs', [])
        i18n.set('prefer', '')
        i18n.set('is_prefer', False)
        i18n.resource_loader.init_yaml_loader()
    def load_path_append(self, append_path):
        """
        Auto load language from specific path
        :param append_path: directory (searched recursively) with translation files
        :return: None
        """
        i18n.load_path.append(append_path)
        # Load lang files to memory
        for lang in i18n.config.get('pre_load_langs'):
            self._load_directory(append_path, lang)
            subfolders = self._get_list_of_sub_folders(append_path)
            for folder_path in subfolders:
                self._load_directory(folder_path, lang)
    def _load_directory(self, directory, locale):
        """Load every matching translation file in ``directory`` for ``locale``."""
        for f in os.listdir(directory):
            path = os.path.join(directory, f)
            if os.path.isfile(path) and path.endswith(i18n.config.get('file_format')):
                # When filenames embed the locale, skip files for other locales.
                if '{locale}' in i18n.config.get('filename_format') and not locale+'.yml' in f:
                    continue
                i18n.resource_loader.load_translation_file(f, directory, locale)
    def _get_list_of_sub_folders(self, dirName):
        """Recursively collect all sub-directory paths under ``dirName``."""
        listOfFile = os.listdir(dirName)
        allFiles = list()
        # Iterate over all the entries
        for entry in listOfFile:
            # Create full path
            fullPath = os.path.join(dirName, entry)
            # If entry is a directory then get the list of files in this directory
            if os.path.isdir(fullPath):
                allFiles.append(fullPath)
                allFiles = allFiles + self._get_list_of_sub_folders(fullPath)
        return allFiles
    def set_pre_load_language(self, language):
        """Queue ``language`` for eager loading by load_path_append()."""
        langs = i18n.config.get('pre_load_langs')
        langs.append(language)
        i18n.config.set('pre_load_langs', langs)
    def set_locale_language(self, language):
        """Set the active locale used by translate_message()."""
        i18n.set('locale', language)
    def set_fallback_language(self, language):
        """Set the locale used when a key is missing from the active locale."""
        i18n.set('fallback', language)
    def set_prefer_language(self, language):
        """Set a preferred locale and enable prefer mode for suite variables."""
        i18n.set('prefer', language)
        i18n.set('is_prefer', True)
    def translate_message(self, message):
        """Translate ``message`` using the active locale."""
        return i18n.t(message)
    def translate_message_for_specific_language(self, message, language):
        """Translate ``message`` in an explicitly given ``language``."""
        return i18n.t(message, locale=language)
    def translate_message_with_prefer_language(self, message, second_fallback):
        """Translate ``message``, defaulting to ``second_fallback``'s translation."""
        return i18n.t(message, default=i18n.t(message, locale=second_fallback))
    def generate_suite_variables(self):
        """Expose every known translation key as a Robot Framework suite variable."""
        robot_buildIn = BuiltIn()
        is_prefer = i18n.get('is_prefer')
        prefer_lang = i18n.get('prefer')
        keys = self._get_all_unique_keys()
        for key in keys:
            robot_buildIn.log('Generate variable for '+ key)
            value = ''
            if is_prefer:
                value = self.translate_message_with_prefer_language(key, prefer_lang)
            else:
                value = self.translate_message(key)
            robot_buildIn.set_suite_variable('${'+key+'}', ""+value)
    def _get_all_unique_keys(self):
        """Return the de-duplicated translation keys across all loaded locales."""
        key_list = []
        container = i18n.translations.container
        for lang in container.keys():
            lang_dicts = container.get(lang)
            key_list += list(lang_dicts.keys())
        key_set = set(key_list)
        key_unique_list = list(key_set)
        return key_unique_list
| 2,612 | 0 | 323 |
90b5b1db45e1f95fb16ea33e2e6c4e344dfe41b6 | 5,548 | py | Python | vmprof/__init__.py | kayhayen/vmprof-python | 66c46aa7bb10b3c580b4e71f9a32469a175b5263 | [
"MIT"
] | 430 | 2015-01-31T13:41:07.000Z | 2022-01-24T02:04:23.000Z | vmprof/__init__.py | kayhayen/vmprof-python | 66c46aa7bb10b3c580b4e71f9a32469a175b5263 | [
"MIT"
] | 202 | 2015-02-06T19:01:38.000Z | 2022-03-22T15:15:20.000Z | vmprof/__init__.py | kayhayen/vmprof-python | 66c46aa7bb10b3c580b4e71f9a32469a175b5263 | [
"MIT"
] | 59 | 2015-02-08T16:06:28.000Z | 2022-01-11T00:12:37.000Z | import os
import sys
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
import _vmprof
from vmprof import cli
from vmprof.reader import (MARKER_NATIVE_SYMBOLS, FdWrapper,
LogReaderState, LogReaderDumpNative)
from vmprof.stats import Stats
from vmprof.profiler import Profiler, read_profile
PY3 = sys.version_info[0] >= 3
IS_PYPY = '__pypy__' in sys.builtin_module_names
# it's not a good idea to use a "round" default sampling period, else we risk
# to oversample periodic tasks which happens to run at e.g. 100Hz or 1000Hz:
# http://www.solarisinternals.com/wiki/index.php/DTrace_Topics_Hints_Tips#profile-1001.2C_profile-997.3F
#
# To avoid the problem, we use a period which is "almost" but not exactly
# 1000Hz
DEFAULT_PERIOD = 0.00099
if IS_PYPY:
else:
# CPYTHON
def sample_stack_now(skip=0):
""" Helper utility mostly for tests, this is considered
private API.
It will return a list of stack frames the python program currently
walked.
"""
stackframes = _vmprof.sample_stack_now(skip)
assert isinstance(stackframes, list)
return stackframes
def resolve_addr(addr):
""" Private API, returns the symbol name of the given address.
Only considers linking symbols found by dladdr.
"""
return _vmprof.resolve_addr(addr)
def insert_real_time_thread(thread_id=0):
""" Inserts a thread into the list of threads to be sampled in real time mode.
When enabling real time mode, the caller thread is inserted automatically.
Returns the number of registered threads, or -1 if we can't insert thread.
Inserts the current thread if thread_id is not provided.
"""
return _vmprof.insert_real_time_thread(thread_id)
def remove_real_time_thread(thread_id=0):
""" Removes a thread from the list of threads to be sampled in real time mode.
When disabling in real time mode, *all* threads are removed automatically.
Returns the number of registered threads, or -1 if we can't remove thread.
Removes the current thread if thread_id is not provided.
"""
return _vmprof.remove_real_time_thread(thread_id)
def is_enabled():
""" Indicates if vmprof has already been enabled for this process.
Returns True or False. None is returned if the state is unknown.
"""
if hasattr(_vmprof, 'is_enabled'):
return _vmprof.is_enabled()
raise NotImplementedError("is_enabled is not implemented on this platform")
def get_profile_path():
""" Returns the absolute path for the file that is currently open.
None is returned if the backend implementation does not implement that function,
or profiling is not enabled.
"""
if hasattr(_vmprof, 'get_profile_path'):
return _vmprof.get_profile_path()
raise NotImplementedError("get_profile_path not implemented on this platform")
| 40.202899 | 114 | 0.671053 | import os
import sys
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
import _vmprof
from vmprof import cli
from vmprof.reader import (MARKER_NATIVE_SYMBOLS, FdWrapper,
LogReaderState, LogReaderDumpNative)
from vmprof.stats import Stats
from vmprof.profiler import Profiler, read_profile
PY3 = sys.version_info[0] >= 3
IS_PYPY = '__pypy__' in sys.builtin_module_names
# it's not a good idea to use a "round" default sampling period, else we risk
# to oversample periodic tasks which happens to run at e.g. 100Hz or 1000Hz:
# http://www.solarisinternals.com/wiki/index.php/DTrace_Topics_Hints_Tips#profile-1001.2C_profile-997.3F
#
# To avoid the problem, we use a period which is "almost" but not exactly
# 1000Hz
DEFAULT_PERIOD = 0.00099
def disable():
try:
# fish the file descriptor that is still open!
if hasattr(_vmprof, 'stop_sampling'):
fileno = _vmprof.stop_sampling()
if fileno >= 0:
# TODO does fileobj leak the fd? I dont think so, but need to check
fileobj = FdWrapper(fileno)
l = LogReaderDumpNative(fileobj, LogReaderState())
l.read_all()
if hasattr(_vmprof, 'write_all_code_objects'):
_vmprof.write_all_code_objects(l.dedup)
_vmprof.disable()
except IOError as e:
raise Exception("Error while writing profile: " + str(e))
def _is_native_enabled(native):
if os.name == "nt":
if native:
raise ValueError("native profiling is only supported on Linux & Mac OS X")
native = False
else:
if native is None:
native = True
return native
if IS_PYPY:
def enable(fileno, period=DEFAULT_PERIOD, memory=False, lines=False, native=None, real_time=False, warn=True):
pypy_version_info = sys.pypy_version_info[:3]
MAJOR = pypy_version_info[0]
MINOR = pypy_version_info[1]
PATCH = pypy_version_info[2]
if not isinstance(period, float):
raise ValueError("period must be a float, not %s" % type(period))
if warn and pypy_version_info < (4, 1, 0):
raise Exception("PyPy <4.1 have various kinds of bugs, pass warn=False if you know what you're doing")
if warn and memory:
print("Memory profiling is currently unsupported for PyPy. Running without memory statistics.")
if warn and lines:
print('Line profiling is currently unsupported for PyPy. Running without lines statistics.\n')
native = _is_native_enabled(native)
#
if (MAJOR, MINOR, PATCH) >= (5, 9, 0):
_vmprof.enable(fileno, period, memory, lines, native, real_time)
return
if real_time:
raise ValueError('real_time=True requires PyPy >= 5.9')
if MAJOR >= 5 and MINOR >= 8 and PATCH >= 0:
_vmprof.enable(fileno, period, memory, lines, native)
return
_vmprof.enable(fileno, period)
else:
# CPYTHON
def enable(fileno, period=DEFAULT_PERIOD, memory=False, lines=False, native=None, real_time=False):
if not isinstance(period, float):
raise ValueError("period must be a float, not %s" % type(period))
native = _is_native_enabled(native)
_vmprof.enable(fileno, period, memory, lines, native, real_time)
def sample_stack_now(skip=0):
""" Helper utility mostly for tests, this is considered
private API.
It will return a list of stack frames the python program currently
walked.
"""
stackframes = _vmprof.sample_stack_now(skip)
assert isinstance(stackframes, list)
return stackframes
def resolve_addr(addr):
    """ Private API, returns the symbol name of the given address.
    Only considers linking symbols found by dladdr.

    Thin pass-through to the C backend; ``addr`` is a raw machine address.
    """
    return _vmprof.resolve_addr(addr)
def insert_real_time_thread(thread_id=0):
    """ Inserts a thread into the list of threads to be sampled in real time mode.
    When enabling real time mode, the caller thread is inserted automatically.
    Returns the number of registered threads, or -1 if we can't insert thread.
    Inserts the current thread if thread_id is not provided.
    """
    # thread_id == 0 is the backend's sentinel for "the calling thread".
    return _vmprof.insert_real_time_thread(thread_id)
def remove_real_time_thread(thread_id=0):
    """ Removes a thread from the list of threads to be sampled in real time mode.
    When disabling in real time mode, *all* threads are removed automatically.
    Returns the number of registered threads, or -1 if we can't remove thread.
    Removes the current thread if thread_id is not provided.
    """
    # thread_id == 0 is the backend's sentinel for "the calling thread".
    return _vmprof.remove_real_time_thread(thread_id)
def is_enabled():
    """ Indicates if vmprof has already been enabled for this process.
    Returns True or False. None is returned if the state is unknown.

    Raises NotImplementedError on backends that do not expose the query.
    """
    try:
        query = _vmprof.is_enabled
    except AttributeError:
        raise NotImplementedError("is_enabled is not implemented on this platform")
    return query()
def get_profile_path():
    """ Returns the absolute path for the file that is currently open.
    None is returned if the backend implementation does not implement that function,
    or profiling is not enabled.

    Raises NotImplementedError on backends that do not expose the query.
    """
    try:
        query = _vmprof.get_profile_path
    except AttributeError:
        raise NotImplementedError("get_profile_path not implemented on this platform")
    return query()
| 2,474 | 0 | 98 |
4a6c2654322f5a24ecf864246ad64a6fb7032e8a | 8,178 | py | Python | lte/protos/ha_orc8r_pb2.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lte/protos/ha_orc8r_pb2.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lte/protos/ha_orc8r_pb2.py | aweimeow/enodebd | e1cd20693153e6b85e5d1bf9d21af2501c358601 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lte/protos/ha_orc8r.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lte/protos/ha_orc8r.proto',
package='magma.lte',
syntax='proto3',
serialized_options=b'Z\031magma/lte/cloud/go/protos',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19lte/protos/ha_orc8r.proto\x12\tmagma.lte\"\x1e\n\x1cGetEnodebOffloadStateRequest\"\xd9\x02\n\x1dGetEnodebOffloadStateResponse\x12`\n\x15\x65nodeb_offload_states\x18\x01 \x03(\x0b\x32\x41.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry\x1aw\n\x18\x45nodebOffloadStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12J\n\x05value\x18\x02 \x01(\x0e\x32;.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState:\x02\x38\x01\"]\n\x12\x45nodebOffloadState\x12\t\n\x05NO_OP\x10\x00\x12\x15\n\x11PRIMARY_CONNECTED\x10\x01\x12%\n!PRIMARY_CONNECTED_AND_SERVING_UES\x10\x02\x32r\n\x02Ha\x12l\n\x15GetEnodebOffloadState\x12\'.magma.lte.GetEnodebOffloadStateRequest\x1a(.magma.lte.GetEnodebOffloadStateResponse\"\x00\x42\x1bZ\x19magma/lte/cloud/go/protosb\x06proto3'
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE = _descriptor.EnumDescriptor(
name='EnodebOffloadState',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NO_OP', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED_AND_SERVING_UES', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=325,
serialized_end=418,
)
_sym_db.RegisterEnumDescriptor(_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE)
_GETENODEBOFFLOADSTATEREQUEST = _descriptor.Descriptor(
name='GetEnodebOffloadStateRequest',
full_name='magma.lte.GetEnodebOffloadStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=70,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY = _descriptor.Descriptor(
name='EnodebOffloadStatesEntry',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.key', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.value', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=323,
)
_GETENODEBOFFLOADSTATERESPONSE = _descriptor.Descriptor(
name='GetEnodebOffloadStateResponse',
full_name='magma.lte.GetEnodebOffloadStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='enodeb_offload_states', full_name='magma.lte.GetEnodebOffloadStateResponse.enodeb_offload_states', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY, ],
enum_types=[
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=418,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.fields_by_name['value'].enum_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.containing_type = _GETENODEBOFFLOADSTATERESPONSE
_GETENODEBOFFLOADSTATERESPONSE.fields_by_name['enodeb_offload_states'].message_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE.containing_type = _GETENODEBOFFLOADSTATERESPONSE
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateRequest'] = _GETENODEBOFFLOADSTATEREQUEST
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateResponse'] = _GETENODEBOFFLOADSTATERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetEnodebOffloadStateRequest = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateRequest', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATEREQUEST,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateRequest)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateRequest)
GetEnodebOffloadStateResponse = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateResponse', (_message.Message,), {
'EnodebOffloadStatesEntry' : _reflection.GeneratedProtocolMessageType('EnodebOffloadStatesEntry', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
})
,
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse)
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
DESCRIPTOR._options = None
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY._options = None
_HA = _descriptor.ServiceDescriptor(
name='Ha',
full_name='magma.lte.Ha',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=420,
serialized_end=534,
methods=[
_descriptor.MethodDescriptor(
name='GetEnodebOffloadState',
full_name='magma.lte.Ha.GetEnodebOffloadState',
index=0,
containing_service=None,
input_type=_GETENODEBOFFLOADSTATEREQUEST,
output_type=_GETENODEBOFFLOADSTATERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HA)
DESCRIPTOR.services_by_name['Ha'] = _HA
# @@protoc_insertion_point(module_scope)
| 38.394366 | 791 | 0.79934 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lte/protos/ha_orc8r.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lte/protos/ha_orc8r.proto',
package='magma.lte',
syntax='proto3',
serialized_options=b'Z\031magma/lte/cloud/go/protos',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19lte/protos/ha_orc8r.proto\x12\tmagma.lte\"\x1e\n\x1cGetEnodebOffloadStateRequest\"\xd9\x02\n\x1dGetEnodebOffloadStateResponse\x12`\n\x15\x65nodeb_offload_states\x18\x01 \x03(\x0b\x32\x41.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry\x1aw\n\x18\x45nodebOffloadStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12J\n\x05value\x18\x02 \x01(\x0e\x32;.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState:\x02\x38\x01\"]\n\x12\x45nodebOffloadState\x12\t\n\x05NO_OP\x10\x00\x12\x15\n\x11PRIMARY_CONNECTED\x10\x01\x12%\n!PRIMARY_CONNECTED_AND_SERVING_UES\x10\x02\x32r\n\x02Ha\x12l\n\x15GetEnodebOffloadState\x12\'.magma.lte.GetEnodebOffloadStateRequest\x1a(.magma.lte.GetEnodebOffloadStateResponse\"\x00\x42\x1bZ\x19magma/lte/cloud/go/protosb\x06proto3'
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE = _descriptor.EnumDescriptor(
name='EnodebOffloadState',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NO_OP', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED_AND_SERVING_UES', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=325,
serialized_end=418,
)
_sym_db.RegisterEnumDescriptor(_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE)
_GETENODEBOFFLOADSTATEREQUEST = _descriptor.Descriptor(
name='GetEnodebOffloadStateRequest',
full_name='magma.lte.GetEnodebOffloadStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=70,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY = _descriptor.Descriptor(
name='EnodebOffloadStatesEntry',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.key', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.value', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=323,
)
_GETENODEBOFFLOADSTATERESPONSE = _descriptor.Descriptor(
name='GetEnodebOffloadStateResponse',
full_name='magma.lte.GetEnodebOffloadStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='enodeb_offload_states', full_name='magma.lte.GetEnodebOffloadStateResponse.enodeb_offload_states', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY, ],
enum_types=[
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=418,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.fields_by_name['value'].enum_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.containing_type = _GETENODEBOFFLOADSTATERESPONSE
_GETENODEBOFFLOADSTATERESPONSE.fields_by_name['enodeb_offload_states'].message_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE.containing_type = _GETENODEBOFFLOADSTATERESPONSE
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateRequest'] = _GETENODEBOFFLOADSTATEREQUEST
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateResponse'] = _GETENODEBOFFLOADSTATERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetEnodebOffloadStateRequest = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateRequest', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATEREQUEST,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateRequest)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateRequest)
GetEnodebOffloadStateResponse = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateResponse', (_message.Message,), {
'EnodebOffloadStatesEntry' : _reflection.GeneratedProtocolMessageType('EnodebOffloadStatesEntry', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
})
,
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse)
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
DESCRIPTOR._options = None
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY._options = None
_HA = _descriptor.ServiceDescriptor(
name='Ha',
full_name='magma.lte.Ha',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=420,
serialized_end=534,
methods=[
_descriptor.MethodDescriptor(
name='GetEnodebOffloadState',
full_name='magma.lte.Ha.GetEnodebOffloadState',
index=0,
containing_service=None,
input_type=_GETENODEBOFFLOADSTATEREQUEST,
output_type=_GETENODEBOFFLOADSTATERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HA)
DESCRIPTOR.services_by_name['Ha'] = _HA
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
bb5cbb4070bf18e7e184de9d99bffef9ee62901f | 5,348 | py | Python | xirl/pretrain.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-19T04:26:12.000Z | 2022-03-19T04:26:12.000Z | xirl/pretrain.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | xirl/pretrain.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for pre-training representations."""
import os.path as osp
from absl import app
from absl import flags
from absl import logging
from base_configs import validate_config
from ml_collections import config_flags
import torch
from torchkit import CheckpointManager
from torchkit import experiment
from torchkit import Logger
from torchkit.utils.py_utils import Stopwatch
from utils import setup_experiment
from xirl import common
# pylint: disable=logging-fstring-interpolation
FLAGS = flags.FLAGS
flags.DEFINE_string("experiment_name", None, "Experiment name.")
flags.DEFINE_boolean("resume", False, "Whether to resume training.")
flags.DEFINE_string("device", "cuda:0", "The compute device.")
flags.DEFINE_boolean("raw_imagenet", False, "")
config_flags.DEFINE_config_file(
"config",
"base_configs/pretrain.py",
"File path to the training hyperparameter configuration.",
)
@experiment.pdb_fallback
if __name__ == "__main__":
flags.mark_flag_as_required("experiment_name")
app.run(main)
| 30.913295 | 78 | 0.672588 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for pre-training representations."""
import os.path as osp
from absl import app
from absl import flags
from absl import logging
from base_configs import validate_config
from ml_collections import config_flags
import torch
from torchkit import CheckpointManager
from torchkit import experiment
from torchkit import Logger
from torchkit.utils.py_utils import Stopwatch
from utils import setup_experiment
from xirl import common
# pylint: disable=logging-fstring-interpolation
# Command-line interface for the pretraining entry point (absl flags).
FLAGS = flags.FLAGS
flags.DEFINE_string("experiment_name", None, "Experiment name.")
flags.DEFINE_boolean("resume", False, "Whether to resume training.")
flags.DEFINE_string("device", "cuda:0", "The compute device.")
# When set, skip pretraining entirely and rely on raw ImageNet weights.
flags.DEFINE_boolean("raw_imagenet", False, "")
config_flags.DEFINE_config_file(
    "config",
    "base_configs/pretrain.py",
    "File path to the training hyperparameter configuration.",
)
@experiment.pdb_fallback
def main(_):
  """Run representation pretraining as configured by FLAGS.

  Sets up the experiment directory, device, and RNG seeds, then trains the
  model until ``config.optim.train_max_iters`` iterations, periodically
  logging, evaluating on the validation/downstream loaders, and saving
  checkpoints.  A final checkpoint is always written on exit (including
  KeyboardInterrupt).
  """
  # Make sure we have a valid config that inherits all the keys defined in the
  # base config.
  validate_config(FLAGS.config, mode="pretrain")
  config = FLAGS.config
  exp_dir = osp.join(config.root_dir, FLAGS.experiment_name)
  setup_experiment(exp_dir, config, FLAGS.resume)
  # No need to do any pretraining if we're loading the raw pretrained
  # ImageNet baseline.
  if FLAGS.raw_imagenet:
    return
  # Setup compute device.
  if torch.cuda.is_available():
    device = torch.device(FLAGS.device)
  else:
    logging.info("No GPU device found. Falling back to CPU.")
    device = torch.device("cpu")
  logging.info("Using device: %s", device)
  # Set RNG seeds.
  if config.seed is not None:
    logging.info("Pretraining experiment seed: %d", config.seed)
    experiment.seed_rngs(config.seed)
    experiment.set_cudnn(config.cudnn_deterministic, config.cudnn_benchmark)
  else:
    logging.info("No RNG seed has been set for this pretraining experiment.")
  logger = Logger(osp.join(exp_dir, "tb"), FLAGS.resume)
  # Load factories.
  (
      model,
      optimizer,
      pretrain_loaders,
      downstream_loaders,
      trainer,
      eval_manager,
  ) = common.get_factories(config, device)
  # Create checkpoint manager.
  checkpoint_dir = osp.join(exp_dir, "checkpoints")
  checkpoint_manager = CheckpointManager(
      checkpoint_dir,
      model=model,
      optimizer=optimizer,
  )
  # Resume from the latest checkpoint if one exists, else start at step 0.
  global_step = checkpoint_manager.restore_or_initialize()
  total_batches = max(1, len(pretrain_loaders["train"]))
  epoch = int(global_step / total_batches)
  complete = False
  stopwatch = Stopwatch()
  try:
    while not complete:
      for batch in pretrain_loaders["train"]:
        train_loss = trainer.train_one_iter(batch)
        if not global_step % config.logging_frequency:
          for k, v in train_loss.items():
            logger.log_scalar(v, global_step, k, "pretrain")
          logger.flush()
        if not global_step % config.eval.eval_frequency:
          # Evaluate the model on the pretraining validation dataset.
          valid_loss = trainer.eval_num_iters(
              pretrain_loaders["valid"],
              config.eval.val_iters,
          )
          for k, v in valid_loss.items():
            logger.log_scalar(v, global_step, k, "pretrain")
          # Evaluate the model on the downstream datasets.
          for split, downstream_loader in downstream_loaders.items():
            eval_to_metric = eval_manager.evaluate(
                model,
                downstream_loader,
                device,
                config.eval.val_iters,
            )
            for eval_name, eval_out in eval_to_metric.items():
              eval_out.log(
                  logger,
                  global_step,
                  eval_name,
                  f"downstream/{split}",
              )
        # Save model checkpoint.
        if not global_step % config.checkpointing_frequency:
          checkpoint_manager.save(global_step)
        # Exit if complete.
        global_step += 1
        if global_step > config.optim.train_max_iters:
          complete = True
          break
        time_per_iter = stopwatch.elapsed()
        logging.info(
            "Iter[{}/{}] (Epoch {}), {:.6f}s/iter, Loss: {:.3f}".format(
                global_step,
                config.optim.train_max_iters,
                epoch,
                time_per_iter,
                train_loss["train/total_loss"].item(),
            ))
        stopwatch.reset()
      epoch += 1
  except KeyboardInterrupt:
    logging.info("Caught keyboard interrupt. Saving model before quitting.")
  finally:
    # Always persist the latest weights, even on error or interrupt.
    checkpoint_manager.save(global_step)
    logger.close()


if __name__ == "__main__":
  flags.mark_flag_as_required("experiment_name")
  app.run(main)
| 3,691 | 0 | 22 |
f519766e303a1b626be87cea373205c6323bff6f | 2,634 | py | Python | pkg/factory/creator/_creator.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | pkg/factory/creator/_creator.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | pkg/factory/creator/_creator.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | from pkg.creator._creator import Creator
from pkg.creator.app_plan._app_plan import FunctionPlanCreator
from pkg.creator.az_resources._az_resources import AzureResourceCreator
from pkg.creator.function_app._function_app import FunctionAppCreator
from pkg.creator.local_function._local_function import LocalFunctionAppCreator
from pkg.creator.storageacnt._storage_account import StorageAccountCreator
from pkg.creator.type._creator_type import CreatorType
from pkg.factory._absfactory import KafkaExtTestAbsFactory
from pkg.enums.language._language import Language
factory_instance = CreatorFactory() | 37.628571 | 78 | 0.684131 | from pkg.creator._creator import Creator
from pkg.creator.app_plan._app_plan import FunctionPlanCreator
from pkg.creator.az_resources._az_resources import AzureResourceCreator
from pkg.creator.function_app._function_app import FunctionAppCreator
from pkg.creator.local_function._local_function import LocalFunctionAppCreator
from pkg.creator.storageacnt._storage_account import StorageAccountCreator
from pkg.creator.type._creator_type import CreatorType
from pkg.factory._absfactory import KafkaExtTestAbsFactory
from pkg.enums.language._language import Language
class CreatorFactory(KafkaExtTestAbsFactory):
    """Factory that builds and caches ``Creator`` singletons.

    ``build_factory`` populates ``instance_map`` with one creator per
    ``CreatorType`` (and, eventually, per ``Language``); ``retrieve_instance``
    looks a creator up again by language and/or creator type.
    """

    # Shared cache of creator singletons, keyed by CreatorType / Language.
    instance_map = {}

    def retrieve_instance(self, *args):
        """Return the cached creator for the given keys.

        Accepts up to two positional arguments: a ``Language`` followed by a
        ``CreatorType``; either may be omitted.  A language-specific creator
        takes precedence over the generic creator-type one.

        Raises:
            LookupError: if neither key maps to a registered creator.
        """
        creator_type, lang_creator_type = self.validate_extract_val(args)
        # Use .get() so a missing/None key falls through to the next lookup.
        # (Direct indexing raised KeyError before the creator_type fallback
        # could ever be consulted.)
        creator = self.instance_map.get(lang_creator_type)
        if creator is not None:
            return creator
        creator = self.instance_map.get(creator_type)
        if creator is not None:
            return creator
        # `raise "<str>"` is invalid in Python 3 (exceptions must derive from
        # BaseException), so raise a real exception with the same message.
        raise LookupError("creator type is not found")

    def validate_extract_val(self, args):
        """Split *args* into ``(creator_type, lang_creator_type)``.

        Missing positions are padded with ``None``; args[0] is the language,
        args[1] is the creator type.
        """
        lang_creator_type = args[0] if len(args) > 0 else None
        creator_type = args[1] if len(args) > 1 else None
        return creator_type, lang_creator_type

    def build_factory(self, path: str):
        """Populate the cache with every known creator."""
        self.build_factory_creator_type()
        self.create_factory_language(path)

    def create_factory_language(self, path):
        """Register language-specific creators.

        Currently a no-op; per-language creators (e.g. a Python function
        creator) are not implemented yet.
        """
        # TODO: register one creator per Language member once implementations
        # exist, caching them in instance_map keyed by the Language value.
        pass

    def build_factory_creator_type(self):
        """Instantiate and cache one creator per ``CreatorType`` member."""
        for creator_type in CreatorType:
            creator: Creator = None
            if creator_type == CreatorType.STORAGE_ACCOUNT:
                creator = StorageAccountCreator()
            elif creator_type == CreatorType.FUNCTION_APP_PLAN:
                creator = FunctionPlanCreator()
            elif creator_type == CreatorType.AZURE_RESOURCE:
                creator = AzureResourceCreator()
            elif creator_type == CreatorType.LOCAL_FUNCTION:
                creator = LocalFunctionAppCreator()
            else:
                # Any remaining type gets the generic function-app creator.
                creator = FunctionAppCreator()
            self.instance_map[creator_type] = creator
# Module-level singleton; all callers share one factory via get_instance().
factory_instance = CreatorFactory()
def get_instance():
    """Return the module-level CreatorFactory singleton."""
    return factory_instance
6ceabea02b2033f21ca38076603889cc89287471 | 1,030 | py | Python | transactional_mail/templatetags/email_tags.py | vinaypai/django-transactional-mail | 547785237d5bd6108fef348c89e4ff13c5ccc8c1 | [
"MIT"
] | null | null | null | transactional_mail/templatetags/email_tags.py | vinaypai/django-transactional-mail | 547785237d5bd6108fef348c89e4ff13c5ccc8c1 | [
"MIT"
] | null | null | null | transactional_mail/templatetags/email_tags.py | vinaypai/django-transactional-mail | 547785237d5bd6108fef348c89e4ff13c5ccc8c1 | [
"MIT"
] | null | null | null | """Template tags to make e-mail styling less painful"""
import cssutils
from django import template
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe
register = template.Library()
_styles = None
@register.simple_tag()
| 27.105263 | 97 | 0.703883 | """Template tags to make e-mail styling less painful"""
import cssutils
from django import template
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe
register = template.Library()
_styles = None
@register.simple_tag()
def style(names):
    """Render an inline ``style="..."`` attribute for the given rule names.

    *names* is a whitespace-separated list of selectors; each is looked up
    in the stylesheet cache, which is (re)loaded on first use and on every
    call while ``settings.DEBUG`` is on.  Unknown names contribute an empty
    declaration.
    """
    global _styles
    if _styles is None or settings.DEBUG:
        _load_styles()
    declarations = [_styles.get(name, '') for name in names.split()]
    return mark_safe('style="%s"' % ';'.join(declarations))
def _load_styles():
    """Parse the configured e-mail stylesheet into the module-level cache.

    Rebuilds ``_styles`` as a mapping of selector text -> CSS declaration
    text.  With no ``EMAIL_STYLESHEET`` configured the cache is left empty;
    a configured-but-missing stylesheet raises ImproperlyConfigured.
    """
    global _styles
    _styles = {}
    if not settings.EMAIL_STYLESHEET:
        return
    fname = finders.find(settings.EMAIL_STYLESHEET)
    if not fname:
        raise ImproperlyConfigured("Couldn't find stylesheet %s" % settings.EMAIL_STYLESHEET)
    sheet = cssutils.parseFile(fname)
    for rule in sheet.cssRules:
        for selector in rule.selectorList:
            _styles[selector.selectorText] = rule.style.cssText
| 629 | 0 | 45 |
7b540622cb95c7a1c0b29a096f777060135be33d | 10,931 | py | Python | cannab/create_submission_speed.py | SpaceNetChallenge/SpaceNet_Optimized_Routing_Solutions | 3fbc215de6b05904a5b54b2c7cde7e61074ae38d | [
"Apache-2.0"
] | 27 | 2020-03-04T05:54:48.000Z | 2022-01-05T07:07:44.000Z | cannab/create_submission_speed.py | CosmiQ/SpaceNet_Optimized_Routing_Solutions | 3fbc215de6b05904a5b54b2c7cde7e61074ae38d | [
"Apache-2.0"
] | 1 | 2020-07-14T10:35:50.000Z | 2020-07-14T10:35:50.000Z | cannab/create_submission_speed.py | SpaceNetChallenge/SpaceNet_Optimized_Routing_Solutions | 3fbc215de6b05904a5b54b2c7cde7e61074ae38d | [
"Apache-2.0"
] | 7 | 2020-03-07T21:42:57.000Z | 2022-01-07T10:49:50.000Z | # -*- coding: utf-8 -*-
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, listdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import pandas as pd
import timeit
import cv2
from tqdm import tqdm
import sys
sys.setrecursionlimit(10000)
from multiprocessing import Pool
from shapely.geometry.linestring import LineString
# from skimage.morphology import skeletonize_3d, square, erosion, dilation, medial_axis
# from skimage.measure import label, regionprops, approximate_polygon
from math import hypot, sin, cos, asin, acos, radians
from sklearn.neighbors import KDTree
from shapely.wkt import dumps, loads
import scipy
import utm
#pip install utm
import gdal
gdal.UseExceptions()
import osr
import ogr
#conda install gdal
import ntpath
from shapely.geometry import mapping, Point, LineString
# import matplotlib.pyplot as plt
# import seaborn as sns
# Folders holding the two sets of model predictions to be merged.
pred_folders = ['/wdata/test_pred', '/wdata/test_pred_960']
# Speed classes (mph) the per-pixel speed predictions are binned into.
speed_bins = np.array([15, 18.75, 20, 25, 30, 35, 45, 55, 65])
# test_folders = ['/data/SN5_roads/test_public/AOI_7_Moscow', '/data/SN5_roads/test_public/AOI_8_Mumbai', '/data/SN5_roads/test_public/AOI_9_San_Juan']
test_folders = []
# All argv entries except the last are input AOI folders (the last one is
# presumably the output path used elsewhere — TODO confirm).
for i in range(1, len(sys.argv) - 1):
    test_folders.append(sys.argv[i])
# Length-only solution produced by the previous pipeline stage.
df = pd.read_csv(path.join('/wdata', 'solution_length.csv'), header=None)
df.columns = ['ImageId', 'WKT_Pix']
# example GDAL error handler function
# NOTE(review): gdal_error_handler is not defined in this chunk — it is
# expected to be declared earlier in the module; verify it exists.
gdal.PushErrorHandler(gdal_error_handler)
# from https://github.com/CosmiQ/cresi
def get_linestring_midpoints(geom):
    '''Get midpoints of each line segment in the line.
    Also return the length of each segment, assuming cartesian coordinates.

    Returns a tuple ``(x_mids, y_mids, dls)``: the midpoint coordinates
    rounded to the nearest integer (pixel indices) and the euclidean length
    of each segment.'''
    coords = list(geom.coords)
    N = len(coords)
    x_mids, y_mids, dls = [], [], []
    for i in range(N-1):
        (x0, y0) = coords[i]
        (x1, y1) = coords[i+1]
        # Segment midpoint, snapped to the nearest integer pixel.
        x_mids.append(np.rint(0.5 * (x0 + x1)))
        y_mids.append(np.rint(0.5 * (y0 + y1)))
        dl = scipy.spatial.distance.euclidean(coords[i], coords[i+1])
        dls. append(dl)
    return np.array(x_mids).astype(int), np.array(y_mids).astype(int), \
        np.array(dls)
def pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='', targetSR=''):
    '''from spacenet geotools

    Convert a pixel coordinate of ``inputRaster`` to a geographic coordinate
    via the raster's affine geotransform.  When ``targetSR`` is supplied the
    point is additionally reprojected from ``sourceSR`` (read from the raster
    if not given) into that spatial reference; otherwise EPSG:4326 is assumed
    and no reprojection takes place.  Returns an ``(x, y)`` tuple.'''
    if targetSR =='':
        performReprojection=False
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
    else:
        performReprojection=True
    if geomTransform=='':
        srcRaster = gdal.Open(inputRaster)
        geomTransform = srcRaster.GetGeoTransform()
        # NOTE(review): source_sr is computed here but never used afterwards.
        source_sr = osr.SpatialReference()
        source_sr.ImportFromWkt(srcRaster.GetProjectionRef())
    geom = ogr.Geometry(ogr.wkbPoint)
    # Affine geotransform: coordinate = origin + pixel index * pixel size.
    xOrigin = geomTransform[0]
    yOrigin = geomTransform[3]
    pixelWidth = geomTransform[1]
    pixelHeight = geomTransform[5]
    xCoord = (xPix * pixelWidth) + xOrigin
    yCoord = (yPix * pixelHeight) + yOrigin
    geom.AddPoint(xCoord, yCoord)
    if performReprojection:
        if sourceSR=='':
            srcRaster = gdal.Open(inputRaster)
            sourceSR = osr.SpatialReference()
            sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())
        coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)
        geom.Transform(coord_trans)
    return (geom.GetX(), geom.GetY())
def convert_pix_lstring_to_geo(wkt_lstring, im_file,
                               utm_zone=None, utm_letter=None, verbose=False):
    '''Convert linestring in pixel coords to geo coords.
    If zone or letter changes in the middle of the line, it's all screwed up,
    so force zone and letter based on the first point
    (latitude, longitude, force_zone_number=None, force_zone_letter=None)
    or just force utm zone and letter explicitly.

    Returns ``(lstring_latlon, lstring_utm, utm_zone, utm_letter)``.
    '''
    shape = wkt_lstring #shapely.wkt.loads(lstring)
    x_pixs, y_pixs = shape.coords.xy
    coords_latlon = []
    coords_utm = []
    for i,(x,y) in enumerate(zip(x_pixs, y_pixs)):
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
        lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
        if utm_zone and utm_letter:
            # Keep every point in the already-established UTM zone so the
            # resulting coordinates stay mutually consistent.
            [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
                force_zone_number=utm_zone, force_zone_letter=utm_letter)
        else:
            # First point (or no forced zone): remember its zone/letter for
            # the remaining points.
            [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
        if verbose:
            print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
                  [lat, lon, utm_east, utm_north, utm_zone, utm_letter])
        coords_utm.append([utm_east, utm_north])
        coords_latlon.append([lon, lat])
    lstring_latlon = LineString([Point(z) for z in coords_latlon])
    lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
meters_to_miles = 0.000621371
###########
if __name__ == '__main__':
t0 = timeit.default_timer()
out_file = sys.argv[-1]
# out_file = '/wdata/solution.csv'
all_files = []
for d in test_folders:
for f in listdir(path.join(d, 'PS-MS')):
if '.tif' in f:
all_files.append(path.join(d, 'PS-MS', f))
# for fn in tqdm(all_files):
# process_file(fn)
with Pool() as pool:
results = pool.map(process_file, all_files)
res_rows = []
for i in range(len(results)):
res_rows.extend(results[i])
sub = pd.DataFrame(res_rows, columns=['ImageId', 'WKT_Pix', 'length_m', 'travel_time_s'])
sub.to_csv(path.join('/wdata', out_file), index=False, header=False)
elapsed = timeit.default_timer() - t0
print('Submission file created! Time: {:.3f} min'.format(elapsed / 60)) | 33.024169 | 151 | 0.565456 | # -*- coding: utf-8 -*-
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, listdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import pandas as pd
import timeit
import cv2
from tqdm import tqdm
import sys
sys.setrecursionlimit(10000)
from multiprocessing import Pool
from shapely.geometry.linestring import LineString
# from skimage.morphology import skeletonize_3d, square, erosion, dilation, medial_axis
# from skimage.measure import label, regionprops, approximate_polygon
from math import hypot, sin, cos, asin, acos, radians
from sklearn.neighbors import KDTree
from shapely.wkt import dumps, loads
import scipy
import utm
#pip install utm
import gdal
gdal.UseExceptions()
import osr
import ogr
#conda install gdal
import ntpath
from shapely.geometry import mapping, Point, LineString
# import matplotlib.pyplot as plt
# import seaborn as sns
pred_folders = ['/wdata/test_pred', '/wdata/test_pred_960']
speed_bins = np.array([15, 18.75, 20, 25, 30, 35, 45, 55, 65])
# test_folders = ['/data/SN5_roads/test_public/AOI_7_Moscow', '/data/SN5_roads/test_public/AOI_8_Mumbai', '/data/SN5_roads/test_public/AOI_9_San_Juan']
test_folders = []
for i in range(1, len(sys.argv) - 1):
test_folders.append(sys.argv[i])
df = pd.read_csv(path.join('/wdata', 'solution_length.csv'), header=None)
df.columns = ['ImageId', 'WKT_Pix']
# example GDAL error handler function
def gdal_error_handler(err_class, err_num, err_msg):
    """GDAL error callback: print error number, severity name and message."""
    severity_names = {
        gdal.CE_None: 'None',
        gdal.CE_Debug: 'Debug',
        gdal.CE_Warning: 'Warning',
        gdal.CE_Failure: 'Failure',
        gdal.CE_Fatal: 'Fatal',
    }
    # Map the numeric severity to its name; collapse the message to one line.
    severity = severity_names.get(err_class, 'None')
    message = err_msg.replace('\n', ' ')
    print('Error Number: ', (err_num))
    print('Error Type: ', (severity))
    print('Error Message: ', (message))
gdal.PushErrorHandler(gdal_error_handler)
# from https://github.com/CosmiQ/cresi
def get_linestring_midpoints(geom):
    '''Return midpoint coordinates and segment lengths of a linestring.

    Midpoints are snapped to the nearest integer pixel; segment lengths are
    euclidean distances, assuming cartesian coordinates.'''
    coords = list(geom.coords)
    x_mids, y_mids, dls = [], [], []
    # Walk consecutive coordinate pairs instead of indexing by position.
    for (x0, y0), (x1, y1) in zip(coords[:-1], coords[1:]):
        x_mids.append(np.rint(0.5 * (x0 + x1)))
        y_mids.append(np.rint(0.5 * (y0 + y1)))
        dls.append(scipy.spatial.distance.euclidean((x0, y0), (x1, y1)))
    return np.array(x_mids).astype(int), np.array(y_mids).astype(int), \
        np.array(dls)
def pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='', targetSR=''):
    '''from spacenet geotools

    Convert a pixel coordinate of ``inputRaster`` to a geographic coordinate
    via the raster's affine geotransform.  When ``targetSR`` is supplied the
    point is additionally reprojected from ``sourceSR`` (read from the raster
    if not given) into that spatial reference; otherwise EPSG:4326 is assumed
    and no reprojection takes place.  Returns an ``(x, y)`` tuple.'''
    if targetSR =='':
        performReprojection=False
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
    else:
        performReprojection=True
    if geomTransform=='':
        srcRaster = gdal.Open(inputRaster)
        geomTransform = srcRaster.GetGeoTransform()
        # NOTE(review): source_sr is computed here but never used afterwards.
        source_sr = osr.SpatialReference()
        source_sr.ImportFromWkt(srcRaster.GetProjectionRef())
    geom = ogr.Geometry(ogr.wkbPoint)
    # Affine geotransform: coordinate = origin + pixel index * pixel size.
    xOrigin = geomTransform[0]
    yOrigin = geomTransform[3]
    pixelWidth = geomTransform[1]
    pixelHeight = geomTransform[5]
    xCoord = (xPix * pixelWidth) + xOrigin
    yCoord = (yPix * pixelHeight) + yOrigin
    geom.AddPoint(xCoord, yCoord)
    if performReprojection:
        if sourceSR=='':
            srcRaster = gdal.Open(inputRaster)
            sourceSR = osr.SpatialReference()
            sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())
        coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)
        geom.Transform(coord_trans)
    return (geom.GetX(), geom.GetY())
def convert_pix_lstring_to_geo(wkt_lstring, im_file,
                               utm_zone=None, utm_letter=None, verbose=False):
    '''Convert linestring in pixel coords to geo coords.
    If zone or letter changes in the middle of the line, it's all screwed up,
    so force zone and letter based on the first point
    (latitude, longitude, force_zone_number=None, force_zone_letter=None)
    or just force utm zone and letter explicitly.

    Returns ``(lstring_latlon, lstring_utm, utm_zone, utm_letter)``.
    '''
    shape = wkt_lstring #shapely.wkt.loads(lstring)
    x_pixs, y_pixs = shape.coords.xy
    coords_latlon = []
    coords_utm = []
    for i,(x,y) in enumerate(zip(x_pixs, y_pixs)):
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
        lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
        if utm_zone and utm_letter:
            # Keep every point in the already-established UTM zone so the
            # resulting coordinates stay mutually consistent.
            [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
                force_zone_number=utm_zone, force_zone_letter=utm_letter)
        else:
            # First point (or no forced zone): remember its zone/letter for
            # the remaining points.
            [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
        if verbose:
            print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
                  [lat, lon, utm_east, utm_north, utm_zone, utm_letter])
        coords_utm.append([utm_east, utm_north])
        coords_latlon.append([lon, lat])
    lstring_latlon = LineString([Point(z) for z in coords_latlon])
    lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
meters_to_miles = 0.000621371
###########
def get_linestring_keypoints(geom):
    """Sample interpolated keypoints along each segment of a linestring.

    For every consecutive coordinate pair of ``geom`` seven points are
    sampled at fixed fractions along the segment (midpoint, quarter points,
    near-endpoint and 35/65 points), together with each segment's euclidean
    length (cartesian coordinates assumed).

    Returns:
        xs, ys: per-segment lists of sampled x / y coordinates.
        dls: np.ndarray of segment lengths.
    """
    # (weight of start point, weight of end point) pairs.  Explicit pairs —
    # rather than (w, 1 - w) — reproduce the original hand-unrolled float
    # arithmetic exactly; the order matches the original implementation.
    weights = ((0.5, 0.5), (0.75, 0.25), (0.25, 0.75),
               (0.9, 0.1), (0.1, 0.9), (0.35, 0.65), (0.65, 0.35))
    coords = list(geom.coords)
    xs, ys, dls = [], [], []
    for (x0, y0), (x1, y1) in zip(coords[:-1], coords[1:]):
        xs.append([a * x0 + b * x1 for a, b in weights])
        ys.append([a * y0 + b * y1 for a, b in weights])
        dls.append(scipy.spatial.distance.euclidean((x0, y0), (x1, y1)))
    return xs, ys, np.asarray(dls)
def process_file(fn):
    """Compute length and travel time for every road linestring of one image.

    Averages the per-folder speed masks, converts each pixel-space linestring
    for this image (looked up in the module-level ``df``) to geo coordinates
    to obtain its length in meters, estimates a length-weighted speed from the
    mask around sampled keypoints, and returns one result dict per linestring.
    """
    img_id = ntpath.basename(fn)[0:-4]
    img_id = img_id.replace('_PS-MS', '')
    im_file = fn
    # Average the speed masks (three 3-channel files => 9 speed bins) over
    # all prediction folders.
    msks = []
    for pred_folder in pred_folders:
        msk0 = cv2.imread(path.join(pred_folder, img_id + '_speed0.png'), cv2.IMREAD_UNCHANGED)
        msk1 = cv2.imread(path.join(pred_folder, img_id + '_speed1.png'), cv2.IMREAD_UNCHANGED)
        msk2 = cv2.imread(path.join(pred_folder, img_id + '_speed2.png'), cv2.IMREAD_UNCHANGED)
        msk = np.concatenate((msk0, msk1, msk2), axis=2)
        if msk.shape[0] < 1306:
            # Presumably masks from the smaller model lack the 6px reflect
            # border; resize and pad so all masks align — TODO confirm sizes.
            msk = cv2.resize(msk, (1300, 1300))
            msk = np.pad(msk, ((6, 6), (6, 6), (0, 0)), mode='reflect')
        msks.append(msk)
    msks = np.asarray(msks)
    msk = msks.mean(axis=0)
    # Crop the 6px border back off to the native 1300x1300 frame.
    msk = msk[6:1306, 6:1306].astype('uint8')
    vals = df[(df['ImageId'] == img_id)]['WKT_Pix'].values
    res_rows = []
    for v in vals:
        if v == 'LINESTRING EMPTY':
            # BUGFIX: the original returned here, which discarded every other
            # linestring of the image (and any rows already accumulated).
            # Record the empty row and keep processing.
            res_rows.append({'ImageId': img_id, 'WKT_Pix': 'LINESTRING EMPTY', 'length_m': 0, 'travel_time_s': 0})
            continue
        l = loads(v)
        lstring_latlon, lstring_utm, utm_zone, utm_letter = convert_pix_lstring_to_geo(l, im_file)
        length = lstring_utm.length
        length_miles = length * meters_to_miles
        xs, ys, dls = get_linestring_keypoints(l)
        _sz = 4
        speed = []
        if len(xs) > 0:
            for i in range(len(xs)):
                seg_speeds = []
                for j in range(len(xs[i])):
                    # 8x8 patch around the keypoint, clipped to the image.
                    x0 = max(0, int(xs[i][j] - _sz))
                    x1 = min(1300, int(xs[i][j] + _sz))
                    y0 = max(0, int(ys[i][j] - _sz))
                    y1 = min(1300, int(ys[i][j] + _sz))
                    patch = msk[y0:y1, x0:x1]
                    means = patch.mean(axis=(0, 1))
                    if means.sum() == 0:
                        # No speed signal at this keypoint: fall back to 25.
                        seg_speeds.append(25)
                    else:
                        # Expected speed under the normalized bin weights,
                        # clamped to the [15, 65] mph range of the bins.
                        means /= means.sum()
                        _s = (speed_bins * means).sum()
                        if _s < 15:
                            _s = 15
                        if _s > 65:
                            _s = 65
                        seg_speeds.append(_s)
                speed.append(np.mean(seg_speeds))
        speed = np.asarray(speed)
        # Length-weighted average speed over the segments, clamped again.
        dls /= dls.sum()
        speed = (speed * dls).sum()
        if speed < 15:
            speed = 15
        if speed > 65:
            speed = 65
        hours = length_miles / speed
        travel_time_s = np.round(3600. * hours, 3)
        res_rows.append({'ImageId': img_id, 'WKT_Pix': v, 'length_m': length, 'travel_time_s': travel_time_s})
    return res_rows
if __name__ == '__main__':
    t0 = timeit.default_timer()
    # Last CLI argument is the output file name; the preceding arguments are
    # the test folders (consumed into `test_folders` at import time above).
    out_file = sys.argv[-1]
    # out_file = '/wdata/solution.csv'
    all_files = []
    for d in test_folders:
        for f in listdir(path.join(d, 'PS-MS')):
            if '.tif' in f:
                all_files.append(path.join(d, 'PS-MS', f))
    # for fn in tqdm(all_files):
    #     process_file(fn)
    # Each image is independent, so fan the work out over all CPUs.
    with Pool() as pool:
        results = pool.map(process_file, all_files)
    # Flatten the per-image row lists into one submission table.
    res_rows = []
    for i in range(len(results)):
        res_rows.extend(results[i])
    sub = pd.DataFrame(res_rows, columns=['ImageId', 'WKT_Pix', 'length_m', 'travel_time_s'])
    sub.to_csv(path.join('/wdata', out_file), index=False, header=False)
    elapsed = timeit.default_timer() - t0
    print('Submission file created! Time: {:.3f} min'.format(elapsed / 60))
0600c6d422557755a49f0f4fa55a1f9a73774a50 | 6,425 | py | Python | haruhi_dl/extractor/weibo.py | haruhi-dl/haruhi-dl | 0526e2add4c263209cad55347efa9a2dfe6c3fa6 | [
"Unlicense"
] | 32 | 2021-01-18T03:52:17.000Z | 2022-02-17T20:43:39.000Z | haruhi_dl/extractor/weibo.py | haruhi-dl/haruhi-dl | 0526e2add4c263209cad55347efa9a2dfe6c3fa6 | [
"Unlicense"
] | 12 | 2021-02-06T08:12:08.000Z | 2021-12-11T23:17:41.000Z | haruhi_dl/extractor/weibo.py | haruhi-dl/haruhi-dl | 0526e2add4c263209cad55347efa9a2dfe6c3fa6 | [
"Unlicense"
] | 6 | 2021-01-29T16:46:31.000Z | 2022-01-20T18:40:03.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
import json
import random
import re
from ..compat import (
compat_parse_qs,
compat_str,
)
from ..utils import (
js_to_json,
strip_jsonp,
urlencode_postdata,
)
| 36.095506 | 148 | 0.50428 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
import json
import random
import re
from ..compat import (
compat_parse_qs,
compat_str,
)
from ..utils import (
js_to_json,
strip_jsonp,
urlencode_postdata,
)
class WeiboIE(InfoExtractor):
    """Extract video from a desktop weibo.com post.

    On the first visit weibo.com redirects to passport.weibo.com; this
    extractor performs the "genvisitor" handshake to obtain a visitor
    cookie, reloads the page, then collects progressive MP4 sources from
    the page markup and, when advertised, DASH formats from the
    getdashinfo AJAX endpoint.
    """
    _VALID_URL = r'https?://(?:www\.)?weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'https://weibo.com/6275294458/Fp6RGfbff?type=comment',
        'info_dict': {
            'id': 'Fp6RGfbff',
            'ext': 'mp4',
            'title': 'You should have servants to massage you,... 来自Hosico_猫 - 微博',
        }
    }, {
        # DASH formats - https://github.com/ytdl-org/youtube-dl/issues/27320
        'url': 'https://weibo.com/5720474518/JxfyRbDh6?type=repost',
        'info_dict': {
            'id': 'JxfyRbDh6',
            'ext': 'mp4',
            'title': '#张亚东访谈KDA#击穿次元壁!张亚东访谈K... 来自英雄联盟 - 微博',
        },
        'params': {
            'format': 'bestvideo',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # to get Referer url for genvisitor
        webpage, urlh = self._download_webpage_handle(url, video_id)
        visitor_url = urlh.geturl()
        if 'passport.weibo.com' in visitor_url:
            # First visit: run the genvisitor handshake to get a visitor
            # cookie, confirm it via the /visitor/visitor callback, then
            # re-fetch the actual post page.
            visitor_data = self._download_json(
                'https://passport.weibo.com/visitor/genvisitor', video_id,
                note='Generating first-visit data',
                transform_source=strip_jsonp,
                headers={'Referer': visitor_url},
                data=urlencode_postdata({
                    'cb': 'gen_callback',
                    'fp': json.dumps({
                        'os': '2',
                        'browser': 'Gecko57,0,0,0',
                        'fonts': 'undefined',
                        'screenInfo': '1440*900*24',
                        'plugins': '',
                    }),
                }))
            tid = visitor_data['data']['tid']
            cnfd = '%03d' % visitor_data['data']['confidence']
            self._download_webpage(
                'https://passport.weibo.com/visitor/visitor', video_id,
                note='Running first-visit callback',
                query={
                    'a': 'incarnate',
                    't': tid,
                    'w': 2,
                    'c': cnfd,
                    'cb': 'cross_domain',
                    'from': 'weibo',
                    '_rand': random.random(),
                })
            webpage = self._download_webpage(
                url, video_id, note='Revisiting webpage')
        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')
        # Progressive MP4 sources are embedded as an escaped query string in
        # the page markup.
        video_formats = compat_parse_qs(self._search_regex(
            r'video-sources=\\\"(.+?)\"', webpage, 'video_sources'))
        formats = []
        supported_resolutions = (480, 720)
        for res in supported_resolutions:
            vid_urls = video_formats.get(compat_str(res))
            if not vid_urls or not isinstance(vid_urls, list):
                continue
            vid_url = vid_urls[0]
            formats.append({
                'url': vid_url,
                'height': res,
            })
        media_ids = self._search_regex(
            # for single-media posts only
            r' action-data=\\"protocol=(?:.*?,)?dash(?:,.*?)?&type=feedvideo&objectid=(\d+:[\da-f]+)&',
            webpage, 'media ids', default=None)
        if media_ids:
            dash_data = self._download_json('https://weibo.com/aj/video/getdashinfo?media_ids=%s' % media_ids,
                                            media_ids, 'Downloading DASH mp4 urls')
            for media in dash_data['data']['list'][0]['details']:
                if 'play_info' not in media:
                    continue
                pinf = media['play_info']
                if not pinf['url']:
                    continue
                # Video-only vs audio-only representations are told apart by
                # the mime type; only the matching fields are filled in.
                formats.append({
                    'url': pinf['url'],
                    'format_id': pinf.get('label'),
                    'width': pinf.get('width') if 'video/' in pinf.get('mime') else None,
                    'height': pinf.get('height') if 'video/' in pinf.get('mime') else None,
                    ('vbr' if 'video/' in pinf.get('mime') else 'abr'): pinf.get('bitrate'),
                    'fps': pinf.get('fps') if 'video/' in pinf.get('mime') else None,
                    'vcodec': pinf.get('video_codecs') if 'video/' in pinf.get('mime') else 'none',
                    'acodec': pinf.get('audio_codecs') if 'audio/' in pinf.get('mime') else 'none',
                    'asr': pinf.get('audio_sample_rate') if 'audio/' in pinf.get('mime') else None,
                    'filesize': pinf.get('size'),
                })
        self._sort_formats(formats, field_preference=('height', 'vbr', 'abr'))
        uploader = self._og_search_property(
            'nick-name', webpage, 'uploader', default=None)
        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'formats': formats
        }
class WeiboMobileIE(InfoExtractor):
    """Extract video from a mobile (m.weibo.cn) status page.

    The mobile page embeds the status as JavaScript data in the
    ``$render_data`` variable; the stream URL is read directly from its
    ``page_info.media_info`` entry.
    """
    _VALID_URL = r'https?://m\.weibo\.cn/status/(?P<id>[0-9]+)(\?.+)?'
    _TEST = {
        'url': 'https://m.weibo.cn/status/4189191225395228?wm=3333_2001&sourcetype=weixin&featurecode=newtitle&from=singlemessage&isappinstalled=0',
        'info_dict': {
            'id': '4189191225395228',
            'ext': 'mp4',
            'title': '午睡当然是要甜甜蜜蜜的啦',
            'uploader': '柴犬柴犬'
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Fetch the mobile status page containing the embedded render data.
        webpage = self._download_webpage(url, video_id, note='visit the page')
        # Parse the JS assignment: var $render_data = [{...}][0] || {};
        weibo_info = self._parse_json(self._search_regex(
            r'var\s+\$render_data\s*=\s*\[({.*})\]\[0\]\s*\|\|\s*{};',
            webpage, 'js_code', flags=re.DOTALL),
            video_id, transform_source=js_to_json)
        status_data = weibo_info.get('status', {})
        # NOTE(review): page_info may be None for statuses without media,
        # which would raise below — confirm against real non-video statuses.
        page_info = status_data.get('page_info')
        title = status_data['status_title']
        uploader = status_data.get('user', {}).get('screen_name')
        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'url': page_info['media_info']['stream_url']
        }
| 4,905 | 1,290 | 46 |
36ef0dd7e0463788bad2d380b4d3cdd7c13fbdba | 1,230 | py | Python | common/utils/path.py | tower000/sublime_a_file_icon | dd27dcf54ba018a42d75c2ed533f9de0f7df69d4 | [
"MIT"
] | 3 | 2019-05-22T06:23:53.000Z | 2019-06-29T02:28:23.000Z | common/utils/path.py | themilkman/a-file-icon | 2daa81d114e9f1c7f5fa1f30455db1f91966667b | [
"MIT"
] | 1 | 2021-06-10T11:31:33.000Z | 2021-06-10T11:31:33.000Z | common/utils/path.py | mallowigi/a-file-icon | cd0d53b707593e8ec5fca1acd9342734beb9f983 | [
"MIT"
] | 4 | 2018-12-17T10:51:38.000Z | 2021-11-27T14:32:30.000Z | import os
import sublime
from .. import settings
| 25.625 | 71 | 0.660163 | import os
import sublime
from .. import settings
def _package_path(*parts):
    """Join *parts* onto this package's folder under Sublime's Packages dir."""
    return os.path.join(sublime.packages_path(), settings.PACKAGE_NAME, *parts)


def _overlay_path(*parts):
    """Join *parts* onto the overlay root under Sublime's Packages dir."""
    return os.path.join(sublime.packages_path(), settings.OVERLAY_ROOT, *parts)


def get_package_archive():
    """Return the path of the installed .sublime-package archive."""
    return os.path.join(sublime.installed_packages_path(),
                        settings.PACKAGE_ARCH)


def get_package_folder():
    """Return the path of the unpacked package folder."""
    return _package_path()


def get_package_icons():
    """Return the path of the package's bundled icons folder."""
    return _package_path("icons")


def get_package_aliases():
    """Return the path of the package's bundled aliases folder."""
    return _package_path("aliases")


def get_overlay():
    """Return the path of the overlay root folder."""
    return _overlay_path()


def get_overlay_aliases():
    """Return the path of the overlay's aliases folder."""
    return _overlay_path("aliases")


def get_overlay_patches():
    """Return the path of the overlay's patches folder."""
    return _overlay_path("patches")


def get_overlay_patches_general():
    """Return the path of the overlay's general patches folder."""
    return _overlay_path("patches", "general")


def get_overlay_patches_specific():
    """Return the path of the overlay's theme-specific patches folder."""
    return _overlay_path("patches", "specific")
| 964 | 0 | 207 |
021b57886f000670438a5ae0f288f7cee9e50927 | 983 | py | Python | class1/e8_ciscoconfparse_v2.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | 2 | 2017-05-11T12:05:15.000Z | 2021-07-15T18:13:19.000Z | class1/e8_ciscoconfparse_v2.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | null | null | null | class1/e8_ciscoconfparse_v2.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | 1 | 2017-05-11T12:05:18.000Z | 2017-05-11T12:05:18.000Z | #!/usr/bin/env pytho
'''
This script parses cisco.txt file and searches for 'crypto map CRYPTO' lines using CCP module.
Then it displays childrens indented multiple times for each element of crypto_map variable (list).
'''
import pprint
from ciscoconfparse import CiscoConfParse
fo = open('cisco.txt', 'r') # Opening text file as FileObject with open() function
parse = CiscoConfParse(fo) # Loading the file to CCF module as argument for parsing later
crypto_map = parse.find_all_children(r'^crypto map CRYPTO')
# In the line above using find_all_children method with regex (parsing)
print 'Show the content of crypto_map variable: \n',crypto_map
print
print 'Show me parent-child relationships for crypto map CRYPTO lines in cisco.txt file: '
# Iterate over elements of crypto_map list and display in a nice human readable format
for line in crypto_map:
# pprint.pprint(line) # Replaced with the below as suggested by Kirk (clean output)
print(line.strip("\n"))
| 42.73913 | 98 | 0.766022 | #!/usr/bin/env pytho
'''
This script parses cisco.txt file and searches for 'crypto map CRYPTO' lines using CCP module.
Then it displays childrens indented multiple times for each element of crypto_map variable (list).
'''
import pprint
from ciscoconfparse import CiscoConfParse
fo = open('cisco.txt', 'r') # Opening text file as FileObject with open() function
parse = CiscoConfParse(fo) # Loading the file to CCF module as argument for parsing later
crypto_map = parse.find_all_children(r'^crypto map CRYPTO')
# In the line above using find_all_children method with regex (parsing)
print 'Show the content of crypto_map variable: \n',crypto_map
print
print 'Show me parent-child relationships for crypto map CRYPTO lines in cisco.txt file: '
# Iterate over elements of crypto_map list and display in a nice human readable format
for line in crypto_map:
# pprint.pprint(line) # Replaced with the below as suggested by Kirk (clean output)
print(line.strip("\n"))
| 0 | 0 | 0 |
3f5bc935d0411f46bef1792e263ae6b4c4c6c37e | 5,219 | py | Python | test/test_validation.py | rajivpatel36/pyesg | f16939f6de003c55fc89d8e1bd11af03011ee0aa | [
"MIT"
] | null | null | null | test/test_validation.py | rajivpatel36/pyesg | f16939f6de003c55fc89d8e1bd11af03011ee0aa | [
"MIT"
] | null | null | null | test/test_validation.py | rajivpatel36/pyesg | f16939f6de003c55fc89d8e1bd11af03011ee0aa | [
"MIT"
] | null | null | null | from pyesg.configuration import validation_configuration as valid_config
from pyesg.validation.run import validate_simulations
from pyesg.configuration.pyesg_configuration import *
config = PyESGConfiguration()
config.number_of_projection_steps = 30
config.number_of_batches = 1
config.number_of_simulations = 100000
config.projection_frequency = 'annually'
config.random_seed = 128
config.start_date = '2018-01-01'
economy = Economy()
economy.id = "GBP"
asset_class = AssetClass()
asset_class.id = "GBP_Nominal"
asset_class.model_id = 'hull_white'
asset_class.add_parameter('alpha', 0.05)
asset_class.add_parameter('sigma', 0.2 * 0.1)
yc_points = {
0.5: 0.00679070105770901,
1: 0.00745916002218801,
1.5: 0.0079074852733388,
2: 0.00836441669643775,
2.5: 0.00884161282573678,
3: 0.00932762601832977,
3.5: 0.00981445589941161,
4: 0.0102969721178294,
4.5: 0.0107716710398867,
5: 0.0112363849191675,
5.5: 0.0116900851233338,
6: 0.0121325124408309,
6.5: 0.0125637162796559,
7: 0.0129837371605093,
7.5: 0.0133924143022063,
8: 0.0137892855650153,
8.5: 0.0141736214537358,
9: 0.0145445182679629,
9.5: 0.0149010412164557,
10: 0.0152422849420296,
10.5: 0.0155674503497323,
11: 0.0158758864638649,
11.5: 0.0161671188651251,
12: 0.0164409074632115,
12.5: 0.016697217851849,
13: 0.0169361824548138,
13.5: 0.0171580886888855,
14: 0.0173633870307634,
14.5: 0.0175526692648801,
15: 0.0177266234016501,
15.5: 0.0178859783210095,
16: 0.0180314895849257,
16.5: 0.0181639353683754,
17: 0.018284106311916,
17.5: 0.0183927617968095,
18: 0.018490607925128,
18.5: 0.0185782967490554,
19: 0.0186563922209754,
19.5: 0.0187253557221218,
20: 0.018785557677642,
20.5: 0.0188372886488034,
21: 0.0188807683798148,
21.5: 0.0189161404104334,
22: 0.0189434581524923,
22.5: 0.0189627104915117,
23: 0.0189738426838589,
23.5: 0.0189767792253448,
24: 0.0189714599105421,
24.5: 0.018957845218761,
25: 0.0189359147882514,
25.5: 0.0189056816921497,
26: 0.0188672208215708,
26.5: 0.0188206722776286,
27: 0.0187662444763932,
27.5: 0.0187042132382632,
28: 0.0186349225161717,
28.5: 0.0185587809820652,
29: 0.0184762565449625,
29.5: 0.0183878727980299,
30: 0.0182942021898953,
30.5: 0.0181958450182937,
31: 0.0180934206282059,
31.5: 0.0179875657839365,
32: 0.0178789330057234,
32.5: 0.0177681797287789,
33: 0.017655948801948,
33.5: 0.0175428655247506,
34: 0.0174295389236686,
34.5: 0.0173165628801392,
35: 0.0172045112957245,
35.5: 0.0170939187050309,
36: 0.0169852750575237,
36.5: 0.0168790286221742,
37: 0.0167755888037096,
37.5: 0.0166753287335625,
38: 0.0165785875430073,
38.5: 0.0164856688754966,
39: 0.0163968357127189,
39.5: 0.0163123113348747,
40: 0.0162322805072689,
}
for key, value in yc_points.items():
asset_class.add_parameter(f"yc_{key}", value)
asset_class.add_output('GBP_Nominal_Discount_Factor', 'discount_factor')
asset_class.add_output('GBP_Nominal_ZCB_5', 'zero_coupon_bond', term=5)
asset_class.add_output('GBP_Nominal_ZCB_10', 'zero_coupon_bond', term=10)
asset_class.add_output('GBP_Nominal_CMI_5', 'bond_index', term=5)
asset_class.add_output('GBP_Nominal_CMI_10', 'bond_index', term=10)
asset_class.random_drivers.append("GBP_Nominal")
equity_asset_class = AssetClass()
equity_asset_class.id = "GBP_Equities"
equity_asset_class.model_id = 'black_scholes'
equity_asset_class.add_parameter('sigma', 0.2)
equity_asset_class.add_output('GBP_Equities_TRI', 'total_return_index', 1)
equity_asset_class.random_drivers.append("GBP_Equities")
equity_asset_class.dependencies.append("GBP_Nominal")
economy.asset_classes.append(asset_class)
economy.asset_classes.append(equity_asset_class)
config.economies.append(economy)
config.output_file_directory = '/Users/rajivpatel/Desktop'
config.output_file_name = 'yomama'
# Validation settings: which analyses to run per asset class and where the
# validation report is written.
validation_settings = valid_config.ValidationConfiguration(
    output_file_directory='/users/rajivpatel/Desktop/',
    output_file_name='test',
)
validation_settings.asset_classes.append(valid_config.AssetClass(
    id="GBP_Nominal",
    validation_analyses=[
        # Analyses on the nominal-rates asset class, each at a 95%
        # confidence level; the bond analyses are run at 5y and 10y terms.
        valid_config.ValidationAnalysis(
            id='average_discount_factor',
            parameters=Parameters(confidence_level=0.95)
        ),
        valid_config.ValidationAnalysis(
            id='discounted_zero_coupon_bond',
            parameters=Parameters(confidence_level=0.95, terms=[5, 10])
        ),
        valid_config.ValidationAnalysis(
            id='discounted_bond_index',
            parameters=Parameters(confidence_level=0.95, terms=[5, 10])
        )
    ]
))
validation_settings.asset_classes.append(valid_config.AssetClass(
    id="GBP_Equities",
    validation_analyses=[
        valid_config.ValidationAnalysis(
            id='discounted_total_return_index',
            parameters=Parameters(confidence_level=0.95)
        ),
        # valid_config.ValidationAnalysis(
        #     id='total_return_index_log_return_moments'
        # )
    ]
))
# Run all configured validation analyses against the simulation config.
validate_simulations(config, validation_settings)
| 31.823171 | 74 | 0.721211 | from pyesg.configuration import validation_configuration as valid_config
from pyesg.validation.run import validate_simulations
from pyesg.configuration.pyesg_configuration import *
config = PyESGConfiguration()
config.number_of_projection_steps = 30
config.number_of_batches = 1
config.number_of_simulations = 100000
config.projection_frequency = 'annually'
config.random_seed = 128
config.start_date = '2018-01-01'
economy = Economy()
economy.id = "GBP"
asset_class = AssetClass()
asset_class.id = "GBP_Nominal"
asset_class.model_id = 'hull_white'
asset_class.add_parameter('alpha', 0.05)
asset_class.add_parameter('sigma', 0.2 * 0.1)
yc_points = {
0.5: 0.00679070105770901,
1: 0.00745916002218801,
1.5: 0.0079074852733388,
2: 0.00836441669643775,
2.5: 0.00884161282573678,
3: 0.00932762601832977,
3.5: 0.00981445589941161,
4: 0.0102969721178294,
4.5: 0.0107716710398867,
5: 0.0112363849191675,
5.5: 0.0116900851233338,
6: 0.0121325124408309,
6.5: 0.0125637162796559,
7: 0.0129837371605093,
7.5: 0.0133924143022063,
8: 0.0137892855650153,
8.5: 0.0141736214537358,
9: 0.0145445182679629,
9.5: 0.0149010412164557,
10: 0.0152422849420296,
10.5: 0.0155674503497323,
11: 0.0158758864638649,
11.5: 0.0161671188651251,
12: 0.0164409074632115,
12.5: 0.016697217851849,
13: 0.0169361824548138,
13.5: 0.0171580886888855,
14: 0.0173633870307634,
14.5: 0.0175526692648801,
15: 0.0177266234016501,
15.5: 0.0178859783210095,
16: 0.0180314895849257,
16.5: 0.0181639353683754,
17: 0.018284106311916,
17.5: 0.0183927617968095,
18: 0.018490607925128,
18.5: 0.0185782967490554,
19: 0.0186563922209754,
19.5: 0.0187253557221218,
20: 0.018785557677642,
20.5: 0.0188372886488034,
21: 0.0188807683798148,
21.5: 0.0189161404104334,
22: 0.0189434581524923,
22.5: 0.0189627104915117,
23: 0.0189738426838589,
23.5: 0.0189767792253448,
24: 0.0189714599105421,
24.5: 0.018957845218761,
25: 0.0189359147882514,
25.5: 0.0189056816921497,
26: 0.0188672208215708,
26.5: 0.0188206722776286,
27: 0.0187662444763932,
27.5: 0.0187042132382632,
28: 0.0186349225161717,
28.5: 0.0185587809820652,
29: 0.0184762565449625,
29.5: 0.0183878727980299,
30: 0.0182942021898953,
30.5: 0.0181958450182937,
31: 0.0180934206282059,
31.5: 0.0179875657839365,
32: 0.0178789330057234,
32.5: 0.0177681797287789,
33: 0.017655948801948,
33.5: 0.0175428655247506,
34: 0.0174295389236686,
34.5: 0.0173165628801392,
35: 0.0172045112957245,
35.5: 0.0170939187050309,
36: 0.0169852750575237,
36.5: 0.0168790286221742,
37: 0.0167755888037096,
37.5: 0.0166753287335625,
38: 0.0165785875430073,
38.5: 0.0164856688754966,
39: 0.0163968357127189,
39.5: 0.0163123113348747,
40: 0.0162322805072689,
}
for key, value in yc_points.items():
asset_class.add_parameter(f"yc_{key}", value)
asset_class.add_output('GBP_Nominal_Discount_Factor', 'discount_factor')
asset_class.add_output('GBP_Nominal_ZCB_5', 'zero_coupon_bond', term=5)
asset_class.add_output('GBP_Nominal_ZCB_10', 'zero_coupon_bond', term=10)
asset_class.add_output('GBP_Nominal_CMI_5', 'bond_index', term=5)
asset_class.add_output('GBP_Nominal_CMI_10', 'bond_index', term=10)
asset_class.random_drivers.append("GBP_Nominal")
equity_asset_class = AssetClass()
equity_asset_class.id = "GBP_Equities"
equity_asset_class.model_id = 'black_scholes'
equity_asset_class.add_parameter('sigma', 0.2)
equity_asset_class.add_output('GBP_Equities_TRI', 'total_return_index', 1)
equity_asset_class.random_drivers.append("GBP_Equities")
equity_asset_class.dependencies.append("GBP_Nominal")
economy.asset_classes.append(asset_class)
economy.asset_classes.append(equity_asset_class)
config.economies.append(economy)
config.output_file_directory = '/Users/rajivpatel/Desktop'
config.output_file_name = 'yomama'
validation_settings = valid_config.ValidationConfiguration(
output_file_directory='/users/rajivpatel/Desktop/',
output_file_name='test',
)
validation_settings.asset_classes.append(valid_config.AssetClass(
id="GBP_Nominal",
validation_analyses=[
valid_config.ValidationAnalysis(
id='average_discount_factor',
parameters=Parameters(confidence_level=0.95)
),
valid_config.ValidationAnalysis(
id='discounted_zero_coupon_bond',
parameters=Parameters(confidence_level=0.95, terms=[5, 10])
),
valid_config.ValidationAnalysis(
id='discounted_bond_index',
parameters=Parameters(confidence_level=0.95, terms=[5, 10])
)
]
))
validation_settings.asset_classes.append(valid_config.AssetClass(
id="GBP_Equities",
validation_analyses=[
valid_config.ValidationAnalysis(
id='discounted_total_return_index',
parameters=Parameters(confidence_level=0.95)
),
# valid_config.ValidationAnalysis(
# id='total_return_index_log_return_moments'
# )
]
))
validate_simulations(config, validation_settings)
| 0 | 0 | 0 |
2a90588ad955849c7145f9d2a7d534b0eee92746 | 3,061 | py | Python | openrec/recommenders/vbpr.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | 1 | 2018-11-08T14:53:41.000Z | 2018-11-08T14:53:41.000Z | openrec/recommenders/vbpr.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | null | null | null | openrec/recommenders/vbpr.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | null | null | null | from openrec.recommenders import BPR
from openrec.modules.extractions import LatentFactor, MultiLayerFC
import tensorflow as tf
| 52.775862 | 115 | 0.67723 | from openrec.recommenders import BPR
from openrec.modules.extractions import LatentFactor, MultiLayerFC
import tensorflow as tf
def VBPR(batch_size, dim_user_embed, dim_item_embed, dim_v, total_users, total_items, l2_reg_embed=None,
         l2_reg_mlp=None, init_model_dir=None, save_model_dir='Recommender/', train=True, serve=False):
    """Build a Visual BPR recommender.

    Starts from a plain BPR model and grafts a visual pathway onto its
    train/serve graphs: placeholders for per-item visual feature vectors
    (of size dim_v), an item-side MLP projection, and the connections
    wiring the new inputs into the item graph.  Returns the assembled
    recommender.
    """
    recommender = BPR(batch_size=batch_size,
                      dim_user_embed=dim_user_embed,
                      dim_item_embed=dim_item_embed,
                      total_users=total_users,
                      total_items=total_items,
                      l2_reg=l2_reg_embed,
                      init_model_dir=init_model_dir,
                      save_model_dir=save_model_dir,
                      train=train, serve=serve)
    train_graph = recommender.traingraph
    serve_graph = recommender.servegraph
    # Visual features are projected to dim_user_embed - dim_item_embed so
    # that the concatenated item vector presumably matches the user
    # embedding size -- TODO(review): confirm against BPR's item graph.
    projection_dims = [dim_user_embed - dim_item_embed]

    @train_graph.inputgraph.extend(outs=['p_item_vfeature', 'n_item_vfeature'])
    def train_item_visual_features(sg):
        # Placeholders for the positive / negative items' visual features.
        sg['p_item_vfeature'] = tf.placeholder(tf.float32, shape=[batch_size, dim_v], name='p_item_vfeature')
        sg['n_item_vfeature'] = tf.placeholder(tf.float32, shape=[batch_size, dim_v], name='n_item_vfeature')
        sg.update_global_input_mapping({'p_item_vfeature': sg['p_item_vfeature'],
                                        'n_item_vfeature': sg['n_item_vfeature']})

    @serve_graph.inputgraph.extend(outs=['item_vfeature'])
    def serving_item_visual_features(sg):
        # Serving-time placeholder; the batch dimension is left open.
        sg['item_vfeature'] = tf.placeholder(tf.float32, shape=[None, dim_v], name='item_vfeature')
        sg.update_global_input_mapping({'item_vfeature': sg['item_vfeature']})

    @train_graph.itemgraph.extend(ins=['p_item_vfeature', 'n_item_vfeature'])
    def train_add_item_graph(sg):
        # Project visual features (shared 'item_MLP' scope ties both
        # projections) and concatenate onto the latent item vectors.
        pos_proj = MultiLayerFC(in_tensor=sg['p_item_vfeature'], l2_reg=l2_reg_mlp, subgraph=sg,
                                dims=projection_dims, scope='item_MLP')
        neg_proj = MultiLayerFC(in_tensor=sg['n_item_vfeature'], l2_reg=l2_reg_mlp, subgraph=sg,
                                dims=projection_dims, scope='item_MLP')
        sg['p_item_vec'] = tf.concat([sg['p_item_vec'], pos_proj], axis=1)
        sg['n_item_vec'] = tf.concat([sg['n_item_vec'], neg_proj], axis=1)

    @serve_graph.itemgraph.extend(ins=['item_vfeature'])
    def serving_add_item_graph(sg):
        proj = MultiLayerFC(in_tensor=sg['item_vfeature'], l2_reg=l2_reg_mlp, subgraph=sg,
                            dims=projection_dims, scope='item_MLP')
        sg['item_vec'] = tf.concat([sg['item_vec'], proj], axis=1)

    @train_graph.connector.extend
    def train_connect(g):
        # Route the visual-feature inputs into the item graph.
        g.itemgraph['p_item_vfeature'] = g.inputgraph['p_item_vfeature']
        g.itemgraph['n_item_vfeature'] = g.inputgraph['n_item_vfeature']

    @serve_graph.connector.extend
    def serve_connect(g):
        g.itemgraph['item_vfeature'] = g.inputgraph['item_vfeature']

    return recommender
a9f10f7c8bcf5e0dd3b721bf09970610f2d2f830 | 6,625 | py | Python | testsuite/runtests.py | LaplaceKorea/Concuerror | 87e63f10ac615bf2eeac5b0916ef54d11a933e0b | [
"BSD-2-Clause"
] | 21 | 2015-01-08T05:47:56.000Z | 2019-06-27T13:31:14.000Z | testsuite/runtests.py | LaplaceKorea/Concuerror | 87e63f10ac615bf2eeac5b0916ef54d11a933e0b | [
"BSD-2-Clause"
] | 4 | 2015-05-27T12:46:48.000Z | 2015-05-27T12:47:33.000Z | testsuite/runtests.py | mariachris/Concuerror | 87e63f10ac615bf2eeac5b0916ef54d11a933e0b | [
"BSD-2-Clause"
] | 6 | 2015-03-05T00:29:04.000Z | 2021-08-17T16:05:02.000Z | #!/usr/bin/env python
import os
import re
import sys
import glob
import subprocess
from ctypes import c_int
from multiprocessing import Process, Lock, Value, BoundedSemaphore, cpu_count
#---------------------------------------------------------------------
# Extract scenarios from the specified test
#---------------------------------------------------------------------
# Run the specified scenario and print the results
#---------------------------------------------------------------------
# Main program
# Compile some regular expressions
# Patterns matching run-specific Erlang artifacts (process pids and
# references); result lines containing them are tolerated when diffing
# (see ignoreLine / equalResults in the full script).
match_pids = re.compile("<\d+\.\d+\.\d+>")
match_refs = re.compile("#Ref<[\d\.]+>")
#match_file = re.compile("suites/.+/src/.*\.erl")
ignore_matches = [match_pids, match_refs]
# Get the directory of Concuerror's testsuite
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
concuerror = os.path.abspath(dirname + "/../concuerror")
results = os.path.abspath(dirname + "/results")
# Cleanup temp files (stale .beam/.dump files and previous results)
# TODO: make it os independent
os.system("find %s \( -name '*.beam' -o -name '*.dump' \) -exec rm {} \;"
          % dirname)
os.system("rm -rf %s/*" % results)
# Compile scenarios.erl (the helper that lists each test's scenarios)
os.system("erlc %s/scenarios.erl" % dirname)
# If we have arguments we should use them as tests,
# otherwise check them all
if len(sys.argv) > 1:
    tests = sys.argv[1:]
    tests = [os.path.abspath(item) for item in tests]
else:
    tests = glob.glob(dirname + "/suites/*/src/*")
# How many threads we want (default, number of CPUs in the system)
threads = os.getenv("THREADS", "")
if threads == "":
    try:
        threads = str(cpu_count())
    except:
        # cpu_count() can raise NotImplementedError on some platforms.
        threads = "4"
# Print header
print "Concuerror's Testsuite (%d threads)\n" % int(threads)
print "%-10s %-20s %-50s %s" % \
    ("Suite", "Test", "(Function, Preemption Bound, Reduction)", "Result")
print "---------------------------------------------" + \
    "---------------------------------------------"
# Create share integers to count tests and
# a lock to protect printings
lock = Lock()
total_tests = Value(c_int, 0, lock=False)
total_failed = Value(c_int, 0, lock=False)
# Semaphore throttling concurrency; acquired/released inside the workers.
sema = BoundedSemaphore(int(threads))
# For every test do
procT = []
for test in tests:
    p = Process(target=runTest, args=(test,))
    p.start()
    procT.append(p)
# Wait
for p in procT:
    p.join()
# Print overview
print "\nOVERALL SUMMARY for test run"
print "  %d total tests, which gave rise to" % len(tests)
print "  %d test cases, of which" % total_tests.value
print "  %d caused unexpected failures!" % total_failed.value
# Cleanup temp files
os.system("find %s -name '*.beam' -exec rm {} \;" % dirname)
| 29.842342 | 78 | 0.560604 | #!/usr/bin/env python
import os
import re
import sys
import glob
import subprocess
from ctypes import c_int
from multiprocessing import Process, Lock, Value, BoundedSemaphore, cpu_count
#---------------------------------------------------------------------
# Extract scenarios from the specified test
def runTest(test):
    """Compile one test (a single .erl file or a multi-module directory),
    extract its scenarios via scenarios.erl and fork one runScenario
    worker process per scenario."""
    global dirname
    global results
    # test looks like '.*/suites/<suite_name>/src/<test_name>(.erl)?'
    parent_dir, name = os.path.split(test)
    suite = os.path.split(os.path.split(parent_dir)[0])[1]
    name = os.path.splitext(name)[0]
    if os.path.isdir(test):
        # Multi-module test: a directory of .erl files driven by test.erl.
        src_dir = test
        module = "test"
        files = glob.glob(src_dir + "/*.erl")
    else:
        src_dir = parent_dir
        module = name
        files = [test]
    # Make sure the per-suite results directory exists.
    try:
        os.makedirs(results + "/" + suite + "/results")
    except OSError:
        pass
    sema.acquire()
    # Compile the module, then have scenarios.erl print its scenarios.
    os.system("erlc -W0 -o %s %s/%s.erl" % (src_dir, src_dir, module))
    extractor = subprocess.Popen(
        ["erl -noinput -pa %s -pa %s -s scenarios extract %s -s init stop"
         % (dirname, src_dir, module)], stdout=subprocess.PIPE, shell=True)
    sema.release()
    workers = []
    for scenario in extractor.stdout:
        # Each line looks like '{<mod_name>,<func_name>,<preb>,...}'; the
        # first field (module) is unused, trailing fields are flags.
        fields = scenario.strip("{}\n").split(",")
        worker = Process(
            target=runScenario,
            args=(suite, name, module, fields[1], fields[2], fields[3:], files))
        worker.start()
        workers.append(worker)
    extractor.stdout.close()
    # Wait for all scenario workers of this test.
    for worker in workers:
        worker.join()
#---------------------------------------------------------------------
# Run the specified scenario and print the results
def runScenario(suite, name, modn, funn, preb, flags, files):
global concuerror
global results
global dirname
global sema
global lock
global total_tests
global total_failed
if "dpor" in flags:
dpor_flag = "--dpor"
file_ext = "-dpor"
dpor_output = "dpor"
else:
dpor_flag = ""
file_ext = ""
dpor_output = "full"
sema.acquire()
# Run concuerror
status = os.system(
("%s --target %s %s --files %s --output %s/%s/results/%s-%s-%s%s.txt "
"--preb %s --quiet --wait-messages %s > /dev/null 2>&1")
% (concuerror, modn, funn, ' '.join(files), results,
suite, name, funn, preb, file_ext, preb, dpor_flag))
# Compare the results
has_crash = "crash" in flags
orig = ("%s/suites/%s/results/%s-%s-%s%s.txt"
% (dirname, suite, name, funn, preb, file_ext))
rslt = ("%s/%s/results/%s-%s-%s%s.txt"
% (results, suite, name, funn, preb, file_ext))
if status == 0 and not has_crash:
equalRes = equalResults(orig, rslt)
elif status == 0 and has_crash:
equalRes = False
else:
equalRes = has_crash
sema.release()
# Print the results
lock.acquire()
total_tests.value += 1
if equalRes:
# We don't need to keep the results file
try:
os.remove(rslt)
except:
pass
print "%-10s %-20s %-50s \033[01;32mok\033[00m" % \
(suite, name, "("+funn+", "+preb+", "+dpor_output+")")
else:
if status != 0:
f = open(rslt, 'w')
f.write("The test crashed.")
total_failed.value += 1
print "%-10s %-20s %-50s \033[01;31mfailed\033[00m" % \
(suite, name, "("+funn+", "+preb+", "+dpor_output+")")
lock.release()
def equalResults(f1, f2):
    """Compare two result files line by line.

    A mismatching pair of lines is tolerated when the line from f1 matches
    one of the run-specific ignore patterns (pids/references, see
    ignoreLine).  Returns False when either file cannot be opened.
    """
    try:
        fp1 = open(f1, 'r')
    except IOError:
        return False
    try:
        fp2 = open(f2, 'r')
    except IOError:
        fp1.close()
        return False
    # try/finally guarantees both handles are closed on every exit path
    # (the original leaked them if an exception escaped the loop).
    try:
        while True:
            l1 = fp1.readline()
            l2 = fp2.readline()
            if (l1 != l2) and (not ignoreLine(l1)):
                return False
            if not l1:
                # EOF on f1: every line so far was equal or ignored.
                return True
    finally:
        fp1.close()
        fp2.close()
def ignoreLine(line):
    """Return True when `line` matches any of the global ignore patterns
    (run-specific Erlang pids and references)."""
    global ignore_matches
    return any(re.search(pattern, line) for pattern in ignore_matches)
#---------------------------------------------------------------------
# Main program
# Compile some regular expressions
# Patterns matching run-specific Erlang artifacts (process pids and
# references); result lines containing them are tolerated when diffing
# (see ignoreLine / equalResults above).
match_pids = re.compile("<\d+\.\d+\.\d+>")
match_refs = re.compile("#Ref<[\d\.]+>")
#match_file = re.compile("suites/.+/src/.*\.erl")
ignore_matches = [match_pids, match_refs]
# Get the directory of Concuerror's testsuite
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
concuerror = os.path.abspath(dirname + "/../concuerror")
results = os.path.abspath(dirname + "/results")
# Cleanup temp files (stale .beam/.dump files and previous results)
# TODO: make it os independent
os.system("find %s \( -name '*.beam' -o -name '*.dump' \) -exec rm {} \;"
          % dirname)
os.system("rm -rf %s/*" % results)
# Compile scenarios.erl (the helper that lists each test's scenarios)
os.system("erlc %s/scenarios.erl" % dirname)
# If we have arguments we should use them as tests,
# otherwise check them all
if len(sys.argv) > 1:
    tests = sys.argv[1:]
    tests = [os.path.abspath(item) for item in tests]
else:
    tests = glob.glob(dirname + "/suites/*/src/*")
# How many threads we want (default, number of CPUs in the system)
threads = os.getenv("THREADS", "")
if threads == "":
    try:
        threads = str(cpu_count())
    except:
        # cpu_count() can raise NotImplementedError on some platforms.
        threads = "4"
# Print header
print "Concuerror's Testsuite (%d threads)\n" % int(threads)
print "%-10s %-20s %-50s %s" % \
    ("Suite", "Test", "(Function, Preemption Bound, Reduction)", "Result")
print "---------------------------------------------" + \
    "---------------------------------------------"
# Create share integers to count tests and
# a lock to protect printings
lock = Lock()
total_tests = Value(c_int, 0, lock=False)
total_failed = Value(c_int, 0, lock=False)
# Semaphore throttling concurrency; acquired/released inside the workers.
sema = BoundedSemaphore(int(threads))
# For every test do
procT = []
for test in tests:
    p = Process(target=runTest, args=(test,))
    p.start()
    procT.append(p)
# Wait
for p in procT:
    p.join()
# Print overview
print "\nOVERALL SUMMARY for test run"
print "  %d total tests, which gave rise to" % len(tests)
print "  %d test cases, of which" % total_tests.value
print "  %d caused unexpected failures!" % total_failed.value
# Cleanup temp files
os.system("find %s -name '*.beam' -exec rm {} \;" % dirname)
| 3,913 | 0 | 90 |
7cc29cf11219a7ab5ae33a724505475581157527 | 2,890 | py | Python | go_make_venv.py | illuscio-dev/make_scripts | 9db5243761e1dc05afc7ea1fa36c2f3956456361 | [
"MIT"
] | 1 | 2021-05-03T06:23:12.000Z | 2021-05-03T06:23:12.000Z | go_make_venv.py | opencinemac/make_scripts | be06facb8a99e6375695df57b775b5f743d52f76 | [
"MIT"
] | null | null | null | go_make_venv.py | opencinemac/make_scripts | be06facb8a99e6375695df57b775b5f743d52f76 | [
"MIT"
] | 1 | 2021-04-28T22:25:07.000Z | 2021-04-28T22:25:07.000Z | import venv
import sys
import pathlib
import platform
from configparser import ConfigParser
CONFIG_PATH: pathlib.Path = pathlib.Path(__file__).parent.parent.parent / "setup.cfg"
PLATFORM = platform.system()
def load_cfg() -> ConfigParser:
    """Parse the library's setup.cfg (located via CONFIG_PATH) and return
    the resulting `ConfigParser`."""
    parser = ConfigParser()
    parser.read(CONFIG_PATH)
    return parser
def create_venv(lib_name: str, py_version: str) -> pathlib.Path:
"""
creates the new virtual environment
:param lib_name: name of library
:param py_version: string representation of two-digit python version (ie 37)
:return: path to venv
"""
venv_name = f"{lib_name}-go-{py_version}"
venv_path = pathlib.Path(f"~/venvs/{venv_name}").expanduser()
try:
venv_path.mkdir(parents=True, exist_ok=False)
except FileExistsError as error:
raise error
venv.create(env_dir=str(venv_path), with_pip=True, system_site_packages=True)
return venv_path
def register_venv(activate_path: pathlib.Path, lib_name: str, py_version: str) -> str:
    """
    registers the new environment with a ~/.bash_aliases entry for easy venv entry

    :param activate_path: path to virtual env activation script
    :param py_version: string representation of two-digit python version (ie 37)
    :param lib_name: name of library
    :return: bash alias to enter venv
    :raises RuntimeError: on operating systems other than macOS / Linux
    """
    lib_path: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.absolute()
    bash_alias = f"env_go-{lib_name}-{py_version}"
    # The alias cd's into the library and sources the venv activation script.
    command = f'alias {bash_alias}=\'cd "{lib_path}";source "{activate_path}"\''
    # Darwin and Linux were handled by duplicate branches writing the same
    # path; collapsed into a single membership test.
    if PLATFORM in ("Darwin", "Linux"):
        bash_rc_path = pathlib.Path("~/.bash_aliases").expanduser()
    else:
        raise RuntimeError("operating system not supported for venv creation")
    bash_rc_text = bash_rc_path.read_text() if bash_rc_path.exists() else ""
    if command in bash_rc_text:
        # Alias already registered - nothing to write.
        return bash_alias
    bash_rc_text += (
        f"\n"
        f"\n# {lib_name} development virtual env entry for Python {py_version}"
        f"\n{command}"
    )
    with bash_rc_path.open(mode="w") as f:
        f.write(bash_rc_text)
    return bash_alias
def main() -> None:
    """Create the development venv, register a shell alias for entering it
    and write that alias to stdout."""
    version_tag = f"{sys.version_info[0]}{sys.version_info[1]}"
    lib_name = load_cfg().get("metadata", "name")
    env_dir = create_venv(lib_name, version_tag)
    alias = register_venv(env_dir / "bin" / "activate", lib_name, version_tag)
    sys.stdout.write(str(alias))
if __name__ == "__main__":
    # Script entry point: build the venv and emit the activation alias.
    main()
| 27.788462 | 86 | 0.680277 | import venv
import sys
import pathlib
import platform
from configparser import ConfigParser
CONFIG_PATH: pathlib.Path = pathlib.Path(__file__).parent.parent.parent / "setup.cfg"
PLATFORM = platform.system()
def load_cfg() -> ConfigParser:
    """Parse the library's setup.cfg (located via CONFIG_PATH) and return
    the resulting `ConfigParser`."""
    parser = ConfigParser()
    parser.read(CONFIG_PATH)
    return parser
def create_venv(lib_name: str, py_version: str) -> pathlib.Path:
"""
creates the new virtual environment
:param lib_name: name of library
:param py_version: string representation of two-digit python version (ie 37)
:return: path to venv
"""
venv_name = f"{lib_name}-go-{py_version}"
venv_path = pathlib.Path(f"~/venvs/{venv_name}").expanduser()
try:
venv_path.mkdir(parents=True, exist_ok=False)
except FileExistsError as error:
raise error
venv.create(env_dir=str(venv_path), with_pip=True, system_site_packages=True)
return venv_path
def register_venv(activate_path: pathlib.Path, lib_name: str, py_version: str) -> str:
    """
    registers the new environment with a ~/.bash_aliases entry for easy venv entry

    :param activate_path: path to virtual env activation script
    :param py_version: string representation of two-digit python version (ie 37)
    :param lib_name: name of library
    :return: bash alias to enter venv
    :raises RuntimeError: on operating systems other than macOS / Linux
    """
    lib_path: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.absolute()
    bash_alias = f"env_go-{lib_name}-{py_version}"
    # The alias cd's into the library and sources the venv activation script.
    command = f'alias {bash_alias}=\'cd "{lib_path}";source "{activate_path}"\''
    # Darwin and Linux were handled by duplicate branches writing the same
    # path; collapsed into a single membership test.
    if PLATFORM in ("Darwin", "Linux"):
        bash_rc_path = pathlib.Path("~/.bash_aliases").expanduser()
    else:
        raise RuntimeError("operating system not supported for venv creation")
    bash_rc_text = bash_rc_path.read_text() if bash_rc_path.exists() else ""
    if command in bash_rc_text:
        # Alias already registered - nothing to write.
        return bash_alias
    bash_rc_text += (
        f"\n"
        f"\n# {lib_name} development virtual env entry for Python {py_version}"
        f"\n{command}"
    )
    with bash_rc_path.open(mode="w") as f:
        f.write(bash_rc_text)
    return bash_alias
def main() -> None:
    """Create the development venv, register a shell alias for entering it
    and write that alias to stdout."""
    version_tag = f"{sys.version_info[0]}{sys.version_info[1]}"
    lib_name = load_cfg().get("metadata", "name")
    env_dir = create_venv(lib_name, version_tag)
    alias = register_venv(env_dir / "bin" / "activate", lib_name, version_tag)
    sys.stdout.write(str(alias))
if __name__ == "__main__":
    # Script entry point: build the venv and emit the activation alias.
    main()
| 0 | 0 | 0 |
206fee9ba129abbc928f9b3f244e338cc2c477db | 6,227 | py | Python | tests/test_config_output.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | [
"Apache-2.0"
] | 1 | 2021-09-06T23:22:07.000Z | 2021-09-06T23:22:07.000Z | tests/test_config_output.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | [
"Apache-2.0"
] | 11 | 2021-09-07T14:13:39.000Z | 2021-09-29T15:17:46.000Z | tests/test_config_output.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | [
"Apache-2.0"
] | null | null | null | import os
from io import StringIO
from pathlib import Path
from tests.common import HouseParameters, HouseStyle, RoofMaterial, test_data_dir
from ext_argparse import process_arguments, save_defaults, dump, add_comments_from_help, process_settings_file
| 41.513333 | 113 | 0.705476 | import os
from io import StringIO
from pathlib import Path
from tests.common import HouseParameters, HouseStyle, RoofMaterial, test_data_dir
from ext_argparse import process_arguments, save_defaults, dump, add_comments_from_help, process_settings_file
def test_process_settings_file_with_generate_defaults(test_data_dir):
output_settings_path = os.path.join(test_data_dir, "enum_setting_defaults3.yaml")
process_settings_file(HouseParameters, output_settings_path, generate_default_settings_if_missing=True)
assert HouseParameters.sturdiness.value == 5.0
assert HouseParameters.year_built.value == 2000
assert HouseParameters.roof.year_changed.value == 2010
assert HouseParameters.style.value == HouseStyle.CRAFTSMAN_BUNGALO
assert HouseParameters.roof.roof_material.value == RoofMaterial.SLATE
def test_save_defaults(test_data_dir):
output_settings_path = os.path.join(test_data_dir, "enum_setting_defaults.yaml")
if os.path.exists(output_settings_path):
to_remove = Path(output_settings_path)
to_remove.unlink()
save_defaults(HouseParameters, output_settings_path, save_help_comments=False)
HouseParameters.sturdiness.argument = 10.0
HouseParameters.year_built.argument = 2002
process_arguments(HouseParameters, "Parameters of the house to repair.", argv=[
f"--settings_file={output_settings_path}"])
assert HouseParameters.sturdiness.value == 5.0
assert HouseParameters.year_built.value == 2000
assert HouseParameters.roof.year_changed.value == 2010
assert HouseParameters.style.value == HouseStyle.CRAFTSMAN_BUNGALO
assert HouseParameters.roof.roof_material.value == RoofMaterial.SLATE
with open(output_settings_path, 'r') as file:
lines = file.readlines()
# make sure we're actually reading from the file, not coming up with default settings
assert len(lines) == 6
assert lines[5] == "style: CRAFTSMAN_BUNGALO\n"
def test_save_defaults_with_comments(test_data_dir):
output_settings_path = os.path.join(test_data_dir, "enum_setting_defaults_with_comments.yaml")
if os.path.exists(output_settings_path):
to_remove = Path(output_settings_path)
to_remove.unlink()
save_defaults(HouseParameters, output_settings_path, save_help_comments=True, line_length_limit=100)
HouseParameters.sturdiness.argument = 12.0
HouseParameters.year_built.argument = 2003
process_arguments(HouseParameters, "Parameters of the house to repair.", argv=[
f"--settings_file={output_settings_path}"])
assert HouseParameters.sturdiness.value == 5.0
assert HouseParameters.year_built.value == 2000
assert HouseParameters.roof.year_changed.value == 2010
assert HouseParameters.style.value == HouseStyle.CRAFTSMAN_BUNGALO
assert HouseParameters.roof.roof_material.value == RoofMaterial.SLATE
with open(output_settings_path, 'r') as file:
lines = file.readlines()
# make sure we're actually reading from the file, not coming up with default settings
assert lines[0] == "# Sturdiness of the house.\n"
assert lines[5] == " # The last year when the roof tiles were changed.\n"
assert lines[12] == "# 'NEOCLASSICAL', 'MEDITERRANEAN']\n"
def test_add_comments_from_help(test_data_dir):
output_settings_path = os.path.join(test_data_dir, "enum_setting_defaults_with_comments.yaml")
if os.path.exists(output_settings_path):
to_remove = Path(output_settings_path)
to_remove.unlink()
save_defaults(HouseParameters, output_settings_path, save_help_comments=False)
add_comments_from_help(HouseParameters, Path(output_settings_path), line_length_limit=100)
with open(output_settings_path, 'r') as file:
lines = file.readlines()
# make sure we're actually reading from the file, not coming up with default settings
assert lines[0] == "# Sturdiness of the house.\n"
assert lines[5] == " # The last year when the roof tiles were changed.\n"
assert lines[12] == "# 'NEOCLASSICAL', 'MEDITERRANEAN']\n"
def test_dump_parameters():
process_arguments(HouseParameters, "Parameters of the house to repair.", argv=[
"--sturdiness=6.0",
"--year_built=2001",
"--roof.year_changed=2012",
"--style=CONTEMPORARY",
"--roof.roof_material=SOLAR"
])
string_stream = StringIO()
dump(HouseParameters, string_stream)
ground_truth_lines = [
"sturdiness: 6.0",
"year_built: 2001",
"roof:",
" year_changed: 2012",
" roof_material: SOLAR",
"style: CONTEMPORARY",
""]
ground_truth_string = '\n'.join(ground_truth_lines)
output_string = string_stream.getvalue()
assert output_string == ground_truth_string
def test_dump_parameters_with_comments():
process_arguments(HouseParameters, "Parameters of the house to repair.", argv=[
"--sturdiness=6.0",
"--year_built=2001",
"--roof.year_changed=2012",
"--style=CONTEMPORARY",
"--roof.roof_material=SOLAR"
])
string_stream = StringIO()
dump(HouseParameters, string_stream, save_help_comments=True, line_length_limit=100)
ground_truth_lines = [
"# Sturdiness of the house.",
"sturdiness: 6.0",
"# The year the house was built.",
"year_built: 2001",
"roof:",
" # The last year when the roof tiles were changed.",
" year_changed: 2012",
" # Material of the roof tiles.| Can be set to one of: ['SLATE', 'METAL', 'CONCRETE', 'COMPOSITE',",
" # 'SOLAR', 'CLAY', 'SYNTHETIC_BARREL', 'SYNTHETIC_SLATE', 'SYNTHETIC_CEDAR']",
" roof_material: SOLAR",
"# Style of da house.| Can be set to one of: ['CRAFTSMAN_BUNGALO', 'CAPE_COD', 'RANCH', 'CONTEMPORARY',",
"# 'QUEEN_ANNE', 'COLONIAL_REVIVAL', 'TUDOR_REVIVAL', 'TOWNHOUSE', 'PRAIRIE', 'MID_CENTURY_MODERN',",
"# 'NEOCLASSICAL', 'MEDITERRANEAN']",
"style: CONTEMPORARY",
""
]
ground_truth_string = '\n'.join(ground_truth_lines)
output_string = string_stream.getvalue()
assert output_string == ground_truth_string
| 5,829 | 0 | 138 |
07edeeb03434484a568e9f4426cf7e4049646292 | 9,060 | py | Python | Procedurals/BSPDungeonGenerator.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | 1 | 2017-12-12T15:34:54.000Z | 2017-12-12T15:34:54.000Z | Procedurals/BSPDungeonGenerator.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | null | null | null | Procedurals/BSPDungeonGenerator.py | sidav/ShadowPriest | 0ab3f9e4dde03237dff7389d0654112f1d1994e9 | [
"MIT"
] | null | null | null | #############################################################################
_LCG_X = None #
#
#############################################################################
_MIN_SPLIT_FACTOR = 40 #In percent
_MAX_SPLIT_FACTOR = 100 - _MIN_SPLIT_FACTOR #In percent
_MIN_ROOM_WIDTH = 3
_MIN_ROOM_HEIGHT = 3
_SPLITS = 12
_FLOOR_CODE = 'floor'
_WALL_CODE = 'wall'
_DOOR_CODE = 'door'
#############################################################################################################
#############################################################################################################
| 40.995475 | 168 | 0.487528 | #############################################################################
def _random(min, max): #IT'S JUST A WRAPPER. Min, max inclusive! #
return _rand(max-min+1)+min #
#
_LCG_X = None #
#
def setRandomSeed(seed): # FOR TEH GREAT INDEPENDENCY!
global _LCG_X #
_LCG_X = seed #
#
def _rand(mod): #
global _LCG_X #
if _LCG_X is None: #
_LCG_X = 7355608 #
LCG_A = 14741 #
LCG_C = 757 #
LCG_M = 77777677777 #
_LCG_X = (LCG_A*_LCG_X + LCG_C) % LCG_M #
return _LCG_X%mod #
#############################################################################
_MIN_SPLIT_FACTOR = 40 #In percent
_MAX_SPLIT_FACTOR = 100 - _MIN_SPLIT_FACTOR #In percent
_MIN_ROOM_WIDTH = 3
_MIN_ROOM_HEIGHT = 3
_SPLITS = 12
_FLOOR_CODE = 'floor'
_WALL_CODE = 'wall'
_DOOR_CODE = 'door'
class treeNode:
def __init__(self, parent=None, cont=None):
self.parent = parent
self.left = None
self.right = None
self.cont = cont
def get_leafs_of_level(self, lvl, nodelist=None): #should be called from the root node only
if nodelist == None:
nodelist = []
if lvl == 0:
nodelist.append(self)
else:
if self.left is not None:
self.left.get_leafs_of_level(lvl - 1, nodelist)
if self.right is not None:
self.right.get_leafs_of_level(lvl - 1, nodelist)
return nodelist
def getLeafs(self, leafs=None):
if leafs == None:
leafs = []
if self.left is None and self.right is None:
leafs.append(self)
if self.left is not None:
self.left.getLeafs(leafs)
if self.right is not None:
self.right.getLeafs(leafs)
return leafs
def splitSelf(self): #BSP splitting
selfx = self.cont.x
selfy = self.cont.y
selfw = self.cont.w
selfh = self.cont.h
horiz = _random(0, 1) # 1 is horizontal splitting, 0 is vertical
for _ in range(5): #5 is just a number of tries
horizOK = True
vertOK = True
factor = _random(_MIN_SPLIT_FACTOR, _MAX_SPLIT_FACTOR)
lefthorizh = selfh*factor//100
righthorizh = selfh - lefthorizh
leftvertw = selfw*factor//100
rightvertw = selfw - leftvertw
if (lefthorizh < _MIN_ROOM_HEIGHT or righthorizh < _MIN_ROOM_HEIGHT):
horiz = 0
horizOK = False
if (leftvertw < _MIN_ROOM_WIDTH or rightvertw < _MIN_ROOM_WIDTH):
vertOK = False
continue
if not (horizOK and vertOK):
return
if horiz == 1: #horizontal split
leftc = Container(selfx, selfy, selfw, lefthorizh, "LHORIZONTAL")
rightc = Container(selfx, selfy+lefthorizh, selfw, righthorizh, "RHORIZONTAL")
self.left = treeNode(self, leftc)
self.right = treeNode(self, rightc)
else: #vertical split
leftc = Container(selfx, selfy, leftvertw, selfh, "LVERTICAL")
rightc = Container(selfx+leftvertw, selfy, rightvertw, selfh, "RVERTICAL")
self.left = treeNode(self, leftc)
self.right = treeNode(self, rightc)
class Container:
def __init__(self, x, y, w, h, vh = "UNDEF"):
self.x = x
self.y = y
self.w = w
self.h = h
self.vh = vh
def addToMap(self, arr):
x0 = self.x-1
y0 = self.y-1
h = self.h+1
w = self.w+1
for i in range(x0, x0 + w):
arr[i][y0] = _WALL_CODE
try:
arr[i][y0+h-1] = _WALL_CODE
except:
print("BSP SECTOR PLACEMENT ERROR AT x {}, y {}".format(i, y0+h-1))
for j in range(y0, y0 + h):
arr[x0][j] = _WALL_CODE
try:
arr[x0+w-1][j] = _WALL_CODE
except:
print("BSP SECTOR PLACEMENT ERROR AT x {}, y {}".format(i, y0+h-1))
#############################################################################################################
#############################################################################################################
def splitNTimes(root, N):
for _ in range(N):
leafs = root.getLeafs()
for l in leafs:
l.splitSelf()
def placeConnections(root, arr):
# the following loop will draw the connections between the nodes with the same parent.
# It creates smth like doorways or even removes some walls.
# I'm glad of the result. Really.
traverseEnded = False
curlvl = 0
while not traverseEnded:
a = root.get_leafs_of_level(curlvl)
if len(a) is 0:
traverseEnded = True
for i in a:
if i.left is not None and i.right is not None:
fx = i.left.cont.x + i.left.cont.w // 2
fy = i.left.cont.y + i.left.cont.h // 2
tx = i.right.cont.x + i.right.cont.w // 2
ty = i.right.cont.y + i.right.cont.h // 2
if fx == tx:
for k in range(fy, ty + 1):
arr[fx][k] = _FLOOR_CODE
elif fy == ty:
for k in range(fx, tx + 1):
arr[k][fy] = _FLOOR_CODE
curlvl += 1
def placeDoors(arr):
for i in range(1, len(arr)-1):
for j in range(1, len(arr[0])-1):
#horizontal doors:
if arr[i][j] == _FLOOR_CODE and arr[i][j-1] == _WALL_CODE and arr[i][j+1] == _WALL_CODE and arr[i-1][j] == _FLOOR_CODE and arr[i+1][j] == _FLOOR_CODE \
and (arr[i-1][j - 1] == _FLOOR_CODE or arr[i+1][j - 1] == _FLOOR_CODE or arr[i-1][j + 1] == _FLOOR_CODE or arr[i+1][j + 1] == _FLOOR_CODE ):
arr[i][j] = _DOOR_CODE
#vertical doors:
elif arr[i][j] == _FLOOR_CODE and arr[i-1][j] == _WALL_CODE and arr[i+1][j] == _WALL_CODE and arr[i][j-1] == _FLOOR_CODE and arr[i][j+1] == _FLOOR_CODE \
and (arr[i - 1][j - 1] == _FLOOR_CODE or arr[i + 1][j - 1] == _FLOOR_CODE or arr[i - 1][j + 1] == _FLOOR_CODE or arr[i + 1][j + 1] == _FLOOR_CODE):
arr[i][j] = _DOOR_CODE
def makeWallOutline(arr): #just to be sure that the map won't be open to its borders.
for i in range(len(arr)):
for j in range(len(arr[0])):
if i == 0 or j == 0 or i == len(arr)-1 or j == len(arr[0])-1:
arr[i][j] = _WALL_CODE
def setTileCodes(floor, wall, door):
global _FLOOR_CODE, _WALL_CODE, _DOOR_CODE
_FLOOR_CODE = floor
_WALL_CODE = wall
_DOOR_CODE = door
def __generateMap(mapW, mapH):
outp = [[_FLOOR_CODE] * (mapH) for _ in range(mapW)]
con = Container(1, 1, mapW-1, mapH-1)
BSPRoot = treeNode(cont = con)
splitNTimes(BSPRoot, _SPLITS)
leafs = BSPRoot.getLeafs()
for i in leafs:
i.cont.addToMap(outp)
placeConnections(BSPRoot, outp)
placeDoors(outp)
#makeWallOutline(outp) <-- No need now!
return outp
def generateMapWithRandomParams(mapW, mapH, seed = -1): #this may cause crap
global _MIN_SPLIT_FACTOR, _MAX_SPLIT_FACTOR, _MIN_ROOM_WIDTH, _MIN_ROOM_HEIGHT, _SPLITS
if seed != -1:
setRandomSeed(seed)
_MIN_SPLIT_FACTOR = _random(20, 50)
_MAX_SPLIT_FACTOR = 100 - _MIN_SPLIT_FACTOR
_MIN_ROOM_WIDTH = _random(2, 4)
_MIN_ROOM_HEIGHT = _random(2, 5)
_SPLITS = _random(2,25)
return __generateMap(mapW, mapH)
def generateMapWithGivenParams(mapW, mapH, minSplitFactor = 40, minRoomWidth = 3, minRoomHeight = 3, splits = 12, seed = -1): #this will not cause crap most of the time
global _MIN_SPLIT_FACTOR, _MAX_SPLIT_FACTOR, _MIN_ROOM_WIDTH, _MIN_ROOM_HEIGHT, _SPLITS
if seed != -1:
setRandomSeed(seed)
_MIN_SPLIT_FACTOR = minSplitFactor # In percent
_MAX_SPLIT_FACTOR = 100 - _MIN_SPLIT_FACTOR # In percent
_MIN_ROOM_WIDTH = minRoomWidth
_MIN_ROOM_HEIGHT = minRoomHeight
_SPLITS = splits
return __generateMap(mapW, mapH)
| 7,870 | -11 | 455 |
25841aec8d34dd1399a61fd5df3b0157891726bc | 891 | py | Python | boosup/boosup/performance/urls.py | developertqw2017/migrationDjango | f7256ec2af51da1179d2f957e1aa896191b7b514 | [
"MIT"
] | null | null | null | boosup/boosup/performance/urls.py | developertqw2017/migrationDjango | f7256ec2af51da1179d2f957e1aa896191b7b514 | [
"MIT"
] | 16 | 2020-02-11T23:19:19.000Z | 2022-03-11T23:33:40.000Z | boosup/boosup/performance/urls.py | developertqw2017/migrationDjango | f7256ec2af51da1179d2f957e1aa896191b7b514 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf8
'''
Created on 2016/9/19
@author: cloudy
'''
from django.conf.urls import url
from . import views
# URL routes for the performance (appraisal) app.
urlpatterns = [
    # Performance home page.
    url(r'^index/$',views.index,name='index'),
    # URL receiving the POST request for the performance trend chart.
    url(r'^echart/$',views.echart,name='echart'),
    # Performance settings.
    url(r'^setting/$',views.setting,name='setting'),
    # Performance appraisal scoring page.
    url(r'^list/$',views.list,name='list'),
    # Performance appraisal scoring page (newlist variant).
    url(r'^newlist/$',views.newlist,name='newlist'),
    # Performance appraisal submission.
    url(r'^post/$',views.action_post,name='action_post'),
    # Performance appraisal submission (completion-check handler).
    url(r'^check/$', views.check_done, name='check_done'),
    # History records.
    url(r'^history/$', views.history, name='history'),
    # Performance appraisal detail (route currently disabled).
    #url(r'^detail/$', views.detail, name='detail'),
    # Monthly performance appraisal detail.
    url(r'^month_detail/$', views.month_detail, name='month_detail'),
    # Result viewing.
    url(r'^result/$',views.result,name='result'),
] | 23.447368 | 69 | 0.618406 | #!/usr/bin/env python
#coding=utf8
'''
Created on 2016/9/19
@author: cloudy
'''
from django.conf.urls import url
from . import views
urlpatterns = [
#绩效首页
url(r'^index/$',views.index,name='index'),
#绩效趋势post 请求url
url(r'^echart/$',views.echart,name='echart'),
#绩效设置
url(r'^setting/$',views.setting,name='setting'),
#绩效考核打分页面
url(r'^list/$',views.list,name='list'),
#绩效考核打分页面
url(r'^newlist/$',views.newlist,name='newlist'),
#绩效考核提交
url(r'^post/$',views.action_post,name='action_post'),
# 绩效考核提交
url(r'^check/$', views.check_done, name='check_done'),
# 历史记录
url(r'^history/$', views.history, name='history'),
# 绩效考核详情
#url(r'^detail/$', views.detail, name='detail'),
# 绩效考核月度详情
url(r'^month_detail/$', views.month_detail, name='month_detail'),
#结果查看
url(r'^result/$',views.result,name='result'),
] | 0 | 0 | 0 |
fce02a6f6198d19f7d8e63f6dc38abbbcb34ee90 | 2,642 | py | Python | vehicle_tracking/license-plate-ocr.py | nickchenchj/darknet | 49e0dcfed005a8982e4fc80885f234e9cffee4c8 | [
"BSD-3-Clause"
] | null | null | null | vehicle_tracking/license-plate-ocr.py | nickchenchj/darknet | 49e0dcfed005a8982e4fc80885f234e9cffee4c8 | [
"BSD-3-Clause"
] | null | null | null | vehicle_tracking/license-plate-ocr.py | nickchenchj/darknet | 49e0dcfed005a8982e4fc80885f234e9cffee4c8 | [
"BSD-3-Clause"
] | null | null | null | import sys
import cv2
import numpy as np
import traceback
import darknet.python.darknet as dn
from os.path import splitext, basename
from glob import glob
from darknet.python.darknet import detect
from src.label import dknet_label_conversion
from src.utils import nms
if __name__ == '__main__':
    # Usage: license-plate-ocr.py <input/output directory> <target plate>
    # Runs the OCR network over every *lp.png crop in the directory until the
    # target plate string is found, writing per-image *_str.txt files and a
    # target.txt naming the matching crop.
    try:
        input_dir = sys.argv[1].rstrip('/')
        output_dir = input_dir
        lp_target = sys.argv[2]

        ocr_threshold = .4

        # darknet's C API expects byte-string file paths
        ocr_weights = bytes('data/ocr/ocr-net.weights', encoding='utf-8')
        ocr_netcfg = bytes('data/ocr/ocr-net.cfg', encoding='utf-8')
        ocr_dataset = bytes('data/ocr/ocr-net.data', encoding='utf-8')

        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)

        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

        print('Performing OCR...')
        print('Target: %s' % lp_target)

        target_found = False
        for img_path in imgs_paths:
            # The flag is set at the end of the matching iteration so that the
            # "found" message is printed at the start of the next one.
            if target_found:
                print('\tTarget found. Ending OCR...')
                break
            print('\tScanning %s' % img_path)
            bname = basename(splitext(img_path)[0])
            R, (width, height) = detect(ocr_net, ocr_meta, img_path,
                                        thresh=ocr_threshold, nms=None)
            if len(R):
                # Convert raw detections to labels, suppress overlaps, and
                # order the characters left-to-right before joining them.
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                lp_len = len(lp_str)
                # Plausible plates are 6-7 characters; skip everything else.
                if 6 <= lp_len <= 7:
                    print('\t\tLP: %s' % lp_str)
                    if lp_str == lp_target:
                        target_found = True
                        # Erases the trailing substring "_lp" from `bname`.
                        # original format of bname: "out<frame_id>_<object_id><class_name>_lp"
                        # modified format of bname: "out<frame_id>_<object_id><class_name>"
                        bname_target = bname.rsplit('_', 1)[0]
                        with open('%s/target.txt' % (output_dir), 'w') as f:
                            f.write(bname_target + '\n')
                    with open('%s/%s_str.txt' % (output_dir, bname), 'w') as f:
                        f.write(lp_str + '\n')
            else:
                print('No characters found')
        if not target_found:
            print('\tTarget not found. Ending OCR...')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer swallowed; genuine errors still exit with status 1.
        traceback.print_exc()
        sys.exit(1)
    sys.exit(0)
| 30.72093 | 98 | 0.520818 | import sys
import cv2
import numpy as np
import traceback
import darknet.python.darknet as dn
from os.path import splitext, basename
from glob import glob
from darknet.python.darknet import detect
from src.label import dknet_label_conversion
from src.utils import nms
if __name__ == '__main__':
try:
input_dir = sys.argv[1].rstrip('/')
output_dir = input_dir
lp_target = sys.argv[2]
ocr_threshold = .4
ocr_weights = bytes('data/ocr/ocr-net.weights', encoding='utf-8')
ocr_netcfg = bytes('data/ocr/ocr-net.cfg', encoding='utf-8')
ocr_dataset = bytes('data/ocr/ocr-net.data', encoding='utf-8')
ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
ocr_meta = dn.load_meta(ocr_dataset)
imgs_paths = sorted(glob('%s/*lp.png' % output_dir))
print('Performing OCR...')
print('Target: %s' % lp_target)
target_found = False
for i,img_path in enumerate(imgs_paths):
if target_found == True:
print('\tTarget found. Ending OCR...')
break
print('\tScanning %s' % img_path)
bname = basename(splitext(img_path)[0])
R,(width,height) = detect(ocr_net, ocr_meta, img_path ,thresh=ocr_threshold, nms=None)
if len(R):
L = dknet_label_conversion(R,width,height)
L = nms(L,.45)
L.sort(key=lambda x: x.tl()[0])
lp_str = ''.join([chr(l.cl()) for l in L])
lp_len = len(lp_str)
if lp_len >= 6 and lp_len <= 7:
print('\t\tLP: %s' % lp_str)
if lp_str == lp_target:
target_found = True
# Erases the trailing substring "_lp" from `bname`.
# original format of bname: "out<frame_id>_<object_id><class_name>_lp"
# modified format of bname: "out<frame_id>_<object_id><class_name>"
bname_target = bname.rsplit('_', 1)[0]
with open('%s/target.txt' % (output_dir), 'w') as f:
f.write(bname_target + '\n')
with open('%s/%s_str.txt' % (output_dir,bname),'w') as f:
f.write(lp_str + '\n')
else:
print('No characters found')
if target_found == False:
print('\tTarget not found. Ending OCR...')
except:
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| 0 | 0 | 0 |
947660da92354b19b18c068cc4a2b0144eb7145d | 731 | py | Python | pypadre/pod/app/code_app.py | padre-lab-eu/pypadre | c244a5f1d4eb7bf168cc06dd9b43416883534268 | [
"MIT"
] | 3 | 2019-12-19T13:29:52.000Z | 2019-12-20T07:32:05.000Z | pypadre/pod/app/code_app.py | padre-lab-eu/pypadre | c244a5f1d4eb7bf168cc06dd9b43416883534268 | [
"MIT"
] | 1 | 2019-12-16T13:39:24.000Z | 2019-12-16T13:39:24.000Z | pypadre/pod/app/code_app.py | padre-lab-eu/pypadre | c244a5f1d4eb7bf168cc06dd9b43416883534268 | [
"MIT"
] | null | null | null | from typing import List
from pypadre.core.validation.validation import ValidateableFactory
from pypadre.pod.app.base_app import BaseChildApp
from pypadre.pod.repository.i_repository import ICodeRepository
from pypadre.pod.service.code_service import CodeService
| 34.809524 | 82 | 0.731874 | from typing import List
from pypadre.core.validation.validation import ValidateableFactory
from pypadre.pod.app.base_app import BaseChildApp
from pypadre.pod.repository.i_repository import ICodeRepository
from pypadre.pod.service.code_service import CodeService
class CodeApp(BaseChildApp):
    """Child application exposing creation and persistence of code objects.

    Wraps a :class:`CodeService` built over the supplied code repositories.
    """

    def __init__(self, parent, backends: List[ICodeRepository], **kwargs):
        service = CodeService(backends=backends)
        super().__init__(parent, service=service, **kwargs)

    def create(self, *args, clz, handlers=None, **kwargs):
        """Build a validated instance of *clz* via the ValidateableFactory."""
        if handlers is None:
            handlers = []
        return ValidateableFactory.make(clz, *args, handlers=handlers, **kwargs)

    def put(self, obj, **kwargs):
        """Persist *obj* through the underlying code service."""
        return self.service.put(obj, **kwargs)
dbf2ab7c80207b2d8cadfbed64731197538a2e95 | 2,625 | py | Python | toontown/rpc/ToontownRPCHandlerBase.py | MasterLoopyBM/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | 1 | 2020-02-07T18:15:12.000Z | 2020-02-07T18:15:12.000Z | toontown/rpc/ToontownRPCHandlerBase.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | null | null | null | toontown/rpc/ToontownRPCHandlerBase.py | journeyfan/toontown-journey | 7a4db507e5c1c38a014fc65588086d9655aaa5b4 | [
"MIT"
] | 2 | 2020-09-26T20:37:18.000Z | 2020-11-15T20:55:33.000Z | import base64
from direct.directnotify.DirectNotifyGlobal import directNotify
import json
import time
from Crypto.Cipher import AES
UNKNOWN = 700
USER = 100
COMMUNITY_MANAGER = 200
MODERATOR = 300
ARTIST = 400
PROGRAMMER = 500
ADMINISTRATOR = 600
SYSTEM_ADMINISTRATOR = 700
rpcmethod = RPCMethod
| 30.523256 | 91 | 0.629714 | import base64
from direct.directnotify.DirectNotifyGlobal import directNotify
import json
import time
from Crypto.Cipher import AES
UNKNOWN = 700
USER = 100
COMMUNITY_MANAGER = 200
MODERATOR = 300
ARTIST = 400
PROGRAMMER = 500
ADMINISTRATOR = 600
SYSTEM_ADMINISTRATOR = 700
class RPCMethod:
    """Decorator class that tags an RPC handler function with the minimum
    access level required to invoke it (checked in authenticate())."""

    def __init__(self, accessLevel=UNKNOWN):
        self.accessLevel = accessLevel

    def __call__(self, method):
        # Stamp the access level onto the function and return it unchanged.
        method.accessLevel = self.accessLevel
        return method
rpcmethod = RPCMethod
class ToontownRPCHandlerBase:
    """Base class for RPC handlers: holds the AIR reference and implements
    token authentication for @rpcmethod-decorated handlers."""
    notify = directNotify.newCategory('ToontownRPCHandlerBase')

    def __init__(self, air):
        # air: the AI repository this handler services.
        self.air = air

    def authenticate(self, token, method):
        """
        Ensure the provided token is valid, and meets the access level
        requirements of the method.

        Returns a (JSON-RPC error code, message) tuple on failure, or None
        (implicitly) when the token is accepted.

        NOTE(review): `config` is used as a global here (not imported in this
        file) -- presumably injected as a builtin by the engine; confirm.
        NOTE(review): cipher.decrypt() returns bytes on Python 3, where
        .replace('\x00', '') would raise TypeError -- this code path assumes
        Python 2 str semantics; verify the runtime.
        """
        # First, base64 decode the token:
        try:
            token = base64.b64decode(token)
        except TypeError:
            return (-32001, 'Token decode failure')
        # Ensure this token is a valid size:
        # (must be non-empty and a whole number of AES blocks)
        if (not token) or ((len(token) % 16) != 0):
            return (-32002, 'Invalid token length')
        # Next, decrypt the token using AES-128 in CBC mode:
        rpcServerSecret = config.GetString('rpc-server-secret', '6163636f756e7473')
        # Ensure that our secret is the correct size:
        # (AES requires the key length to equal the block size here)
        if len(rpcServerSecret) > AES.block_size:
            self.notify.error('rpc-server-secret is too big!')
        elif len(rpcServerSecret) < AES.block_size:
            self.notify.error('rpc-server-secret is too small!')
        # Take the initialization vector off the front of the token:
        iv = token[:AES.block_size]
        # Truncate the token to get our cipher text:
        cipherText = token[AES.block_size:]
        # Decrypt!
        cipher = AES.new(rpcServerSecret, mode=AES.MODE_CBC, IV=iv)
        try:
            # Strip zero-padding, then parse the JSON payload; both required
            # fields must be integers or the token is rejected.
            token = json.loads(cipher.decrypt(cipherText).replace('\x00', ''))
            if ('timestamp' not in token) or (not isinstance(token['timestamp'], int)):
                raise ValueError
            if ('accesslevel' not in token) or (not isinstance(token['accesslevel'], int)):
                raise ValueError
        except ValueError:
            return (-32003, 'Invalid token')
        # Next, check if this token has expired:
        period = config.GetInt('rpc-token-period', 5)
        delta = int(time.time()) - token['timestamp']
        if delta > period:
            return (-32004, 'Token expired')
        # Finally, enforce the access level required by the target method:
        if token['accesslevel'] < method.accessLevel:
            return (-32005, 'Insufficient access')
| 158 | 2,065 | 99 |
dc8a4c0c98ba3c54cc164d9bbee0155c0196abfc | 901 | py | Python | hedp/eos/ionmix.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | [
"CECILL-B"
] | 9 | 2015-04-07T12:45:40.000Z | 2020-10-26T14:40:49.000Z | hedp/eos/ionmix.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | [
"CECILL-B"
] | 9 | 2015-10-20T13:01:09.000Z | 2016-09-09T15:24:36.000Z | hedp/eos/ionmix.py | luli/hedp | ab78879106ef2d7b6e54ac6a69d24439ec8c9a8b | [
"CECILL-B"
] | 12 | 2015-12-17T14:24:29.000Z | 2021-04-26T13:42:48.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import numpy as np
from scipy.constants import N_A
def ionmix_validity(rho, temp, Zbar, Abar):
    """Check whether points lie inside the IONMIX EoS validity domain.

    The domain is defined by an ion number density below
    1e20 * (T / Zbar)**3 cm^-3, which is equivalent to the test
    T >= (rho * N_A * Zbar**3 / (1e20 * Abar))**(1/3).

    Parameters
    ----------
    - rho:  ndarray: density [g.cm^-3]
    - temp: ndarray: temperature [eV]
    - Zbar: ndarray: average ionization
    - Abar: ndarray or float: average atomic mass

    Returns
    -------
    - valid: boolean ndarray (or bool for scalar input), True where the
      point is inside the validity domain
    """
    threshold = (rho*N_A*Zbar**3/(1e20*Abar))**(1./3)
    return temp >= threshold
| 27.30303 | 72 | 0.642619 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import numpy as np
from scipy.constants import N_A
def ionmix_validity(rho, temp, Zbar, Abar):
"""
Returns the distance to the IONMIX EoS validity domain:
if Ion_density < 1e20 (T/Zbar)³ cm⁻³ return 0
else: return orthogonal distance to that validity limit
in the log log space
Parameters
----------
- rho: ndarray: density [g.cm⁻³]
- temp:ndarray: temperature [eV]
- Zbar:ndarray: average ionization
- Abar:ndarray or float: average atomic mass
Returns
-------
- d: ndarray: distance to the validity region in
log log space
"""
return temp >= (rho*N_A*Zbar**3/(1e20*Abar))**(1./3)
| 0 | 0 | 0 |
7a50d68592a2409060080bce73ef482f58f014a7 | 17,986 | py | Python | du/trial.py | diogo149/doo | d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f | [
"MIT"
] | 1 | 2016-11-17T06:34:39.000Z | 2016-11-17T06:34:39.000Z | du/trial.py | diogo149/doo | d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f | [
"MIT"
] | null | null | null | du/trial.py | diogo149/doo | d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f | [
"MIT"
] | null | null | null | """
trial is a way of creating a project
features:
- copies the source of the file you're using to run it
(to have a copy of the file)
(WARNING: there can be a race condition if the file changes before the trial
code is run - the earlier you import du, the less likely this is to
occur, due to the cache_inspect function)
- creates a directory where files for the run can be stored without worrying
about collisions
- that directory also has a temporary directory that is cleared out
- allows storing intermediate values, to later be retrieved
- seeds random number generator
- creates a summary file (in trial.yml) with all of the important stored
parameters
WARNING: when canceling a trial run, don't spam CTRL-C, since that can cancel
the clean up operations too
eg.
# in some file:
with run_trial("some_trial_name") as trial:
# printed stuff is stored to _trials_dir_/some_trial_name/1/log.txt
print "Hello world"
# storing values
trial.store_important("foo", 1)
trial.store_important("foo", 2)
trial.get_important("foo") # returns [1, 2]
"""
import os
import sys
import re
import logging
import shutil
import datetime
import inspect
import importlib
import contextlib
from collections import defaultdict
import six
import setproctitle
from . import utils, random_utils, io_utils, yaml_db
def cache_inspect():
    """
    Force `inspect` to read and cache the source of every module on the
    current call stack.

    NOTE: seems to cache source code, so that future calls to
    inspect.getsource return the older sources, rather than the
    source when getsource is called
    """
    for record in inspect.getouterframes(inspect.currentframe()):
        frame = record[0]
        module = inspect.getmodule(frame)
        # frames from C extensions / builtins have no module object
        if module is not None:
            inspect.getsource(module)

# Run once at import time so trial source snapshots reflect the code as it
# was when du was first imported (see the module docstring's race warning).
try:
    cache_inspect()
except Exception as e:
    # catch exception for running for REPL
    # (only the interactive-session error is tolerated; anything else is real)
    if str(e) != "<module '__main__'> is a built-in module":
        raise e
def trial_lock(trial_base_path):
    """Return a file lock guarding iteration creation for this trial.

    Only used to avoid race conditions when concurrent processes pick a
    new iteration number at the same time.
    """
    lock_file = os.path.join(trial_base_path, ".lock")
    return io_utils.file_lock(lock_file)
def create_trial_dir(trial_base_path, iteration_num, replace_strategy):
    """
    creates a new directory named 'iteration_num' under 'trial_base_dir',
    automatically selecting the next sequential iteration number if
    iteration_num is None.

    returns iteration_num because it needs to be chosen with the trial locked
    (if not specified)

    replace_strategy: "force" to silently overwrite an existing iteration,
    "ask" to prompt interactively, None to refuse (raises ValueError).

    FIX: removed a stray @contextlib.contextmanager decorator -- this function
    returns (it does not yield), and its caller uses the return value as an
    int, so the decorator broke the call at runtime.
    """
    with trial_lock(trial_base_path):
        if iteration_num is None:
            iteration_num = get_next_iteration_num(trial_base_path)
        assert iteration_num > 0 and isinstance(iteration_num, int)
        trial_path = path_in_trial_dir(trial_base_path, iteration_num)
        # check if already existing, if so handle appropriately
        if os.path.exists(trial_path):
            if replace_strategy == "force":
                replace = True
            elif replace_strategy == "ask":
                in_str = None
                while in_str not in ["y", "n"]:
                    in_str = six.moves.input(
                        ("%s exists, would you like "
                         "to overwrite? (y/n) ") % trial_path)
                replace = in_str == "y"
            elif replace_strategy is None:
                # FIX: previously assigned replace_strategy = False here,
                # leaving `replace` unbound and raising UnboundLocalError
                # below; the intent is clearly to not replace.
                replace = False
            else:
                raise ValueError("replace strategy %s not found"
                                 % replace_strategy)
            if replace:
                shutil.rmtree(trial_path)
            else:
                raise ValueError("Trial already exists: %s" % trial_path)
        os.mkdir(trial_path)
    return iteration_num
@contextlib.contextmanager
def temporarily_add_file_logger_to(filename, loggers):
    """
    Attach a FileHandler writing to *filename* to every logger in *loggers*
    for the duration of the with-block, then detach it again.

    eg.
    >>> with temporarily_add_file_logger_to("log.txt", [logger]):
    >>>     logger.info("foo")
    """
    # make sure the log file exists before attaching handlers
    io_utils.guarantee_exists(filename)
    handler = logging.FileHandler(filename)
    try:
        for lgr in loggers:
            # reuse each logger's existing formatter so the file output
            # matches its console output
            handler.setFormatter(lgr.handlers[0].formatter)
            lgr.addHandler(handler)
        yield
    finally:
        # always detach, even if the body raised
        for lgr in loggers:
            lgr.removeHandler(handler)
@contextlib.contextmanager
def _run_trial_internal(trial_name,
                        iteration_num=None,
                        description=None,
                        snippets=None,
                        trials_dir=utils.config["trial"]["trials_dir"],
                        loggers="default",
                        random_seed=42,
                        trial_runner_string=None,
                        replace_strategy=None):
    """
    Context manager that sets up and tears down a single trial iteration,
    yielding a TrialState.

    trial_name:
        name of the trial as a string (alphanumerics, '_' and '-' only)
    iteration_num:
        integer of which the iteration in the current trial
        (auto-incremented when None)
    description:
        any json encodable object (eg. string or list)
    snippets:
        list of pairs of a snippet name (used to import the snippet) and
        a source file used in this trial; names must be unique and must not
        be "trial_runner"
    trials_dir:
        root directory holding all trials
        (NOTE: the default is evaluated once at import time from
        utils.config)
    loggers:
        loggers to mirror into the trial's log.txt; "default" selects the
        du default + simple loggers
    random_seed:
        seed applied for the duration of the trial body
    trial_runner_string:
        source text of the code that launched the trial, saved to
        src/trial_runner.py
    replace_strategy:
        forwarded to create_trial_dir ("force" / "ask" / None)

    FIX: the decorator was applied twice; contextlib.contextmanager wraps a
    generator function, and double-application breaks the with-statement at
    runtime, so the duplicate was removed.
    """
    # handling default values
    if description is None:
        description = []
    if snippets is None:
        snippets = []
    # expand the trials_dir
    trials_dir = os.path.realpath(trials_dir)
    if loggers == "default":
        loggers = [utils.DEFAULT_LOGGER,
                   utils.SIMPLE_LOGGER]
    # validation
    assert re.match(r'^[A-Za-z0-9_\-]+$', trial_name), trial_name
    for snippet_name, snippet_path in snippets:
        assert isinstance(snippet_name, str)
        assert "." not in snippet_name
        assert isinstance(snippet_path, str)
    snippet_names = set([snippet[0] for snippet in snippets])
    assert len(snippets) == len(snippet_names),\
        "Snippet names must be unique"
    assert "trial_runner" not in snippet_names
    # make trials dir if doesn't exist
    io_utils.guarantee_dir_exists(trials_dir)
    trial_base_path = os.path.join(trials_dir, trial_name)
    # make trial base dir if doesn't exist
    io_utils.guarantee_dir_exists(trial_base_path)
    # make yaml_db if doesn't exist
    io_utils.guarantee_exists(os.path.join(trial_base_path, "trial.yml"))
    start_date = datetime.datetime.now()
    # iteration_num is chosen under the trial lock (see create_trial_dir)
    iteration_num = create_trial_dir(trial_base_path,
                                     iteration_num,
                                     replace_strategy)
    path_in_this_trial_dir = utils.partial(path_in_trial_dir,
                                           trial_base_path,
                                           iteration_num)
    # file paths
    src_path = path_in_this_trial_dir("src")
    tmp_path = path_in_this_trial_dir("tmp")
    files_path = path_in_this_trial_dir("files")
    params_path = path_in_this_trial_dir("params.yml")
    log_path = path_in_this_trial_dir("log.txt")
    # create directories (tmp is created later as a temporary directory)
    for dirname in [src_path,
                    files_path]:
        os.mkdir(dirname)
    # copy snippet source files into src/ so the trial is reproducible
    for snippet_name, snippet_path in snippets:
        new_snippet_path = path_in_this_trial_dir(
            "src", snippet_name + ".py")
        shutil.copy(snippet_path, new_snippet_path)
    # write down the string used to create the trial
    if trial_runner_string is not None:
        trial_runner_path = path_in_this_trial_dir("src", "trial_runner.py")
        assert not os.path.exists(trial_runner_path)
        with open(trial_runner_path, 'w') as f:
            f.write(trial_runner_string)
    # writing description to trial db
    with trial_db_iteration_transaction(trial_base_path, iteration_num) as m:
        m["description"] = description
    # create trial state
    trial = TrialState(trial_name=trial_name,
                       iteration_num=iteration_num,
                       trials_dir=trials_dir)
    utils.simple_info("Running trial %s:%d on pid %d"
                      % (trial_name, iteration_num, os.getpid()))
    try:
        # temporarily rename the process for easy identification in ps/top
        proc_title = setproctitle.getproctitle()
        setproctitle.setproctitle("%s:%d" % (trial_name, iteration_num))
        with random_utils.seed_random(random_seed):
            with io_utils.temporary_directory(tmp_path):
                with temporarily_add_file_logger_to(log_path, loggers):
                    # capture stdout and stderr as well as loggers,
                    # so that all printing gets logged
                    with io_utils.Tee(log_path,
                                      "a",
                                      "stderr",
                                      auto_flush=True):
                        with io_utils.Tee(log_path,
                                          "a",
                                          "stdout",
                                          auto_flush=True):
                            # execute trial
                            yield trial
    finally:
        # save params / state to persistent storage
        trial.dump()
        io_utils.yaml_dump(dict(
            trial_name=trial_name,
            iteration_num=iteration_num,
            snippets=snippets,
            description=description,
            random_seed=random_seed,
            start_date=str(start_date),
            end_date=str(datetime.datetime.now())
        ),
            params_path)
        setproctitle.setproctitle(proc_title)
def _get_source_of_caller(additional_frames=0):
    """
    utility function to get source code of the file calling the function
    calling this function (i.e. 2 levels up)

    additional_frames: extra stack frames to skip beyond the fixed 2, for
    when this is invoked through intermediate wrappers (see run_trial).
    """
    # this might be sketchy using inspect and is not REPL
    # friendly
    current_frame = inspect.currentframe()
    outer_frames = inspect.getouterframes(current_frame)
    # drop the first 2 (this function + the calling function)
    caller_frame = outer_frames[2 + additional_frames][0]
    return inspect.getsource(inspect.getmodule(caller_frame))
def run_trial(*args, _run_trial_additional_frames=0, **kwargs):
    """
    wrapper around _run_trial_internal that snapshots the source of the file
    this function was called from and records it as the trial runner string

    see docstring of _run_trial_internal for arguments
    """
    assert "trial_runner_string" not in kwargs
    source = _get_source_of_caller(_run_trial_additional_frames)
    # sanity check that we actually grabbed the caller's file
    assert "run_trial(" in source
    return _run_trial_internal(*args, trial_runner_string=source, **kwargs)
def run_trial_function(trial_function, args=None, kwargs=None, **_kwargs):
    """
    wrapper around _run_trial_internal that records the source code of the
    given function, then calls it with the TrialState as its first argument

    args:
        positional arguments to pass into trial_function
    kwargs:
        keyword arguments to pass into trial_function

    see docstring of _run_trial_internal for the remaining arguments
    """
    assert "trial_runner_string" not in _kwargs
    call_args = () if args is None else args
    call_kwargs = {} if kwargs is None else kwargs
    func_str = "".join(inspect.getsourcelines(trial_function)[0])
    with _run_trial_internal(trial_runner_string=func_str,
                             **_kwargs) as trial:
        return trial_function(trial, *call_args, **call_kwargs)
| 34.992218 | 79 | 0.609752 | """
trial is a way of creating a project
features:
- copies the source of the file you're using to run it
(to have a copy of the file)
(WARNING: there can be a race condition if the file changes before the trial
code is run - the earlier you import du, the less likely this is to
occur, due to the cache_inspect function)
- creates a directory where files for the run can be stored without worrying
about collisions
- that directory also has a temporary directory that is cleared out
- allows storing intermediate values, to later be retrieved
- seeds random number generator
- creates a summary file (in trial.yml) with all of the important stored
parameters
WARNING: when canceling a trial run, don't spam CTRL-C, since that can cancel
the clean up operations too
eg.
# in some file:
with run_trial("some_trial_name") as trial:
# printed stuff is stored to _trials_dir_/some_trial_name/1/log.txt
print "Hello world"
# storing values
trial.store_important("foo", 1)
trial.store_important("foo", 2)
trial.get_important("foo") # returns [1, 2]
"""
import os
import sys
import re
import logging
import shutil
import datetime
import inspect
import importlib
import contextlib
from collections import defaultdict
import six
import setproctitle
from . import utils, random_utils, io_utils, yaml_db
def cache_inspect():
"""
NOTE: seems to cache source code, so that future calls to
inspect.getsource return the older sources, rather than the
source when getsource is called
"""
for record in inspect.getouterframes(inspect.currentframe()):
frame = record[0]
module = inspect.getmodule(frame)
if module is not None:
inspect.getsource(module)
try:
cache_inspect()
except Exception as e:
# catch exception for running for REPL
if str(e) != "<module '__main__'> is a built-in module":
raise e
def get_next_iteration_num(path):
    """Return 1 + the highest numeric directory name under *path* (1 if none).

    Entries whose names are not purely numeric are ignored.

    FIX: previously used re.match(r"\d+", x), which also accepted names that
    merely *start* with digits (eg. "5_backup"), making the subsequent
    int() conversion raise ValueError; re.fullmatch only accepts all-digit
    names.
    """
    numeric_names = [name for name in os.listdir(path)
                     if re.fullmatch(r"\d+", name)]
    if not numeric_names:
        return 1
    return max(int(name) for name in numeric_names) + 1
def trial_lock(trial_base_path):
"""
only for making sure no race conditions when adding new iterations
"""
return io_utils.file_lock(os.path.join(trial_base_path, ".lock"))
def trial_db_transaction(trial_base_path):
    """Open a transaction on the trial's ``trial.yml`` database.

    NOTE: yaml_db.db_transaction is itself a context manager, so this
    simply returns it without any wrapping.
    """
    return yaml_db.db_transaction(os.path.join(trial_base_path, "trial.yml"))
@contextlib.contextmanager
def trial_db_iteration_transaction(trial_base_path, iteration_num):
    """Yield the db map for the given iteration inside a trial.yml
    transaction, creating (and appending) the map if it does not exist yet.

    Mutations of the yielded map are persisted when the enclosing
    transaction commits.
    """
    assert isinstance(iteration_num, int)
    with trial_db_transaction(trial_base_path) as db:
        iter_maps = filter(lambda x: x["iteration_num"] == iteration_num, db)
        iter_maps = list(iter_maps)  # eagerly evaluate
        # a well-formed db has at most one map per iteration
        assert len(iter_maps) <= 1
        if len(iter_maps) == 0:
            iter_map = dict(
                iteration_num=iteration_num,
            )
            db.append(iter_map)
        elif len(iter_maps) == 1:
            iter_map = iter_maps[0]
        else:
            # unreachable given the assert above; kept as a defensive guard
            raise ValueError("Wrong number (%d) of maps with the same "
                             "iteration number: %s"
                             % (len(iter_maps), iter_maps))
        yield iter_map
def path_in_trial_dir(trial_base_path, iteration_num, *ps):
    """Build a path inside the directory of the given trial iteration."""
    assert isinstance(iteration_num, int)
    parts = (str(iteration_num),) + ps
    return os.path.join(trial_base_path, *parts)
def create_trial_dir(trial_base_path, iteration_num, replace_strategy):
"""
creates a new directory named 'iteration_num' under 'trial_base_dir',
automatically selecting the next sequential iteration number if
iteration_num is None.
returns iteration_num because it needs to be chosen with the trial locked
(if not specified)
"""
with trial_lock(trial_base_path):
if iteration_num is None:
iteration_num = get_next_iteration_num(trial_base_path)
assert iteration_num > 0 and isinstance(iteration_num, int)
trial_path = path_in_trial_dir(trial_base_path, iteration_num)
# check if already existing, if so handle appropriately
if os.path.exists(trial_path):
if replace_strategy == "force":
replace = True
elif replace_strategy == "ask":
in_str = None
while in_str not in ["y", "n"]:
in_str = six.moves.input(
("%s exists, would you like "
"to overwrite? (y/n) ") % trial_path)
replace = in_str == "y"
elif replace_strategy is None:
replace_strategy = False
else:
raise ValueError("replace strategy %s not found"
% replace_strategy)
if replace:
shutil.rmtree(trial_path)
else:
raise ValueError("Trial already exists: %s" % trial_path)
os.mkdir(trial_path)
return iteration_num
@contextlib.contextmanager
def temporarily_add_file_logger_to(filename, loggers):
"""
adds a FileLogger to each of the given loggers, which causes the
logger to log to that file as well
eg.
>>> with temporarily_add_file_logger_to("log.txt", [logger]):
>>> logger.info("foo")
"""
# create log file
io_utils.guarantee_exists(filename)
# add log handler
file_logger = logging.FileHandler(filename)
try:
for logger in loggers:
file_logger.setFormatter(logger.handlers[0].formatter)
logger.addHandler(file_logger)
yield
finally:
# remove log handler
for logger in loggers:
logger.removeHandler(file_logger)
@contextlib.contextmanager
def temporarily_add_to_path(directory_path):
    """Prepend *directory_path* to sys.path for the duration of the
    with-block; a no-op when the directory is already on the path."""
    if directory_path in sys.path:
        yield
        return
    sys.path.insert(0, directory_path)
    try:
        yield
    finally:
        sys.path.remove(directory_path)
class TrialState(object):
    """Container for the state of a single trial iteration.

    Knows the iteration's directory layout (src/, files/, tmp/, stored.yml)
    and accumulates key -> [values] state, split into "important" entries
    (committed to the shared trial.yml db) and "unimportant" ones (only
    written to the iteration's stored.yml).
    """

    def __init__(self,
                 trial_name,
                 iteration_num,
                 trials_dir=None):
        """
        - container for global state of the current trial
        - eg. trial specific directory for temporary files
        - can store parameters for the current trial, as well as output values
        """
        if trials_dir is None:
            trials_dir = utils.config["trial"]["trials_dir"]
        self.trials_dir = trials_dir
        self.trial_name = trial_name
        self.iteration_num = iteration_num
        # trailing-underscore attributes are the internal path layout
        self.trial_base_path_ = os.path.join(trials_dir, trial_name)
        self.store_file_ = path_in_trial_dir(self.trial_base_path_,
                                             self.iteration_num,
                                             "stored.yml")
        self.src_path_ = path_in_trial_dir(self.trial_base_path_,
                                           self.iteration_num,
                                           "src")
        self.files_path_ = path_in_trial_dir(self.trial_base_path_,
                                             self.iteration_num,
                                             "files")
        self.tmp_path_ = path_in_trial_dir(self.trial_base_path_,
                                           self.iteration_num,
                                           "tmp")
        # each bucket maps key -> list of stored values (append-only)
        self.state_ = dict(
            important=defaultdict(list),
            unimportant=defaultdict(list),
        )

    def tmp_path(self, *ps):
        """
        creates a file path in the experiment's tmp directory
        """
        # the tmp dir only exists while the trial context manager is active
        assert os.path.isdir(self.tmp_path_)
        return os.path.join(self.tmp_path_, *ps)

    def file_path(self, *ps):
        """
        creates a file path in the experiment's file directory
        """
        assert ps
        return os.path.join(self.files_path_, *ps)

    def src_path(self, *ps):
        """
        creates a file path in the experiment's src directory
        """
        assert ps
        return os.path.join(self.src_path_, *ps)

    def module(self, module_name):
        """Deprecated alias for load_module."""
        import warnings
        warnings.warn("TrialState.module is deprecated, "
                      "use TrialState.load_module instead")
        return self.load_module(module_name)

    def load_module(self, module_name):
        """Import *module_name* from this trial's saved src/ snapshot."""
        with temporarily_add_to_path(self.src_path_):
            utils.simple_debug("(Trial:%s:%d) loading module: %s",
                               self.trial_name,
                               self.iteration_num,
                               module_name)
            return importlib.import_module(module_name)

    def store(self, key, value, important=False, silent=False):
        """Append *value* under *key*; important entries are committed to
        the shared trial.yml db on dump()/commit()."""
        if not silent:
            utils.simple_debug("(Trial:%s:%d) %s = %s",
                               self.trial_name,
                               self.iteration_num,
                               key,
                               value)
        self.state_[
            "important" if important else "unimportant"
        ][key].append(value)

    def store_important(self, key, value, **kwargs):
        """Shorthand for store(..., important=True)."""
        return self.store(key, value, important=True, **kwargs)

    def get(self, key, important=False):
        """Return the list of values stored under *key*."""
        return self.state_["important" if important else "unimportant"][key]

    def get_important(self, key):
        """Shorthand for get(..., important=True)."""
        return self.get(key, important=True)

    def delete(self):
        """
        removes the current trial from both the yaml_db and deletes the trial
        directory
        """
        # delete trial from yaml_db
        with trial_db_transaction(self.trial_base_path_) as db:
            indices = []
            for idx, m in enumerate(db):
                if m["iteration_num"] == self.iteration_num:
                    indices.append(idx)
            assert len(indices) == 1
            db.pop(indices[0])
        # delete trial directory
        shutil.rmtree(path_in_trial_dir(self.trial_base_path_,
                                        self.iteration_num))

    def dump(self):
        """
        saves state to trial (both stored.yml and trial.yml)
        """
        # dump state to stored.yml
        io_utils.yaml_dump(self.state_, self.store_file_)
        # commit to trial.yml yaml_db
        self.commit()

    def load(self):
        """
        NOTE: this is somewhat inconsistent with TrialState knowing where/how
        to load itself, while run_trial knows how to save it
        (so the logic of where the file is is in 2 different places)
        """
        state = io_utils.yaml_load(self.store_file_)
        # NOTE: doesn't load as defaultdict's
        self.state_ = state

    def commit(self):
        """Write the "important" bucket into this iteration's trial.yml map."""
        with trial_db_iteration_transaction(self.trial_base_path_,
                                            self.iteration_num) as m:
            m["important"] = self.state_["important"]
@contextlib.contextmanager
def _run_trial_internal(trial_name,
                        iteration_num=None,
                        description=None,
                        snippets=None,
                        trials_dir=utils.config["trial"]["trials_dir"],
                        loggers="default",
                        random_seed=42,
                        trial_runner_string=None,
                        replace_strategy=None):
    """
    Context manager that sets up a trial directory, yields a TrialState,
    and persists the trial's state and parameters on exit.

    trial_name:
        name of the trial as a string
    iteration_num:
        integer of which the iteration in the current trial
    description:
        any json encodable object (eg. string or list)
    snippets:
        list of pairs of a snippet name (used to import the snippet) and
        a source file used in this trial
    trials_dir:
        base directory under which per-trial directories are created
    loggers:
        loggers to attach a file handler to for the trial's log.txt, or
        the string "default" for the default logger pair
    random_seed:
        seed applied for the duration of the trial body
    trial_runner_string:
        if given, written verbatim to src/trial_runner.py in the trial dir
    replace_strategy:
        passed through to create_trial_dir

    NOTE(review): the trials_dir default is evaluated once at import time
    from utils.config — later config changes won't affect it; confirm
    this is intended.
    """
    # handling default values
    if description is None:
        description = []
    if snippets is None:
        snippets = []
    # expand the trials_dir
    trials_dir = os.path.realpath(trials_dir)
    if loggers == "default":
        loggers = [utils.DEFAULT_LOGGER,
                   utils.SIMPLE_LOGGER]
    # validation
    assert re.match(r'^[A-Za-z0-9_\-]+$', trial_name), trial_name
    for snippet_name, snippet_path in snippets:
        assert isinstance(snippet_name, str)
        assert "." not in snippet_name
        assert isinstance(snippet_path, str)
    snippet_names = set([snippet[0] for snippet in snippets])
    assert len(snippets) == len(snippet_names),\
        "Snippet names must be unique"
    # "trial_runner" is reserved for trial_runner_string below
    assert "trial_runner" not in snippet_names
    # make trials dir if doesn't exist
    io_utils.guarantee_dir_exists(trials_dir)
    trial_base_path = os.path.join(trials_dir, trial_name)
    # make trial base dir if doesn't exist
    io_utils.guarantee_dir_exists(trial_base_path)
    # make yaml_db if doesn't exist
    io_utils.guarantee_exists(os.path.join(trial_base_path, "trial.yml"))
    start_date = datetime.datetime.now()
    iteration_num = create_trial_dir(trial_base_path,
                                     iteration_num,
                                     replace_strategy)
    path_in_this_trial_dir = utils.partial(path_in_trial_dir,
                                           trial_base_path,
                                           iteration_num)
    # file paths
    src_path = path_in_this_trial_dir("src")
    tmp_path = path_in_this_trial_dir("tmp")
    files_path = path_in_this_trial_dir("files")
    params_path = path_in_this_trial_dir("params.yml")
    log_path = path_in_this_trial_dir("log.txt")
    # create directories
    for dirname in [src_path,
                    files_path]:
        os.mkdir(dirname)
    # copy files
    for snippet_name, snippet_path in snippets:
        new_snippet_path = path_in_this_trial_dir(
            "src", snippet_name + ".py")
        shutil.copy(snippet_path, new_snippet_path)
    # write down the string to create the trial
    if trial_runner_string is not None:
        trial_runner_path = path_in_this_trial_dir("src", "trial_runner.py")
        assert not os.path.exists(trial_runner_path)
        with open(trial_runner_path, 'w') as f:
            f.write(trial_runner_string)
    # writing description to trial db
    with trial_db_iteration_transaction(trial_base_path, iteration_num) as m:
        m["description"] = description
    # create trial state
    trial = TrialState(trial_name=trial_name,
                       iteration_num=iteration_num,
                       trials_dir=trials_dir)
    utils.simple_info("Running trial %s:%d on pid %d"
                      % (trial_name, iteration_num, os.getpid()))
    try:
        proc_title = setproctitle.getproctitle()
        setproctitle.setproctitle("%s:%d" % (trial_name, iteration_num))
        with random_utils.seed_random(random_seed):
            with io_utils.temporary_directory(tmp_path):
                with temporarily_add_file_logger_to(log_path, loggers):
                    # capture stdout and stderr as well as loggers,
                    # so that all printing gets logged
                    with io_utils.Tee(log_path,
                                      "a",
                                      "stderr",
                                      auto_flush=True):
                        with io_utils.Tee(log_path,
                                          "a",
                                          "stdout",
                                          auto_flush=True):
                            # execute trial
                            yield trial
    finally:
        # save params / state to persistent storage
        trial.dump()
        io_utils.yaml_dump(dict(
            trial_name=trial_name,
            iteration_num=iteration_num,
            snippets=snippets,
            description=description,
            random_seed=random_seed,
            start_date=str(start_date),
            end_date=str(datetime.datetime.now())
        ),
            params_path)
        # NOTE(review): if getproctitle() itself raised above, proc_title
        # would be unbound here and mask the original error — confirm
        # acceptable.
        setproctitle.setproctitle(proc_title)
def _get_source_of_caller(additional_frames=0):
    """
    utility function to get source code of the file calling the function
    calling this function (i.e. 2 levels up)

    additional_frames:
        extra stack frames to skip beyond the default 2, for use when
        this is called through additional wrapper layers
    """
    # this might be sketchy using inspect and is not REPL
    # friendly
    current_frame = inspect.currentframe()
    outer_frames = inspect.getouterframes(current_frame)
    # drop the first 2 (this function + the calling function)
    caller_frame = outer_frames[2 + additional_frames][0]
    return inspect.getsource(inspect.getmodule(caller_frame))
def run_trial(*args, _run_trial_additional_frames=0, **kwargs):
    """
    wrapper around _run_trial_internal that reads the file that this function
    was called from and saves its contents as a string
    see docstring of _run_trial_internal for arguments
    """
    assert "trial_runner_string" not in kwargs
    source = _get_source_of_caller(_run_trial_additional_frames)
    # sanity check that we actually grabbed the caller's file
    assert "run_trial(" in source
    kwargs["trial_runner_string"] = source
    return _run_trial_internal(*args, **kwargs)
def run_trial_function(trial_function, args=None, kwargs=None, **_kwargs):
    """
    wrapper around _run_trial_internal that saves the source code of the given
    function as a string, and calls the function with a TrialState object
    args:
        positional arguments to pass into trial_function
    kwargs:
        keyword arguments to pass into trial_function
    see docstring of _run_trial_internal for arguments
    """
    assert "trial_runner_string" not in _kwargs
    call_args = () if args is None else args
    call_kwargs = {} if kwargs is None else kwargs
    # record the function's own source as the trial runner string
    source_lines, _ = inspect.getsourcelines(trial_function)
    func_str = "".join(source_lines)
    with _run_trial_internal(trial_runner_string=func_str,
                             **_kwargs) as trial:
        return trial_function(trial, *call_args, **call_kwargs)
| 2,958 | 3,527 | 136 |
56f7f2d6cb9004200230d63ce87f7e661b04685d | 938 | py | Python | raytracing/examples/invariantAndEfficiency.py | himbeles/RayTracing | 3ea14d0df25c46b9749a344723d29cc29436e539 | [
"MIT"
] | 91 | 2019-01-31T17:25:09.000Z | 2022-03-19T11:03:05.000Z | raytracing/examples/invariantAndEfficiency.py | himbeles/RayTracing | 3ea14d0df25c46b9749a344723d29cc29436e539 | [
"MIT"
] | 267 | 2019-02-02T18:53:55.000Z | 2022-01-11T11:39:25.000Z | raytracing/examples/invariantAndEfficiency.py | himbeles/RayTracing | 3ea14d0df25c46b9749a344723d29cc29436e539 | [
"MIT"
] | 28 | 2019-01-30T15:40:40.000Z | 2022-03-10T22:36:53.000Z | import envexamples # modifies path
from raytracing import *
"""
The Lagrange invariant is a constant defining the collection efficiency of an optical system. The Lagrange
invariant is calculated using the principal and axial rays, whether the optical invariant is calculated with
another combination of rays. This code uses the optical invariant to characterize the ray transmission in a
4f system and shows that the optical invariant is greatly affected by the used optics. Indeed, changing the
diameter of the first lens affects the number of detected rays at the imaged plane.
"""
path = ImagingPath()
path.design(fontScale=1.7)
path.append(System4f(f1=10, diameter1=25.4, f2=20, diameter2=25.4))
path.reportEfficiency()
path.display(interactive=False)
path2 = ImagingPath()
path2.design(fontScale=1.5)
path2.append(System4f(f1=10, diameter1=12.7, f2=20, diameter2=25.4))
path2.reportEfficiency()
path2.display(interactive=False) | 42.636364 | 108 | 0.797441 | import envexamples # modifies path
from raytracing import *
"""
The Lagrange invariant is a constant defining the collection efficiency of an optical system. The Lagrange
invariant is calculated using the principal and axial rays, whether the optical invariant is calculated with
another combination of rays. This code uses the optical invariant to characterize the ray transmission in a
4f system and shows that the optical invariant is greatly affected by the used optics. Indeed, changing the
diameter of the first lens affects the number of detected rays at the imaged plane.
"""
path = ImagingPath()
path.design(fontScale=1.7)
path.append(System4f(f1=10, diameter1=25.4, f2=20, diameter2=25.4))
path.reportEfficiency()
path.display(interactive=False)
path2 = ImagingPath()
path2.design(fontScale=1.5)
path2.append(System4f(f1=10, diameter1=12.7, f2=20, diameter2=25.4))
path2.reportEfficiency()
path2.display(interactive=False) | 0 | 0 | 0 |
13df5f2771a7524de6b7fc38586380e421f8cad0 | 215 | py | Python | src/result_raw.py | Aculisme/zero_algorithms | 3b5c80bdb663dade07578e010aeffd3aa501fdf1 | [
"MIT"
] | 4 | 2019-06-30T15:30:18.000Z | 2019-06-30T15:36:30.000Z | src/result_raw.py | Aculisme/zero_algorithms | 3b5c80bdb663dade07578e010aeffd3aa501fdf1 | [
"MIT"
] | null | null | null | src/result_raw.py | Aculisme/zero_algorithms | 3b5c80bdb663dade07578e010aeffd3aa501fdf1 | [
"MIT"
] | null | null | null | from .methods import Bisection, Newton, Secant
from .function_examples import f_root
if __name__ == '__main__':
# change the method and function as desired
v = Newton(**f_root).solve(**f_root)
print(v) | 26.875 | 47 | 0.716279 | from .methods import Bisection, Newton, Secant
from .function_examples import f_root
if __name__ == '__main__':
# change the method and function as desired
v = Newton(**f_root).solve(**f_root)
print(v) | 0 | 0 | 0 |
cfcab80f6d377877211013744990692d255ba505 | 204 | py | Python | nonebug/mixin/__init__.py | nonebot/nonebug | 40fcd4f3eff8f4b2118e95938fabc3d77ff6819c | [
"MIT"
] | 9 | 2021-10-09T05:19:13.000Z | 2022-03-18T15:18:00.000Z | nonebug/mixin/__init__.py | AkiraXie/nonebug | 5556f94f3e85a26602fc015013e9fbdda07f8c71 | [
"MIT"
] | 2 | 2021-11-23T06:29:20.000Z | 2022-03-18T15:51:51.000Z | nonebug/mixin/__init__.py | AkiraXie/nonebug | 5556f94f3e85a26602fc015013e9fbdda07f8c71 | [
"MIT"
] | 1 | 2022-02-19T08:57:50.000Z | 2022-02-19T08:57:50.000Z | from .driver import DriverMixin as DriverMixin
from .process import ProcessMixin as ProcessMixin
from .call_api import CallApiMixin as CallApiMixin
from .dependent import DependentMixin as DependentMixin
| 40.8 | 55 | 0.862745 | from .driver import DriverMixin as DriverMixin
from .process import ProcessMixin as ProcessMixin
from .call_api import CallApiMixin as CallApiMixin
from .dependent import DependentMixin as DependentMixin
| 0 | 0 | 0 |
ad1e8496267cbf9a4e1552355549ca26ab87aece | 32,148 | py | Python | openstackinabox/tests/models/keystone/test_model.py | BenjamenMeyer/openstackinabox | b5097695719b818dd06e3773899f80a15e7e71c1 | [
"Apache-2.0"
] | 1 | 2017-11-19T20:31:48.000Z | 2017-11-19T20:31:48.000Z | openstackinabox/tests/models/keystone/test_model.py | TestInABox/openstackinabox | 00dcac601d14e1cfc240840dd92895ee322caf96 | [
"Apache-2.0"
] | 38 | 2016-05-05T18:03:21.000Z | 2020-04-11T03:33:01.000Z | openstackinabox/tests/models/keystone/test_model.py | BenjamenMeyer/openstackinabox | b5097695719b818dd06e3773899f80a15e7e71c1 | [
"Apache-2.0"
] | 1 | 2015-05-28T14:53:46.000Z | 2015-05-28T14:53:46.000Z | import mock
import ddt
import six
from openstackinabox.tests.base import TestBase
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.model import (
schema,
KeystoneModel
)
@ddt.ddt
@ddt.ddt
| 33.417879 | 79 | 0.542678 | import mock
import ddt
import six
from openstackinabox.tests.base import TestBase
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.model import (
schema,
KeystoneModel
)
@ddt.ddt
class TestKeystoneModel(TestBase):
def setUp(self):
super(TestKeystoneModel, self).setUp(initialize=False)
self.model = KeystoneModel
self.db = self.master_model.database
def tearDown(self):
super(TestKeystoneModel, self).tearDown()
def test_initialize_db_schema(self):
db_cursor = mock.MagicMock()
db_execute = mock.MagicMock()
db_commit = mock.MagicMock()
db_instance = mock.MagicMock()
db_instance.cursor.return_value = db_cursor
db_instance.commit = db_commit
db_cursor.execute = db_execute
self.model.initialize_db_schema(db_instance)
self.assertTrue(db_instance.cursor.called)
self.assertTrue(db_execute.called)
self.assertTrue(db_commit.called)
self.assertEqual(db_execute.call_count, len(schema))
for s in schema:
db_execute.assert_any_call(s)
def test_get_child_models(self):
master = 'alpha'
db = 'omega'
child_models = self.model.get_child_models(master, db)
self.assertEqual(len(child_models), len(self.model.CHILD_MODELS))
def assert_has_instance(model_name, model_class):
for cm_name, cm_instance in six.iteritems(child_models):
if isinstance(cm_instance, model_class):
return
self.assertFalse(
True,
msg="instance of {0} ({1}) not in list".format(
model_name,
model_class
)
)
for child_model_name, child_model_type in six.iteritems(
self.model.CHILD_MODELS
):
assert_has_instance(child_model_name, child_model_type)
def test_initialization(self):
self.assertIsNone(self.master_model.roles.admin_role_id)
self.assertIsNone(self.master_model.roles.viewer_role_id)
self.assertIsNone(self.master_model.tenants.admin_tenant_id)
self.assertIsNone(self.master_model.users.admin_user_id)
self.assertIsNone(self.master_model.tokens.admin_token)
self.master_model.init_database()
self.assertIsNotNone(self.master_model.roles.admin_role_id)
self.assertIsNotNone(self.master_model.roles.viewer_role_id)
self.assertIsNotNone(self.master_model.tenants.admin_tenant_id)
self.assertIsNotNone(self.master_model.users.admin_user_id)
self.assertIsNotNone(self.master_model.tokens.admin_token)
token_data = self.master_model.tokens.get_by_user_id(
user_id=self.master_model.users.admin_user_id
)
self.assertEqual(
token_data['tenant_id'],
self.master_model.tenants.admin_tenant_id
)
self.assertEqual(
token_data['user_id'],
self.master_model.users.admin_user_id
)
self.assertEqual(
token_data['token'],
self.master_model.tokens.admin_token
)
self.assertFalse(token_data['revoked'])
def test_properties(self):
self.master_model.init_database()
self.assertEqual(
self.master_model.child_models['users'],
self.master_model.users
)
self.assertEqual(
self.master_model.child_models['tenants'],
self.master_model.tenants
)
self.assertEqual(
self.master_model.child_models['tokens'],
self.master_model.tokens
)
self.assertEqual(
self.master_model.child_models['roles'],
self.master_model.roles
)
self.assertEqual(
self.master_model.child_models['services'],
self.master_model.services
)
self.assertEqual(
self.master_model.child_models['endpoints'],
self.master_model.endpoints
)
@ddt.data(
0,
1
)
def test_validate_token_admin(self, extra_role_count):
self.master_model.init_database()
with self.assertRaises(exceptions.KeystoneInvalidTokenError):
self.master_model.validate_token_admin('foobar')
tenant_id = self.master_model.tenants.add(
tenant_name='foo',
description='bar',
enabled=True
)
user_id = self.master_model.users.add(
tenant_id=tenant_id,
username='bar',
email='foo@bar',
password='bar',
apikey='foo',
enabled=True
)
self.master_model.tokens.add(
tenant_id=tenant_id,
user_id=user_id,
token='foobar'
)
with self.assertRaises(exceptions.KeystoneInvalidTokenError):
self.master_model.validate_token_admin('foobar')
role_names = [
'role_{0}'.format(x)
for x in range(extra_role_count)
]
role_data = [
{
'name': role_name,
'id': self.master_model.roles.add(role_name)
}
for role_name in role_names
]
for role in role_data:
self.master_model.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=role['id']
)
self.master_model.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=self.master_model.roles.admin_role_id
)
validation_user_data = self.master_model.validate_token_admin('foobar')
self.assertEqual(validation_user_data['tenantid'], tenant_id)
self.assertEqual(validation_user_data['userid'], user_id)
self.assertEqual(validation_user_data['token'], 'foobar')
def test_validate_token_service_admin(self):
self.master_model.init_database()
tenant_id = self.master_model.tenants.add(
tenant_name='foo',
description='bar',
enabled=True
)
user_id = self.master_model.users.add(
tenant_id=tenant_id,
username='bar',
email='foo@bar',
password='bar',
apikey='foo',
enabled=True
)
self.master_model.tokens.add(
tenant_id=tenant_id,
user_id=user_id,
token='foobar'
)
self.master_model.roles.add_user_role_by_id(
tenant_id=tenant_id,
user_id=user_id,
role_id=self.master_model.roles.admin_role_id
)
with self.assertRaises(exceptions.KeystoneInvalidTokenError):
self.master_model.validate_token_service_admin('foobar')
user_data = self.master_model.validate_token_service_admin(
self.master_model.tokens.admin_token
)
self.assertEqual(
user_data['tenantid'],
self.master_model.tenants.admin_tenant_id
)
self.assertEqual(
user_data['userid'],
self.master_model.users.admin_user_id
)
self.assertEqual(
user_data['token'],
self.master_model.tokens.admin_token
)
@ddt.ddt
class TestKeystoneModelServiceCatalog(TestBase):
def setUp(self):
super(TestKeystoneModelServiceCatalog, self).setUp(initialize=False)
self.model = KeystoneModel
self.db = self.master_model.database
self.master_model.init_database()
self.token = 'f1gur3f0ll0w$f4$h10n'
self.tenant_info = {
'name': 'foo',
'description': 'bar',
'enabled': True
}
self.user_info = {
'username': 'bar',
'email': 'foo@bar',
'password': 'b4R',
'apikey': 'foo',
'enabled': True
}
self.tenant_id = self.master_model.tenants.add(
tenant_name=self.tenant_info['name'],
description=self.tenant_info['description'],
enabled=self.tenant_info['enabled']
)
self.user_id = self.master_model.users.add(
tenant_id=self.tenant_id,
username=self.user_info['username'],
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=self.user_info['enabled']
)
self.master_model.tokens.add(
tenant_id=self.tenant_id,
user_id=self.user_id,
token=self.token
)
self.user_data = self.master_model.users.get_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id
)
self.token_data = self.master_model.tokens.validate_token(
self.token
)
def tearDown(self):
super(TestKeystoneModelServiceCatalog, self).tearDown()
def generate_roles(self, role_count):
role_names = [
'role_{0}'.format(x)
for x in range(role_count)
]
role_data = [
{
'name': role_name,
'id': self.master_model.roles.add(role_name)
}
for role_name in role_names
]
for role in role_data:
self.master_model.roles.add_user_role_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
role_id=role['id']
)
return (
role_names,
role_data,
)
def generate_services(
self, service_count, endpoint_count, endpoint_url_count,
has_region=True, has_version_info=True, has_version_list=True,
has_version_id=True
):
services = {
'service_{0}'.format(sn): {
'description': 'test service {0}'.format(sn),
'endpoints': [
{
'name': 'endpoint_{0}'.format(epn),
'region': 'r{0}'.format(epn) if has_region else None,
'version_info': (
'version.info' if has_version_info else None
),
'version_list': (
'version.list' if has_version_list else None
),
'version_id': str(epn) if has_version_id else None,
'urls': [
{
'name': 'url_{0}'.format(urln),
'url': 'ur.l/{0}'.format(urln)
}
for urln in range(endpoint_url_count)
]
}
for epn in range(endpoint_count)
]
}
for sn in range(service_count)
}
for service_name, service_info in six.iteritems(services):
services[service_name]['id'] = self.master_model.services.add(
service_name,
service_info['description']
)
for endpoint_info in service_info['endpoints']:
endpoint_info['id'] = self.master_model.endpoints.add(
services[service_name]['id'],
endpoint_info['region'],
endpoint_info['version_info'],
endpoint_info['version_list'],
endpoint_info['version_id']
)
for endpoint_url_info in endpoint_info['urls']:
endpoint_url_info['id'] = (
self.master_model.endpoints.add_url(
endpoint_info['id'],
endpoint_url_info['name'],
endpoint_url_info['url']
)
)
return services
def check_service_catalog_auth_section(self, auth_entry):
self.assertEqual(auth_entry['id'], self.token_data['token'])
self.assertEqual(auth_entry['expires'], self.token_data['expires'])
self.assertEqual(auth_entry['tenant']['id'], self.tenant_id)
self.assertEqual(
auth_entry['tenant']['name'], self.user_data['username']
)
def check_service_catalog_user_entry(
self, role_count, role_names, role_data, user_entry
):
self.assertEqual(user_entry['id'], self.user_id)
self.assertEqual(user_entry['name'], self.user_data['username'])
self.assertEqual(len(user_entry['roles']), role_count)
def assertRoleInList(role_id, role_name):
for role in role_data:
if role['id'] == role_id and role['name'] == role_name:
# found it
return
# failed to find it, so assert
self.assertFalse(
True,
msg=(
'Unable to find role ({0} - {1}) in role_data'.format(
role_id,
role_name
)
)
)
for role_entry in user_entry['roles']:
assertRoleInList(
role_entry['id'],
role_entry['name']
)
def check_service_catalog_services(self, services, services_entries):
self.assertEqual(len(services), len(services_entries))
for service_info in services_entries:
self.assertIn(service_info['name'], services)
self.assertEqual(
service_info['type'],
services[service_info['name']]['description']
)
self.assertEqual(
len(service_info['endpoints']),
len(services[service_info['name']]['endpoints'])
)
for endpoint_info in service_info['endpoints']:
found_endpoint = False
for endpoint_data in (
services[service_info['name']]['endpoints']
):
if (
endpoint_info['region'] == endpoint_data['region'] and
endpoint_info['versionId'] == endpoint_data[
'version_id'] and
endpoint_info['versionList'] == endpoint_data[
'version_list'] and
endpoint_info['versionInfo'] == endpoint_data[
'version_info']
):
for url_data in endpoint_data['urls']:
self.assertIn(url_data['name'], endpoint_info)
self.assertEqual(
url_data['url'],
endpoint_info[url_data['name']]
)
found_endpoint = True
self.assertTrue(
found_endpoint,
msg=(
"Unable to find endpoint data: {0}, {1}".format(
endpoint_data,
endpoint_info
)
)
)
def check_service_catalog(
self, role_count, role_names, role_data, services, service_catalog,
):
self.check_service_catalog_auth_section(
service_catalog['token']
)
self.check_service_catalog_user_entry(
role_count, role_names, role_data, service_catalog['user']
)
self.check_service_catalog_services(
services, service_catalog['serviceCatalog']
)
def test_service_catalog_auth_entry(self):
self.assertEqual(self.token_data['token'], self.token)
auth_entry = self.master_model.get_auth_token_entry(
self.token_data,
self.user_data
)
self.check_service_catalog_auth_section(auth_entry)
@ddt.data(
0,
1,
10,
20
)
def test_service_catalog_user_entry(self, role_count):
role_names, role_data = self.generate_roles(role_count)
user_entry = self.master_model.get_auth_user_entry(
self.user_data
)
self.check_service_catalog_user_entry(
role_count, role_names, role_data, user_entry
)
@ddt.data(
(0, 0, 0, True, True, True, True),
(1, 0, 0, True, True, True, True),
(1, 1, 0, True, True, True, True),
(5, 4, 0, True, True, True, True),
(5, 10, 0, True, True, True, True),
(0, 0, 1, True, True, True, True),
(1, 0, 1, True, True, True, True),
(1, 1, 1, True, True, True, True),
(5, 4, 1, True, True, True, True),
(5, 10, 1, True, True, True, True),
(1, 3, 2, False, True, True, True),
(1, 3, 2, True, False, True, True),
(1, 3, 2, True, True, False, True),
# TODO: Fix the below test cases
# (1, 3, 2, True, True, True, False),
# (1, 3, 2, False, False, False, False),
)
@ddt.unpack
def test_service_catalog_services_entry(
self, service_count, endpoint_count, endpoint_url_count,
has_region, has_version_info, has_version_list, has_version_id
):
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count,
has_region, has_version_info, has_version_list, has_version_id
)
services_entries = self.master_model.get_auth_service_catalog(
self.user_data
)
self.check_service_catalog_services(services, services_entries)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_service_catalog_services_entry_2(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
service_catalog = self.master_model.get_service_catalog(
self.token_data,
self.user_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_password_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
password_data = {
'username': '43failme',
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserError):
password_data = {
'username': self.user_info['username'],
'password': '$$$$'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidPasswordError):
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password'] + 'a'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
password_data = {
'username': self.user_info['username'] + 'a',
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidPasswordError):
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password'] + 'a'
}
self.master_model.password_authenticate(
password_data
)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password']
}
self.master_model.password_authenticate(
password_data
)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_password_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
password_data = {
'username': self.user_info['username'],
'password': self.user_info['password']
}
service_catalog = self.master_model.password_authenticate(
password_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_apikey_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
apikey_data = {
'username': '43failme',
'apiKey': self.user_info['password']
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': 9392
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidApiKeyError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey'] + 'a'
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
apikey_data = {
'username': self.user_info['username'] + 'a',
'apiKey': self.user_info['apikey']
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneUserInvalidApiKeyError):
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey'] + 'a'
}
self.master_model.apikey_authenticate(
apikey_data
)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey']
}
self.master_model.apikey_authenticate(
apikey_data
)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_apikey_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
apikey_data = {
'username': self.user_info['username'],
'apiKey': self.user_info['apikey']
}
service_catalog = self.master_model.apikey_authenticate(
apikey_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_tenant_id_token_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': self.tenant_id,
'token': {
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': self.tenant_id
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantId': 'aphrodite',
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneInvalidTokenError):
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token + 'a'
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneTenantError):
token_data = {
'tenantId': 93920395,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneTenantError):
self.master_model.tenants.update_status(
tenant_id=self.tenant_id,
enabled=False
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
self.master_model.tenants.update_status(
tenant_id=self.tenant_id,
enabled=True
)
new_tenant_id = self.master_model.tenants.add(
tenant_name='krash-kourse',
description='breaking things',
)
new_user_id = self.master_model.users.add(
tenant_id=new_tenant_id,
username='krispy',
email='kri@spy',
password='$py',
apikey='kryme',
enabled=True
)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
token_data = {
'tenantId': new_tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUnknownUserError):
self.master_model.tokens.add(
tenant_id=new_tenant_id,
user_id=new_user_id
)
token_data = {
'tenantId': new_tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneDisabledUserError):
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=False
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
self.master_model.users.update_by_id(
tenant_id=self.tenant_id,
user_id=self.user_id,
email=self.user_info['email'],
password=self.user_info['password'],
apikey=self.user_info['apikey'],
enabled=True
)
@ddt.data(
(0, 0, 0, 0),
(1, 1, 1, 1),
(5, 10, 3, 4),
(2, 20, 15, 10)
)
@ddt.unpack
def test_tenant_id_token_auth(
self, role_count, service_count, endpoint_count, endpoint_url_count
):
role_names, role_data = self.generate_roles(role_count)
services = self.generate_services(
service_count, endpoint_count, endpoint_url_count
)
token_data = {
'tenantId': self.tenant_id,
'token': {
'id': self.token
}
}
service_catalog = self.master_model.tenant_id_token_auth(
token_data
)
self.check_service_catalog(
role_count, role_names, role_data, services, service_catalog
)
def test_tenant_name_token_auth_failures(self):
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name'],
'token': {
}
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name']
}
self.master_model.tenant_id_token_auth(token_data)
with self.assertRaises(exceptions.KeystoneUserError):
token_data = {
'tenantName': self.tenant_info['name'] + 'x',
'token': {
'id': self.token
}
}
self.master_model.tenant_id_token_auth(token_data)
    @ddt.data(
        (0, 0, 0, 0),
        (1, 1, 1, 1),
        (5, 10, 3, 4),
        (2, 20, 15, 10)
    )
    @ddt.unpack
    def test_tenant_name_token_auth(
        self, role_count, service_count, endpoint_count, endpoint_url_count
    ):
        """Auth by (tenantName, token) returns a catalog matching the fixtures.

        Each ddt tuple sizes the fixture data: number of roles, number of
        services, endpoints per service, and URLs per endpoint.
        """
        role_names, role_data = self.generate_roles(role_count)
        services = self.generate_services(
            service_count, endpoint_count, endpoint_url_count
        )
        # Minimal valid payload: the tenant name plus the token to validate.
        token_data = {
            'tenantName': self.tenant_info['name'],
            'token': {
                'id': self.token
            }
        }
        service_catalog = self.master_model.tenant_name_token_auth(
            token_data
        )
        # Verify the returned catalog reflects every generated fixture.
        self.check_service_catalog(
            role_count, role_names, role_data, services, service_catalog
        )
| 29,577 | 2,281 | 44 |
bf143c74f7e3d07b8e12b01f9a1477c8eb4b225b | 3,471 | py | Python | GubaCrawler/GubaCrawler/settings.py | Alexanderli0816/Spiders-for-Chinese-Stock-Comments | 75224bd9b35aa2823238de28b9a7d202abd0bd17 | [
"MIT"
] | 1 | 2020-07-18T14:08:22.000Z | 2020-07-18T14:08:22.000Z | GubaCrawler/GubaCrawler/settings.py | Alexanderli0816/Spiders-for-Chinese-Stock-Comments | 75224bd9b35aa2823238de28b9a7d202abd0bd17 | [
"MIT"
] | null | null | null | GubaCrawler/GubaCrawler/settings.py | Alexanderli0816/Spiders-for-Chinese-Stock-Comments | 75224bd9b35aa2823238de28b9a7d202abd0bd17 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for GubaCrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'GubaCrawler'
SPIDER_MODULES = ['GubaCrawler.spiders']
NEWSPIDER_MODULE = 'GubaCrawler.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# NOTE(review): effectively "no delay"; AutoThrottle (enabled below) adjusts
# the actual delay at runtime — confirm this tiny static value is intended.
DOWNLOAD_DELAY = 0.0001
# DOWNLOAD_TIMEOUT = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 32
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
    # 'GubaDC.middlewares.GubadcSpiderMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Splash middlewares plus a project-local downloader middleware; the stock
# UserAgent/Cookies middlewares are disabled (None), presumably replaced by
# GubaDownloaderMiddleware — verify against GubaCrawler/middlewares.py.
DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
    'GubaCrawler.middlewares.GubaDownloaderMiddleware': 843,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# # 'scrapy.extensions.telnet.TelnetConsole': None,
# # 'scrapy.extensions.throttle.AutoThrottle': 561,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'GubaCrawler.pipelines.GubaPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 0.0001
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 20
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 1
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
REDIRECT_ENABLED = False
# Splash-aware dedupe filter so render requests are deduplicated correctly.
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
LOG_LEVEL = 'INFO'
SPLASH_URL = "http://localhost:8050/"
# JOBDIR = '/home/alex/桌面/Python/Project/Spider_Project/GubaCrawler/GubaCrawler/job_info'
| 36.15625 | 103 | 0.785364 | # -*- coding: utf-8 -*-
# Scrapy settings for GubaCrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'GubaCrawler'
SPIDER_MODULES = ['GubaCrawler.spiders']
NEWSPIDER_MODULE = 'GubaCrawler.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.0001
# DOWNLOAD_TIMEOUT = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 32
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
# 'GubaDC.middlewares.GubadcSpiderMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
'GubaCrawler.middlewares.GubaDownloaderMiddleware': 843,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# # 'scrapy.extensions.telnet.TelnetConsole': None,
# # 'scrapy.extensions.throttle.AutoThrottle': 561,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'GubaCrawler.pipelines.GubaPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 0.0001
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 20
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 1
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
REDIRECT_ENABLED = False
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
LOG_LEVEL = 'INFO'
SPLASH_URL = "http://localhost:8050/"
# JOBDIR = '/home/alex/桌面/Python/Project/Spider_Project/GubaCrawler/GubaCrawler/job_info'
| 0 | 0 | 0 |
fc37ef57f6ac984d04a92879ab385d15255a5e73 | 1,187 | py | Python | exec.py | developerHaneum/Progress | cb921222f9f9f04ad8c0fc8af78f4b75f72bc543 | [
"MIT"
] | null | null | null | exec.py | developerHaneum/Progress | cb921222f9f9f04ad8c0fc8af78f4b75f72bc543 | [
"MIT"
] | null | null | null | exec.py | developerHaneum/Progress | cb921222f9f9f04ad8c0fc8af78f4b75f72bc543 | [
"MIT"
] | null | null | null | # 기록장 (progress)
import pickle
from datetime import *
import os.path
# 작성함수
# 파일이 존재하는가?
print("-- Progress --")
clock = datetime.now()
line = 0
file = "File/progress_{}.md".format(clock.day) # 파일명 초기화
texts = ''
while True:
line += 1
text = input("%d: "%line)
if text == "q": # 저장 후 종료
Save(texts, file)
exit()
if text == "q!": # 저장하지 않고 종료
isfile = Isfile(file)
if isfile == 1:
exit()
else:
print("=> 저장을 하지 말까요? <=")
dist = str(input("y/n: "))
if dist == "y":
exit()
else:
Save(texts, file)
isfile = Isfile(file)
if isfile == 1:
print("=> 저장되었어요 <=")
exit()
else:
print("=> Error <=")
exit()
l = text.split() # Str -> List
l[-1] += "\n"
text = ' '.join(l)
texts += text
Save(texts, file) | 24.729167 | 56 | 0.443976 | # 기록장 (progress)
import pickle
from datetime import *
import os.path
# 작성함수
def Save(text, log):
    """Pickle *text* into the file at path *log* (binary mode).

    Returns the result of pickle.dump, which is always None.
    """
    with open(log, "wb") as handle:
        return pickle.dump(text, handle)
# 파일이 존재하는가?
def Isfile(file):
    """Return 1 when *file* names an existing regular file, otherwise -1."""
    return 1 if os.path.isfile(file) else -1
# Interactive diary loop: read numbered lines from stdin until the user
# types "q" (save and quit) or "q!" (quit, asking about saving if needed).
# Assumes a "File/" directory already exists next to the script — TODO confirm.
print("-- Progress --")
clock = datetime.now()
line = 0
file = "File/progress_{}.md".format(clock.day) # one output file per day of month
texts = ''
while True:
    line += 1
    text = input("%d: "%line)
    if text == "q": # save and then quit
        Save(texts, file)
        exit()
    if text == "q!": # quit without saving (confirm if no file exists yet)
        isfile = Isfile(file)
        if isfile == 1:
            exit()
        else:
            print("=> 저장을 하지 말까요? <=")
            dist = str(input("y/n: "))
            if dist == "y":
                exit()
            else:
                Save(texts, file)
                isfile = Isfile(file)
                if isfile == 1:
                    print("=> 저장되었어요 <=")
                    exit()
                else:
                    print("=> Error <=")
                    exit()
    # Normalize whitespace and append the line with a trailing newline.
    l = text.split() # Str -> List
    # NOTE(review): an empty input line makes l == [] and l[-1] raises
    # IndexError — confirm whether blank lines should be allowed.
    l[-1] += "\n"
    text = ' '.join(l)
    texts += text
# NOTE(review): unreachable — every path out of the loop calls exit().
Save(texts, file)
d3428bbd414a82086caf4c230ff2d147c6472619 | 2,644 | py | Python | skeleton4/project/bunker.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
] | null | null | null | skeleton4/project/bunker.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
] | null | null | null | skeleton4/project/bunker.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
] | null | null | null | from typing import List
from project.medicine.medicine import Medicine
from project.supply.supply import Supply
from project.survivor import Survivor
| 33.05 | 83 | 0.603631 | from typing import List
from project.medicine.medicine import Medicine
from project.supply.supply import Supply
from project.survivor import Survivor
class Bunker:
def __init__(self):
self.survivors: List[Survivor] = []
self.supplies: List[Supply] = []
self.medicine: List[Medicine] = []
@property
def food(self):
temp = [s for s in self.supplies if s.__class__.__name__ == 'FoodSupply']
if len(temp) == 0:
raise IndexError("There are no food supplies left!")
return temp
@property
def water(self):
temp = [s for s in self.supplies if s.__class__.__name__ == 'WaterSupply']
if len(temp) == 0:
raise ValueError("There are no water supplies left!")
return temp
@property
def painkillers(self):
temp = [s for s in self.medicine if s.__class__.__name__ == 'Painkiller']
if len(temp) == 0:
raise IndexError("There are no painkillers left!")
return temp
@property
def salves(self):
temp = [s for s in self.medicine if s.__class__.__name__ == 'Salve']
if len(temp) == 0:
raise IndexError("There are no salves left!")
return temp
def add_survivor(self, survivor: Survivor):
if survivor in self.survivors:
raise ValueError(f"Survivor with name {survivor.name} already exists.")
self.survivors.append(survivor)
def add_supply(self, supply: Supply):
self.supplies.append(supply)
def add_medicine(self, medicine: Medicine):
self.medicine.append(medicine)
def heal(self, survivor: Survivor, medicine_type: str):
if survivor.needs_healing:
if medicine_type == "Painkiller":
pill = self.painkillers[-1]
else:
pill = self.salves[-1]
survivor.health += pill.health_increase
del self.medicine[-1]
return f"{survivor.name} healed successfully with {medicine_type}"
def sustain(self, survivor: Survivor, sustenance_type: str):
if survivor.needs_sustenance:
if sustenance_type == "FoodSupply":
soup = self.food[-1]
else:
soup = self.water[-1]
survivor.needs += soup.needs_increase
del self.supplies[-1]
return f"{survivor.name} sustained successfully with {sustenance_type}"
def next_day(self):
for s in self.survivors:
s.needs -= s.age * 2
for s in self.survivors:
self.sustain(s, 'FoodSupply')
self.sustain(s, 'WaterSupply')
| 2,125 | 344 | 23 |
82dd39215f31889f78650f985a55e4b6d358dbe2 | 697 | py | Python | aoc2020/day09/part1.py | shoreofwonder/adventofcode | 15fd2f761533a48b456e510b0a59f7cbc64e8e91 | [
"MIT"
] | null | null | null | aoc2020/day09/part1.py | shoreofwonder/adventofcode | 15fd2f761533a48b456e510b0a59f7cbc64e8e91 | [
"MIT"
] | null | null | null | aoc2020/day09/part1.py | shoreofwonder/adventofcode | 15fd2f761533a48b456e510b0a59f7cbc64e8e91 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main() | 21.121212 | 52 | 0.522238 |
def read_input():
    """Yield each line of input.txt as an int (one puzzle number per line)."""
    with open('input.txt') as handle:
        for raw in handle:
            yield int(raw.strip())
def is_sum_of_two_in(num, buf):
    """Return True when two entries at distinct indices of buf sum to num."""
    size = len(buf)
    return any(buf[i] + buf[j] == num
               for i in range(size)
               for j in range(i + 1, size))
def main():
    """AoC 2020 day 9 part 1: print the first number that is not the sum
    of two of the 25 numbers immediately before it, then return.
    """
    buf=[0]*25      # ring buffer holding the 25 most recent numbers
    bufptr=0        # next write position in the ring
    full=False      # True once the 25-number preamble has been consumed
    for num in read_input():
        if full:
            # Only validate after the preamble; buf order doesn't matter
            # because is_sum_of_two_in checks all index pairs.
            if not is_sum_of_two_in(num, buf):
                print(num)
                return
        buf[bufptr]=num
        if bufptr==24:
            full=True
        bufptr=(bufptr+1)%25
main() | 587 | 0 | 69 |
9ee1069e0a49ec2f308cfb595f27703f270a6808 | 3,816 | py | Python | game_engine/tiles.py | brandontrabucco/game_engine | c32fd7d6b04afb428f2c3d46c56bf31623f856bf | [
"MIT"
] | null | null | null | game_engine/tiles.py | brandontrabucco/game_engine | c32fd7d6b04afb428f2c3d46c56bf31623f856bf | [
"MIT"
] | null | null | null | game_engine/tiles.py | brandontrabucco/game_engine | c32fd7d6b04afb428f2c3d46c56bf31623f856bf | [
"MIT"
] | null | null | null | '''Author: Brandon Trabucco, Copyright 2019
Helper functions to display and run a simple game'''
from game_engine.colors import *
from game_engine.drawable import Drawable
from game_engine.entity import Entity
from game_engine.interactables import *
####################################
# lets make some game tiles to use #
####################################
| 24.941176 | 85 | 0.60718 | '''Author: Brandon Trabucco, Copyright 2019
Helper functions to display and run a simple game'''
from game_engine.colors import *
from game_engine.drawable import Drawable
from game_engine.entity import Entity
from game_engine.interactables import *
####################################
# lets make some game tiles to use #
####################################
class Tile(Drawable):
    """A 40x30-pixel grid tile: a colored rectangle with one centered letter.

    Stores the Tk canvas item handles it creates so undraw() can delete
    exactly what draw() placed on the canvas.
    """
    def __init__(self, letter, text_color, background_color):
        # Drawable is constructed with this tile's pixel size (40 wide, 30
        # high) — presumably it sets self.width/self.height used in draw().
        super(Tile, self).__init__(40, 30)
        assert(isinstance(letter, str) and len(letter) == 1 and
            isinstance(text_color, Color) and isinstance(background_color, Color))
        self.letter = letter
        self.text_color = text_color
        self.background_color = background_color
        # Canvas item ids; None means "not currently drawn".
        self.handle_to_background = None
        self.handle_to_text = None
    def __eq__(self, x):
        # Tiles compare equal on letter + colors only; canvas handles ignored.
        # NOTE(review): __eq__ without __hash__ makes Tile unhashable in
        # Python 3 — confirm tiles are never used as dict/set keys.
        assert(isinstance(x, Tile))
        return (self.letter == x.letter and
            self.text_color == x.text_color and
            self.background_color == x.background_color)
    def draw(self, canvas, x, y):
        """Draw this tile at grid cell (x, y); pixel coords scale by tile size."""
        super(Tile, self).draw(canvas)
        self.handle_to_background = canvas.create_rectangle(
            (x ) * self.width, (y ) * self.height,
            (x + 1) * self.width, (y + 1) * self.height,
            fill=self.background_color.hex(), outline=self.background_color.hex())
        # Letter is centered in the cell; a negative Tk font size means pixels.
        self.handle_to_text = canvas.create_text(
            (x +.5) * self.width, (y +.5) * self.height,
            text=self.letter, font=('Sans Serif', -20),
            fill=self.text_color.hex())
    def undraw(self, canvas):
        """Delete this tile's canvas items (if drawn) and forget their handles."""
        super(Tile, self).undraw(canvas)
        if self.handle_to_background is not None:
            canvas.delete(self.handle_to_background)
            self.handle_to_background = None
        if self.handle_to_text is not None:
            canvas.delete(self.handle_to_text)
            self.handle_to_text = None
class Null(Tile):
    """Invisible placeholder tile: a black space on a black background."""
    def __init__(self):
        super(Null, self).__init__(
            letter=' ', text_color=Black(), background_color=Black())
class Background(Tile, Walkable):
    """Backdrop tile: grey '.' on black; mixes in Walkable."""
    def __init__(self):
        super(Background, self).__init__(
            letter='.', text_color=Grey(), background_color=Black())
class Wall(Tile, Breakable):
    """Wall tile: black '|' on grey; mixes in Breakable."""
    def __init__(self):
        super(Wall, self).__init__(
            letter='|', text_color=Black(), background_color=Grey())
class Floor(Tile, Walkable):
    """Floor tile: grey '.' on white; mixes in Walkable.

    NOTE(review): calls Tile.__init__ directly instead of super() like its
    siblings — with multiple inheritance this bypasses the MRO; confirm
    Walkable needs no initialization.
    """
    def __init__(self):
        Tile.__init__(self,
            letter='.', text_color=Grey(), background_color=White())
class Water(Tile, Harmful):
    """Water tile: white '~' on cyan; mixes in Harmful."""
    def __init__(self):
        super(Water, self).__init__(
            letter='~', text_color=White(), background_color=Cyan())
class Tree(Tile, Breakable):
    """Tree tile: black '^' on green; mixes in Breakable."""
    def __init__(self):
        super(Tree, self).__init__(
            letter='^', text_color=Black(), background_color=Green())
class Grass(Tile, Walkable):
    """Grass tile: green '=' on brown; mixes in Walkable."""
    def __init__(self):
        super(Grass, self).__init__(
            letter='=', text_color=Green(), background_color=Brown())
class Dirt(Tile, Walkable):
    """Dirt tile: grey '.' on brown; mixes in Walkable."""
    def __init__(self):
        super(Dirt, self).__init__(
            letter='.', text_color=Grey(), background_color=Brown())
class Rock(Tile, Breakable):
    """Rock tile: red 'A' on brown; mixes in Breakable."""
    def __init__(self):
        super(Rock, self).__init__(
            letter='A', text_color=Red(), background_color=Brown())
class Player(Tile, Entity):
    """Player tile: blue '^' on yellow, positioned via Entity at (x, y).

    NOTE(review): initializes both bases explicitly (not via super());
    mirrors Floor's style — confirm this is deliberate for the dual bases.
    """
    def __init__(self, x, y):
        Tile.__init__(self,
            letter='^', text_color=Blue(), background_color=Yellow())
        Entity.__init__(self, x, y)
class Enemy(Tile, Entity):
def __init__(self, x, y):
Tile.__init__(self,
letter='^', text_color=Blue(), background_color=Magenta())
Entity.__init__(self, x, y) | 2,678 | 66 | 681 |
49e6e8c76fb19d8b76b327c10be6ecacb8a85a53 | 4,387 | py | Python | anomalib/core/model/k_center_greedy.py | TheaperDeng/anomalib | f1c8a6b553b82a8974ccc9fd27aa5f9b939d5917 | [
"Apache-2.0"
] | null | null | null | anomalib/core/model/k_center_greedy.py | TheaperDeng/anomalib | f1c8a6b553b82a8974ccc9fd27aa5f9b939d5917 | [
"Apache-2.0"
] | null | null | null | anomalib/core/model/k_center_greedy.py | TheaperDeng/anomalib | f1c8a6b553b82a8974ccc9fd27aa5f9b939d5917 | [
"Apache-2.0"
] | null | null | null | """This module comprises PatchCore Sampling Methods for the embedding.
- k Center Greedy Method
Returns points that minimizes the maximum distance of any point to a center.
. https://arxiv.org/abs/1708.00489
"""
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from anomalib.core.model.random_projection import SparseRandomProjection
class KCenterGreedy:
"""Implements k-center-greedy method.
Args:
embedding (Tensor): Embedding vector extracted from a CNN
sampling_ratio (float): Ratio to choose coreset size from the embedding size.
Example:
>>> embedding.shape
torch.Size([219520, 1536])
>>> sampler = KCenterGreedy(embedding=embedding)
>>> sampled_idxs = sampler.select_coreset_idxs()
>>> coreset = embedding[sampled_idxs]
>>> coreset.shape
torch.Size([219, 1536])
"""
def reset_distances(self) -> None:
"""Reset minimum distances."""
self.min_distances = None
def update_distances(self, cluster_centers: List[int]) -> None:
"""Update min distances given cluster centers.
Args:
cluster_centers (List[int]): indices of cluster centers
"""
if cluster_centers:
centers = self.features[cluster_centers]
distance = F.pairwise_distance(self.features, centers, p=2).reshape(-1, 1)
if self.min_distances is None:
self.min_distances = distance
else:
self.min_distances = torch.minimum(self.min_distances, distance)
def get_new_idx(self) -> int:
"""Get index value of a sample.
Based on minimum distance of the cluster
Returns:
int: Sample index
"""
if isinstance(self.min_distances, Tensor):
idx = int(torch.argmax(self.min_distances).item())
else:
raise ValueError(f"self.min_distances must be of type Tensor. Got {type(self.min_distances)}")
return idx
def select_coreset_idxs(self, selected_idxs: Optional[List[int]] = None) -> List[int]:
"""Greedily form a coreset to minimize the maximum distance of a cluster.
Args:
selected_idxs: index of samples already selected. Defaults to an empty set.
Returns:
indices of samples selected to minimize distance to cluster centers
"""
if selected_idxs is None:
selected_idxs = []
if self.embedding.ndim == 2:
self.model.fit(self.embedding)
self.features = self.model.transform(self.embedding)
self.reset_distances()
else:
self.features = self.embedding.reshape(self.embedding.shape[0], -1)
self.update_distances(cluster_centers=selected_idxs)
selected_coreset_idxs: List[int] = []
idx = int(torch.randint(high=self.n_observations, size=(1,)).item())
for _ in range(self.coreset_size):
self.update_distances(cluster_centers=[idx])
idx = self.get_new_idx()
if idx in selected_idxs:
raise ValueError("New indices should not be in selected indices.")
self.min_distances[idx] = 0
selected_coreset_idxs.append(idx)
return selected_coreset_idxs
def sample_coreset(self, selected_idxs: Optional[List[int]] = None) -> Tensor:
"""Select coreset from the embedding.
Args:
selected_idxs: index of samples already selected. Defaults to an empty set.
Returns:
Tensor: Output coreset
Example:
>>> embedding.shape
torch.Size([219520, 1536])
>>> sampler = KCenterGreedy(...)
>>> coreset = sampler.sample_coreset()
>>> coreset.shape
torch.Size([219, 1536])
"""
idxs = self.select_coreset_idxs(selected_idxs)
coreset = self.embedding[idxs]
return coreset
| 32.496296 | 106 | 0.62708 | """This module comprises PatchCore Sampling Methods for the embedding.
- k Center Greedy Method
Returns points that minimizes the maximum distance of any point to a center.
. https://arxiv.org/abs/1708.00489
"""
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from anomalib.core.model.random_projection import SparseRandomProjection
class KCenterGreedy:
"""Implements k-center-greedy method.
Args:
embedding (Tensor): Embedding vector extracted from a CNN
sampling_ratio (float): Ratio to choose coreset size from the embedding size.
Example:
>>> embedding.shape
torch.Size([219520, 1536])
>>> sampler = KCenterGreedy(embedding=embedding)
>>> sampled_idxs = sampler.select_coreset_idxs()
>>> coreset = embedding[sampled_idxs]
>>> coreset.shape
torch.Size([219, 1536])
"""
def __init__(self, embedding: Tensor, sampling_ratio: float) -> None:
self.embedding = embedding
self.coreset_size = int(embedding.shape[0] * sampling_ratio)
self.model = SparseRandomProjection(eps=0.9)
self.features: Tensor
self.min_distances: Tensor = None
self.n_observations = self.embedding.shape[0]
def reset_distances(self) -> None:
"""Reset minimum distances."""
self.min_distances = None
def update_distances(self, cluster_centers: List[int]) -> None:
"""Update min distances given cluster centers.
Args:
cluster_centers (List[int]): indices of cluster centers
"""
if cluster_centers:
centers = self.features[cluster_centers]
distance = F.pairwise_distance(self.features, centers, p=2).reshape(-1, 1)
if self.min_distances is None:
self.min_distances = distance
else:
self.min_distances = torch.minimum(self.min_distances, distance)
def get_new_idx(self) -> int:
"""Get index value of a sample.
Based on minimum distance of the cluster
Returns:
int: Sample index
"""
if isinstance(self.min_distances, Tensor):
idx = int(torch.argmax(self.min_distances).item())
else:
raise ValueError(f"self.min_distances must be of type Tensor. Got {type(self.min_distances)}")
return idx
def select_coreset_idxs(self, selected_idxs: Optional[List[int]] = None) -> List[int]:
"""Greedily form a coreset to minimize the maximum distance of a cluster.
Args:
selected_idxs: index of samples already selected. Defaults to an empty set.
Returns:
indices of samples selected to minimize distance to cluster centers
"""
if selected_idxs is None:
selected_idxs = []
if self.embedding.ndim == 2:
self.model.fit(self.embedding)
self.features = self.model.transform(self.embedding)
self.reset_distances()
else:
self.features = self.embedding.reshape(self.embedding.shape[0], -1)
self.update_distances(cluster_centers=selected_idxs)
selected_coreset_idxs: List[int] = []
idx = int(torch.randint(high=self.n_observations, size=(1,)).item())
for _ in range(self.coreset_size):
self.update_distances(cluster_centers=[idx])
idx = self.get_new_idx()
if idx in selected_idxs:
raise ValueError("New indices should not be in selected indices.")
self.min_distances[idx] = 0
selected_coreset_idxs.append(idx)
return selected_coreset_idxs
def sample_coreset(self, selected_idxs: Optional[List[int]] = None) -> Tensor:
"""Select coreset from the embedding.
Args:
selected_idxs: index of samples already selected. Defaults to an empty set.
Returns:
Tensor: Output coreset
Example:
>>> embedding.shape
torch.Size([219520, 1536])
>>> sampler = KCenterGreedy(...)
>>> coreset = sampler.sample_coreset()
>>> coreset.shape
torch.Size([219, 1536])
"""
idxs = self.select_coreset_idxs(selected_idxs)
coreset = self.embedding[idxs]
return coreset
| 332 | 0 | 27 |
672fef1691a8537c32627337c5c56d1950d05e21 | 9,458 | py | Python | CrvDatabase/CrvUtilities/myTime.py | Mu2e/CRVHardwareDB | d288a78fef89244e80e528fce946d043623ca25a | [
"Apache-2.0"
] | null | null | null | CrvDatabase/CrvUtilities/myTime.py | Mu2e/CRVHardwareDB | d288a78fef89244e80e528fce946d043623ca25a | [
"Apache-2.0"
] | null | null | null | CrvDatabase/CrvUtilities/myTime.py | Mu2e/CRVHardwareDB | d288a78fef89244e80e528fce946d043623ca25a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
## File = 'myTime.py"
#
# A python script to return the time and
# date from the computer when called.
# Written by Merrill Jenkins 2014Dec17
# Department of Physics
# University of South Alabama
# Mobile, AL 36688
# Used "Core Python Programmaing", Page 49
# And "Python Essential Reference", Page 405
##
## Modified by cmj2021May11 to remove tabs for blocked statements
#
# To use:
# import myTime *
# t = myTime()
# t.getComputerTime() # call at instant you want time
# t.getCalendarDate() # returns string with calandar date
# t.getClockTime() # returns as string with the clock time
# To test, just run this file (with stand alone test program at the end)
# python myTime.py
# Other functions available:
# getYear() # returns string with year
# getMon() # returns string with month
# getDay() # returns string with day
# getHour() # returns string with hour
# getSec() # returns string with second
# getDayOfWeek() # returns string with the day of the week
# getTimeZone() # returns string with the time zone
# getInternationalCalendarDate() # returns string with calendar date and time zone
# getInternationalClockTime() # returns string with time and time zone
# getTimeForSavedFiles() # returns string with calandar date and time
# # in format to use with file name
# getInternationalTimeForSavedFiles() # returns string with calandar date,
# # time and time zone in format to us
# # with file name
#
## To transmit any changes to the dependent
## python scripts, complete these steps in the
## Utilities directory:
## > rm Utilities.zip
## > zip -r Utilities.zip *.py
## Modified by cmj2021Mar1.... Convert from python2 to python3: 2to3 -w *.py
## Modified by cmj2021Mar1.... replace dataloader with dataloader3
## Modified by cmj2021May11... replace tabs with spaces for block statements to convert to python 3
#
#!/bin/env python
from time import *
#
# Get the computer time at the instant this method is called
# This time is decoded into useful strings with the other methods
#
# Test as stand alone.... This program only runs if this script is run in the command line:
# python myTime.py
if __name__ == "__main__":
whatTimeIsIt = myTime()
whatTimeIsIt.getComputerTime()
print('Date = %s' % whatTimeIsIt.getDate())
print('Year = %s' % whatTimeIsIt.getYear())
print('Month = %s' % whatTimeIsIt.getMonth())
print('Day = %s' % whatTimeIsIt.getDay())
print('Hour = %s' % whatTimeIsIt.getHour())
print('Minute = %s' % whatTimeIsIt.getMin())
print('Second = %s' % whatTimeIsIt.getSec())
print('Day of the Week = %s' % whatTimeIsIt.getDayOfWeek())
print('TimeZone = %s' % whatTimeIsIt.getTimeZone())
print() ; print()
print('------------------------- ')
print('Calendar = %s' % whatTimeIsIt.getCalendarDate())
print('Calendar with Day of Week = %s' % whatTimeIsIt.getCalendarDateWithDay())
print('Clock Time = %s' % whatTimeIsIt.getClockTime())
print('International Clock Time = %s' % whatTimeIsIt.getInternationalClockTime())
print('FileNameFragment = %s' % whatTimeIsIt.getTimeForSavedFiles())
print('FileNameFragment (with Time Zone) = %s' % whatTimeIsIt.getInternationalTimeForSavedFiles())
## Test that clock is updated after each call:
from time import sleep
print(); print(); print(' ============================ ')
for mm in range(1,10):
whatTimeIsIt.getComputerTime()
myDate = whatTimeIsIt.getCalendarDateWithDay()
myTime = whatTimeIsIt.getInternationalClockTime()
print('%s Clock Time = %s' %(myDate,myTime))
sleep(1.0) # ask the script to sleep for one second between iterations
| 49.260417 | 114 | 0.561218 | # -*- coding: utf-8 -*-
## File = 'myTime.py"
#
# A python script to return the time and
# date from the computer when called.
# Written by Merrill Jenkins 2014Dec17
# Department of Physics
# University of South Alabama
# Mobile, AL 36688
# Used "Core Python Programming", Page 49
# And "Python Essential Reference", Page 405
##
## Modified by cmj2021May11 to remove tabs for blocked statements
#
# To use:
# import myTime *
# t = myTime()
# t.getComputerTime() # call at instant you want time
# t.getCalendarDate() # returns string with calendar date
# t.getClockTime() # returns a string with the clock time
# To test, just run this file (with stand alone test program at the end)
# python myTime.py
# Other functions available:
# getYear() # returns string with year
# getMon() # returns string with month
# getDay() # returns string with day
# getHour() # returns string with hour
# getSec() # returns string with second
# getDayOfWeek() # returns string with the day of the week
# getTimeZone() # returns string with the time zone
# getInternationalCalendarDate() # returns string with calendar date and time zone
# getInternationalClockTime() # returns string with time and time zone
# getTimeForSavedFiles() # returns string with calandar date and time
# # in format to use with file name
# getInternationalTimeForSavedFiles() # returns string with calandar date,
# # time and time zone in format to us
# # with file name
#
## To transmit any changes to the dependent
## python scripts, complete these steps in the
## Utilities directory:
## > rm Utilities.zip
## > zip -r Utilities.zip *.py
## Modified by cmj2021Mar1.... Convert from python2 to python3: 2to3 -w *.py
## Modified by cmj2021Mar1.... replace dataloader with dataloader3
## Modified by cmj2021May11... replace tabs with spaces for block statements to convert to python 3
#
#!/bin/env python
from time import *
class myTime:
def __init__(self):
# Initialize variables...
self.date = 0; self.year = 0; self.month = 0
self.day = 0; self.hour = 0; self.minute = 0
self.second = 0; self.tmeZone = 0; self.dayOfWeek = 0
self.calendarDate = 0;
self.calendarDateWithDay = 0;
self.clockTime = 0
self.clockTime = 0
self.timeForSavedFiles = 0;
self.inputTuple = ''
self.computerTime = ''
#
# Get the computer time at the instant this method is called
# This time is decoded into useful strings with the other methods
    def getComputerTime(self):
        """Snapshot the wall clock: cache localtime() and its asctime string.

        Call this before any of the get* accessors — they all slice
        self.computerTime or format self.inputTuple.
        """
        # Initialize before each call to the computer clock.
        self.date = 0; self.year = 0; self.month = 0
        self.day = 0; self.hour = 0; self.minute = 0
        # NOTE(review): 'tmeZone' and 'calandarDate' look like typos of
        # 'timeZone'/'calendarDate'; the accessors write the correctly
        # spelled names, so these zeroed attributes are never read back.
        self.second = 0; self.tmeZone = 0; self.dayOfWeek = 0
        self.calandarDate = 0; self.clockTime = 0
        self.timeForSavedFiles = 0;
        # Load the tuple that has the computer time.
        self.inputTuple = ''
        self.inputTuple = localtime()
        self.computerTime = asctime(self.inputTuple)
def getDate(self): # method to get
self.date = self.computerTime
return(self.date)
def getYear(self):
self.year = self.computerTime[20:24]
return(self.year)
def getMonth(self): # method to get month
self.month = self.computerTime[4:7]
return(self.month)
def getDay(self): # method to get day
self.day = self.computerTime[8:10]
# if int(self.day) < 10:
if self.computerTime[8:9] == ' ':
self.day=self.computerTime[9:10]
return(self.day)
def getZeroDay(self): # method to get day
self.day = '0d2' % self.computerTime[8:10]
# if int(self.day) < 10:
# if self.computerTime[8:9] == ' ':
# self.day=self.computerTime[9:10]
return(self.day)
def getHour(self): # method to get hour
self.hour = self.computerTime[11:13]
return(self.hour)
def getMin(self): # method to get minutes
self.minute = self.computerTime[14:16]
return(self.minute)
def getSec(self): # method to get seconds
self.second = self.computerTime[17:19]
return(self.second)
def getTimeZone(self): # method to get time zone
self.timeZone = strftime('%Z',self.inputTuple)
return(self.timeZone)
def getDayOfWeek(self): # method to get day of the week
self.dayOfWeek = self.computerTime[0:3]
return(self.dayOfWeek)
def getCalendarDate(self): # construct the calandar date
myMonth = self.getMonth()
myDay = self.getDay()
myYear = self.getYear()
self.calendarDate = myMonth+'/'+myDay+'/'+myYear
return(self.calendarDate)
def getCalendarDateWithDay(self): # construct the calandar date
myMonth = self.getMonth()
myDay = self.getDay()
myYear = self.getYear()
myDayOfWeek = self.getDayOfWeek()
self.calendarDateWithDay = '('+myDayOfWeek+')'+myMonth+'/'+myDay+'/'+myYear
return(self.calendarDateWithDay)
def getClockTime(self): # construct the local wall clock time
myHour = self.getHour()
myMinute = self.getMin()
mySecond = self.getSec()
self.clockTime = myHour+':'+myMinute+':'+mySecond
return(self.clockTime)
def getInternationalClockTime(self): # construct the local wall clock time
# with Time Zone
myHour = self.getHour()
myMinute = self.getMin()
mySecond = self.getSec()
myTimeZone = self.getTimeZone()
myclockTime = myTimeZone+'::'+myHour+':'+myMinute+':'+mySecond
return(myclockTime)
def getTimeForSavedFiles(self): # construct a string to be included in file names
myMonth = self.getMonth()
myDay = self.getDay()
myYear = self.getYear()
myHour = self.getHour()
myMinute = self.getMin()
mySecond = self.getSec()
self.timeForSavedFiles = '_'+myYear+myMonth+myDay+'_'+myHour+'_'+myMinute+'_'+mySecond+'_'
return(self.timeForSavedFiles)
def getInternationalTimeForSavedFiles(self): # construct a string to be included
# in file names with time zone
myMonth = self.getMonth()
myDay = self.getDay()
myYear = self.getYear()
myHour = self.getHour()
myMinute = self.getMin()
mySecond = self.getSec()
myTimeZone = self.getTimeZone()
myTimeForSavedFiles = '_'+myTimeZone+'_'+myYear+myMonth+myDay+'_'+myHour+'_'+myMinute+'_'+mySecond+'_'
return(myTimeForSavedFiles)
#
# Test as stand alone.... This program only runs if this script is run in the command line:
# python myTime.py
if __name__ == "__main__":
whatTimeIsIt = myTime()
whatTimeIsIt.getComputerTime()
print('Date = %s' % whatTimeIsIt.getDate())
print('Year = %s' % whatTimeIsIt.getYear())
print('Month = %s' % whatTimeIsIt.getMonth())
print('Day = %s' % whatTimeIsIt.getDay())
print('Hour = %s' % whatTimeIsIt.getHour())
print('Minute = %s' % whatTimeIsIt.getMin())
print('Second = %s' % whatTimeIsIt.getSec())
print('Day of the Week = %s' % whatTimeIsIt.getDayOfWeek())
print('TimeZone = %s' % whatTimeIsIt.getTimeZone())
print() ; print()
print('------------------------- ')
print('Calendar = %s' % whatTimeIsIt.getCalendarDate())
print('Calendar with Day of Week = %s' % whatTimeIsIt.getCalendarDateWithDay())
print('Clock Time = %s' % whatTimeIsIt.getClockTime())
print('International Clock Time = %s' % whatTimeIsIt.getInternationalClockTime())
print('FileNameFragment = %s' % whatTimeIsIt.getTimeForSavedFiles())
print('FileNameFragment (with Time Zone) = %s' % whatTimeIsIt.getInternationalTimeForSavedFiles())
## Test that clock is updated after each call:
from time import sleep
print(); print(); print(' ============================ ')
for mm in range(1,10):
whatTimeIsIt.getComputerTime()
myDate = whatTimeIsIt.getCalendarDateWithDay()
myTime = whatTimeIsIt.getInternationalClockTime()
print('%s Clock Time = %s' %(myDate,myTime))
sleep(1.0) # ask the script to sleep for one second between iterations
| 4,676 | -8 | 526 |
50b6c1aa906181c9736f81e540fc75e98009ba53 | 1,551 | py | Python | execnet_ssh.py | dimaqq/recipes | e424ef3ab68a38f9cabc47ee142147fa578f2c81 | [
"MIT"
] | null | null | null | execnet_ssh.py | dimaqq/recipes | e424ef3ab68a38f9cabc47ee142147fa578f2c81 | [
"MIT"
] | 1 | 2015-12-15T12:59:37.000Z | 2015-12-15T12:59:37.000Z | execnet_ssh.py | dimaqq/recipes | e424ef3ab68a38f9cabc47ee142147fa578f2c81 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import execnet, execnet.gateway, execnet.multi
execnet.multi.Group.makeportgateway = makeportgateway
execnet.makeportgateway = execnet.multi.default_group.makeportgateway
# originally posted as http://code.activestate.com/recipes/577545-monkey-patch-execnet-with-more-ssh-settings-port-i/
| 41.918919 | 117 | 0.656351 | #!/usr/bin/python
import execnet, execnet.gateway, execnet.multi
class SshPortGateway(execnet.gateway.SshGateway):
    # execnet SshGateway variant that honours extra ssh options:
    # config file (-F), port (-p), identity file (-i) and BatchMode.
    def __init__(self, sshaddress, id, remotepython = None,
                 ssh_config=None,
                 ssh_port=None,
                 ssh_identity=None,
                 ssh_batchmode=None):
        """Build an ssh command line with the extra options and spawn it.

        NOTE(review): deliberately bypasses SshGateway.__init__ and calls
        the grandparent PopenCmdGateway.__init__ directly so that the
        custom argument list is used -- confirm this still matches the
        targeted execnet version's constructor chain.
        """
        self.remoteaddress = sshaddress
        if not remotepython: remotepython = "python"
        args = ['ssh', '-C' ]  # -C enables ssh compression
        if ssh_config: args.extend(['-F', ssh_config])
        if ssh_port: args.extend(['-p', ssh_port])
        if ssh_identity: args.extend(['-i', ssh_identity])
        if ssh_batchmode: args.extend(["-o", "BatchMode yes"])
        # The remote end bootstraps execnet by running its bootstrap one-liner.
        remotecmd = '%s -c "%s"' % (remotepython, execnet.gateway.popen_bootstrapline)
        args.extend([sshaddress, remotecmd])
        execnet.gateway.PopenCmdGateway.__init__(self, args, id=id)
def makeportgateway(self, spec):
    """Create and register an SshPortGateway from an execnet spec string.

    Monkey-patched onto execnet.multi.Group; mirrors makegateway() but
    forwards the extra ssh_* attributes of the spec to SshPortGateway.
    """
    xspec = execnet.XSpec(spec)
    self.allocate_id(xspec)
    gateway = SshPortGateway(
        xspec.ssh,
        id=xspec.id,
        remotepython=xspec.python,
        ssh_config=xspec.ssh_config,
        ssh_port=xspec.ssh_port,
        ssh_identity=xspec.ssh_identity,
        ssh_batchmode=xspec.ssh_batchmode,
    )
    gateway.spec = xspec
    self._register(gateway)
    # TODO add spec.{chdir,nice,env}
    return gateway
execnet.multi.Group.makeportgateway = makeportgateway
execnet.makeportgateway = execnet.multi.default_group.makeportgateway
# originally posted as http://code.activestate.com/recipes/577545-monkey-patch-execnet-with-more-ssh-settings-port-i/
| 1,147 | 28 | 68 |
d1201202ecf22025525f0b8320d9ec26d1233ee1 | 1,458 | py | Python | language/gen_lang.py | HeadCrab654/Marutools | 427bea3730fb9da6e4a5cf81e244586f833df51a | [
"MIT"
] | null | null | null | language/gen_lang.py | HeadCrab654/Marutools | 427bea3730fb9da6e4a5cf81e244586f833df51a | [
"MIT"
] | null | null | null | language/gen_lang.py | HeadCrab654/Marutools | 427bea3730fb9da6e4a5cf81e244586f833df51a | [
"MIT"
] | null | null | null | src_lang = ["ja_JP","ja"]
import os
from langs import langs, same
cd=os.path.join(os.getcwd(),"language")
#cd = "/home/maruo/ドキュメント/program/Marutools/language"
import babel, googletrans, json, os, sys
os.chdir(cd)
if input(f'The lang file will be save at "{cd}". Is it OK? [y/n]')!="y": exit()
translator=googletrans.Translator()
src = json.load(open(src_lang[0]+".lang","r", encoding="utf8"))
for glang, slangs in same.items():
try:
print(glang, end="", flush=True)
translate(slangs[0], glang)
except:
print("...error!!")
else:
print("...done") | 31.695652 | 89 | 0.537037 | src_lang = ["ja_JP","ja"]
import os
from langs import langs, same
cd=os.path.join(os.getcwd(),"language")
#cd = "/home/maruo/ドキュメント/program/Marutools/language"
import babel, googletrans, json, os, sys
os.chdir(cd)
if input(f'The lang file will be save at "{cd}". Is it OK? [y/n]')!="y": exit()
translator=googletrans.Translator()
src = json.load(open(src_lang[0]+".lang","r", encoding="utf8"))
def translate(lang, glang, langs=None):
    """Fill in missing entries of <lang>.lang via machine translation.

    Seeds a dict from an existing <lang>.lang JSON file (or from the first
    existing file among *langs* when given), translates every id of the
    module-level `src` dict that is still missing -- using the module-level
    googletrans `translator`, from src_lang[1] to *glang* -- and writes the
    merged JSON back to <lang>.lang.

    NOTE(review): depends on the module globals `src`, `src_lang` and
    `translator` being initialized before this is called.
    """
    if langs is None:
        # Seed from the existing <lang>.lang file, if any.
        if os.path.exists(lang+".lang"):
            with open(lang+".lang","r", encoding="utf8") as f:
                temp = json.load(f)
        else:
            temp={}
    else:
        # Seed from the first existing file among the given language aliases.
        for i in langs:
            if os.path.exists(i+".lang"):
                with open(i+".lang","r", encoding="utf8") as f:
                    temp = json.load(f)
                break
        else:
            # for/else: no alias file existed -> start from an empty dict.
            temp={}
    for id, src_text in src.items():
        if not id in temp:
            try:
                temp[id]=translator.translate(src_text, src=src_lang[1], dest=glang).text
            except ValueError:
                # Report the failing entry and abort: the else-branch below
                # is skipped after a break, so no partial file is written.
                print(lang, id)
                break
    else:
        # for/else: the file is only written when every entry translated cleanly.
        with open(lang+".lang","w", encoding="utf8") as f:
            json.dump(temp, f, ensure_ascii=False, indent=2)
for glang, slangs in same.items():
try:
print(glang, end="", flush=True)
translate(slangs[0], glang)
except:
print("...error!!")
else:
print("...done") | 844 | 0 | 22 |
c6082463e10bff6dddab92e94bb14ad3f5baa382 | 3,716 | py | Python | moltres-thermal-fluids/validation-assembly/postprocessing.py | arfc/mhtgr350-benchmark | 18f7b3fe5742dabb1114c3bf7760b84590d16062 | [
"BSD-3-Clause"
] | 1 | 2021-07-24T16:20:34.000Z | 2021-07-24T16:20:34.000Z | moltres-thermal-fluids/validation-assembly/postprocessing.py | arfc/mhtgr350-benchmark | 18f7b3fe5742dabb1114c3bf7760b84590d16062 | [
"BSD-3-Clause"
] | 51 | 2020-05-26T16:17:57.000Z | 2021-02-22T20:08:59.000Z | moltres-thermal-fluids/validation-assembly/postprocessing.py | robfairh/mhtgr350-benchmark | a99d440bef498d781a1a4a193b876fc1611d1d03 | [
"BSD-3-Clause"
] | 2 | 2020-01-02T19:22:59.000Z | 2020-01-11T15:42:36.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from matplotlib.cbook import get_sample_data
import matplotlib.patches as mpatches
def add_legends_val_fuel(figure, save):
    '''
    This function adds legends to the assembly model validation problem mesh.

    Loads <figure>.png from the current working directory, overlays a
    material-color legend, numeric region labels 1-13 and the reference
    points A/B/C, hides the axes, and saves the annotated image to <save>
    at 300 dpi.

    Parameters:
    -----------
    figure: [string]
        name of figure to add legends (PNG file name, without extension)
    save: [string]
        name of the new figure
    '''
    # One legend swatch per material region; RGB tuples match the mesh colors.
    red = mpatches.Patch(color=(1., 0., 0.), label='Fuel')
    green = mpatches.Patch(color=(0., 1., 0.), label='Gap')
    gray = mpatches.Patch(color=(0.63, 0.63, 0.63), label='Moderator')
    yellow = mpatches.Patch(color=(1., 1., 0.), label='Film')
    blue = mpatches.Patch(color=(0., 0., 1.), label='Coolant')
    # Load the mesh image from the current working directory and draw it.
    cwd = os.getcwd()
    fname = get_sample_data('{0}/{1}.png'.format(cwd, figure))
    im = plt.imread(fname)
    plt.imshow(im)
    plt.legend(handles=[red, green, gray, yellow, blue], loc="lower right")
    # Number the mesh regions 1-13 (pixel coordinates, white text).
    plt.text(x=3, y=616, s='1', fontsize=15, color='w')
    plt.text(x=3, y=490, s='2', fontsize=16, color='w')
    plt.text(x=3, y=365, s='3', fontsize=16, color='w')
    plt.text(x=3, y=235, s='4', fontsize=16, color='w')
    plt.text(x=3, y=110, s='5', fontsize=16, color='w')
    plt.text(x=90, y=550, s='6', fontsize=16, color='w')
    plt.text(x=102, y=430, s='7', fontsize=16, color='w')
    plt.text(x=102, y=300, s='8', fontsize=16, color='w')
    plt.text(x=102, y=174, s='9', fontsize=16, color='w')
    plt.text(x=170, y=360, s='10', fontsize=16, color='w')
    plt.text(x=200, y=235, s='11', fontsize=16, color='w')
    plt.text(x=200, y=108, s='12', fontsize=16, color='w')
    plt.text(x=280, y=170, s='13', fontsize=16, color='w')
    # Mark the gap and the measurement reference points A, B and C.
    plt.text(x=170, y=0, s='gap', fontsize=16, color='black')
    plt.text(x=-20, y=780, s='A', fontsize=20, color='black')
    plt.text(x=-20, y=0, s='B', fontsize=20, color='black')
    plt.text(x=420, y=0, s='C', fontsize=20, color='black')
    plt.axis('off')
    plt.savefig(save, dpi=300, bbox_inches="tight")
def plot_val_assem_results():
    '''
    Plots assembly model results.

    Two plots:
    - Temperature on line AB (saved as 'val-assem-line-AB')
    - Temperature on line AC (saved as 'val-assem-line-AC')
    Includes case with no gap and case with 3mm gap.

    Reads the CSV exports input_lineAB_0002.csv / input-g_lineAB_0002.csv
    (and the AC equivalents) from the current directory; each file must
    provide 'y' (distance) and 'temp' (temperature) columns.
    '''
    # ---- Line AB: no-gap vs 3 mm gap ----
    plt.figure()
    file = 'input_lineAB_0002.csv'
    file = pd.read_csv(file)  # NOTE(review): 'file' reused for path then DataFrame
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='0 mm')
    file = 'input-g_lineAB_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='3 mm')
    plt.legend(loc='upper right', fontsize=14)
    plt.xticks(fontsize=14)
    plt.yticks(np.linspace(900, 1050, 6), fontsize=14)
    plt.xlabel('Distance from point A [cm]', fontsize=14)
    plt.ylabel(r'Temperature [$^{\circ}$C]', fontsize=14)
    plt.savefig('val-assem-line-AB', dpi=300, bbox_inches="tight")
    plt.close()
    # ---- Line AC: no-gap vs 3 mm gap ----
    plt.figure()
    file = 'input_lineAC_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='0 mm')
    file = 'input-g_lineAC_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='3 mm')
    plt.legend(loc='upper right', fontsize=14)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel('Distance from point A [cm]', fontsize=14)
    plt.ylabel(r'Temperature [$^{\circ}$C]', fontsize=14)
    plt.savefig('val-assem-line-AC', dpi=300, bbox_inches="tight")
    plt.close()
if __name__ == "__main__":
# adds legend to mesh figure
add_legends_val_fuel('mesh', 'val-assem-mesh')
# plot results
plot_val_assem_results()
| 32.884956 | 77 | 0.613294 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from matplotlib.cbook import get_sample_data
import matplotlib.patches as mpatches
def add_legends_val_fuel(figure, save):
    '''
    This function adds legends to the assembly model validation problem mesh.

    Loads <figure>.png from the current working directory, overlays a
    material-color legend, numeric region labels 1-13 and the reference
    points A/B/C, hides the axes, and saves the annotated image to <save>
    at 300 dpi.

    Parameters:
    -----------
    figure: [string]
        name of figure to add legends (PNG file name, without extension)
    save: [string]
        name of the new figure
    '''
    # One legend swatch per material region; RGB tuples match the mesh colors.
    red = mpatches.Patch(color=(1., 0., 0.), label='Fuel')
    green = mpatches.Patch(color=(0., 1., 0.), label='Gap')
    gray = mpatches.Patch(color=(0.63, 0.63, 0.63), label='Moderator')
    yellow = mpatches.Patch(color=(1., 1., 0.), label='Film')
    blue = mpatches.Patch(color=(0., 0., 1.), label='Coolant')
    # Load the mesh image from the current working directory and draw it.
    cwd = os.getcwd()
    fname = get_sample_data('{0}/{1}.png'.format(cwd, figure))
    im = plt.imread(fname)
    plt.imshow(im)
    plt.legend(handles=[red, green, gray, yellow, blue], loc="lower right")
    # Number the mesh regions 1-13 (pixel coordinates, white text).
    plt.text(x=3, y=616, s='1', fontsize=15, color='w')
    plt.text(x=3, y=490, s='2', fontsize=16, color='w')
    plt.text(x=3, y=365, s='3', fontsize=16, color='w')
    plt.text(x=3, y=235, s='4', fontsize=16, color='w')
    plt.text(x=3, y=110, s='5', fontsize=16, color='w')
    plt.text(x=90, y=550, s='6', fontsize=16, color='w')
    plt.text(x=102, y=430, s='7', fontsize=16, color='w')
    plt.text(x=102, y=300, s='8', fontsize=16, color='w')
    plt.text(x=102, y=174, s='9', fontsize=16, color='w')
    plt.text(x=170, y=360, s='10', fontsize=16, color='w')
    plt.text(x=200, y=235, s='11', fontsize=16, color='w')
    plt.text(x=200, y=108, s='12', fontsize=16, color='w')
    plt.text(x=280, y=170, s='13', fontsize=16, color='w')
    # Mark the gap and the measurement reference points A, B and C.
    plt.text(x=170, y=0, s='gap', fontsize=16, color='black')
    plt.text(x=-20, y=780, s='A', fontsize=20, color='black')
    plt.text(x=-20, y=0, s='B', fontsize=20, color='black')
    plt.text(x=420, y=0, s='C', fontsize=20, color='black')
    plt.axis('off')
    plt.savefig(save, dpi=300, bbox_inches="tight")
def plot_val_assem_results():
    '''
    Plots assembly model results.

    Two plots:
    - Temperature on line AB (saved as 'val-assem-line-AB')
    - Temperature on line AC (saved as 'val-assem-line-AC')
    Includes case with no gap and case with 3mm gap.

    Reads the CSV exports input_lineAB_0002.csv / input-g_lineAB_0002.csv
    (and the AC equivalents) from the current directory; each file must
    provide 'y' (distance) and 'temp' (temperature) columns.
    '''
    # ---- Line AB: no-gap vs 3 mm gap ----
    plt.figure()
    file = 'input_lineAB_0002.csv'
    file = pd.read_csv(file)  # NOTE(review): 'file' reused for path then DataFrame
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='0 mm')
    file = 'input-g_lineAB_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='3 mm')
    plt.legend(loc='upper right', fontsize=14)
    plt.xticks(fontsize=14)
    plt.yticks(np.linspace(900, 1050, 6), fontsize=14)
    plt.xlabel('Distance from point A [cm]', fontsize=14)
    plt.ylabel(r'Temperature [$^{\circ}$C]', fontsize=14)
    plt.savefig('val-assem-line-AB', dpi=300, bbox_inches="tight")
    plt.close()
    # ---- Line AC: no-gap vs 3 mm gap ----
    plt.figure()
    file = 'input_lineAC_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='0 mm')
    file = 'input-g_lineAC_0002.csv'
    file = pd.read_csv(file)
    d = file['y'].tolist()
    temp = file['temp'].tolist()
    plt.plot(d, temp, label='3 mm')
    plt.legend(loc='upper right', fontsize=14)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel('Distance from point A [cm]', fontsize=14)
    plt.ylabel(r'Temperature [$^{\circ}$C]', fontsize=14)
    plt.savefig('val-assem-line-AC', dpi=300, bbox_inches="tight")
    plt.close()
if __name__ == "__main__":
# adds legend to mesh figure
add_legends_val_fuel('mesh', 'val-assem-mesh')
# plot results
plot_val_assem_results()
| 0 | 0 | 0 |
154f62db1ae15f6bc3122f16ce7c1c68774ac98d | 82 | py | Python | enthought/tvtk/indenter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/tvtk/indenter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/tvtk/indenter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from __future__ import absolute_import
from tvtk.indenter import *
| 20.5 | 38 | 0.829268 | # proxy module
from __future__ import absolute_import
from tvtk.indenter import *
| 0 | 0 | 0 |
d57dd501a164f45834c0c8ea6086685a5f5ec1e2 | 3,389 | py | Python | Example Usage.py | StJude-HTB/Echo-Combination-Builder | 33eee3d8e8a80201c4dda1fe5a53406a6a763147 | [
"MIT"
] | null | null | null | Example Usage.py | StJude-HTB/Echo-Combination-Builder | 33eee3d8e8a80201c4dda1fe5a53406a6a763147 | [
"MIT"
] | null | null | null | Example Usage.py | StJude-HTB/Echo-Combination-Builder | 33eee3d8e8a80201c4dda1fe5a53406a6a763147 | [
"MIT"
] | null | null | null | import combination_builder as Combine
# 1. Set Values and Initialize a Combinations Object
map_filepath = "Example_Files\\ExamplePlatemap.txt"
concentration_file = "Example_Files\\Example_Final_Concs.csv"
save_filepath = "Example_Files\\ExampleOutput3.csv"
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
backfill_wells = Combine.generate_well_range("A21", "P24")
control_wells = Combine.generate_well_range("A1","P2")
control_wells.extend(Combine.generate_well_range("A13","P14"))
static_transfer_volume = 100
assay_volume = 30
combination_max = 3
substance_id_regex = r'SJ[0-9-]+'
# Initialize the object
exp = Combine.Combinations()
# 2. Load the plate map
exp.load_platemap(map_filepath, substance_id_regex)
# 3. Setup the backfill wells - Comment/Uncomment as needed
# Option 1: Manually supply a list of wells
# This is fine for a small number of wells
# wells = ["A21", "A22", "A23", "A24", "B21", "B22", "B23", "B24"]
# Option 2: Generate well list from start and stop wells
# This option is good for a large number of wells
# List comprehension is required to get well alphas
wells = [x[0] for x in backfill_wells]
# Set backfill wells is specific to individual plates
# Repeat for all plates with backfill wells
exp.platemap.plates["E3P00000776"].set_backfill_wells(wells)
# 4. Set up Combinations - Comment/Uncomment as needed
# Option 1: Supply a manually curated list of combinations
# List compounds in separate columns, any number of
# columns is supported, header and any compound not
# in the platemap are skipped
# combinations_filepath = "Combination Template.csv"
# exp.load_platemap(combinations_filepath)
# Option 2: Calculate all permutations in the script
# Specify how many compounds to include in each combination
exp.generate_combinations(combination_max)
# 5. Set transfer volume or assay conditions
# Option 1: Set a static volume for all substances
# Volume is in nanoliters - All combinations will be
# the 1:1:1 volume ratios
# exp.set_transfer_volume(static_transfer_volume)
# Option 2: Set assay volume and assay concentration
# Assay volume is in microliters
# Assay concentration(s) must be supplied
exp.set_assay_volume(assay_volume)
# Set a constant concentration for all substances
# exp.set_assay_concentration(conc=50, unit="mM")
# Or set each concentration idependently with a csv file
exp.set_assay_concentration(file=concentration_file)
# 6. Configure assay plate layout
exp.reserve_control_wells([w[0] for w in control_wells])
# 7. Create the transfer list
exp.create_transfers()
# 8. Sort transfer list for optimized transfer speed
exp.sort_transfers()
# 9. Save transfer list - Echo formatted CSV file
exp.save_transfers(save_filepath)
# 10. Save *.cmt file - Screener Mapping File
# OPTIONAL - Set replicate number to create replicate
# plates with the same plate mapping and concentrations
exp.save_cmt(cmt_filepath, 3)
# IN A NEW SESSION
# After using the Echo CSV to transfer samples
#
# 11. Update CMT with barcodes after performing transfers
import src.combination_builder.Combinations as Combine
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
barcode_filepath = "Example_Files\\Barcode_List.csv"
# Update barcodes
Combine.update_CMT_barcodes(cmt_filepath, barcode_filepath)
| 32.27619 | 69 | 0.757746 | import combination_builder as Combine
# 1. Set Values and Initialize a Combinations Object
map_filepath = "Example_Files\\ExamplePlatemap.txt"
concentration_file = "Example_Files\\Example_Final_Concs.csv"
save_filepath = "Example_Files\\ExampleOutput3.csv"
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
backfill_wells = Combine.generate_well_range("A21", "P24")
control_wells = Combine.generate_well_range("A1","P2")
control_wells.extend(Combine.generate_well_range("A13","P14"))
static_transfer_volume = 100
assay_volume = 30
combination_max = 3
substance_id_regex = r'SJ[0-9-]+'
# Initialize the object
exp = Combine.Combinations()
# 2. Load the plate map
exp.load_platemap(map_filepath, substance_id_regex)
# 3. Setup the backfill wells - Comment/Uncomment as needed
# Option 1: Manually supply a list of wells
# This is fine for a small number of wells
# wells = ["A21", "A22", "A23", "A24", "B21", "B22", "B23", "B24"]
# Option 2: Generate well list from start and stop wells
# This option is good for a large number of wells
# List comprehension is required to get well alphas
wells = [x[0] for x in backfill_wells]
# Set backfill wells is specific to individual plates
# Repeat for all plates with backfill wells
exp.platemap.plates["E3P00000776"].set_backfill_wells(wells)
# 4. Set up Combinations - Comment/Uncomment as needed
# Option 1: Supply a manually curated list of combinations
# List compounds in separate columns, any number of
# columns is supported, header and any compound not
# in the platemap are skipped
# combinations_filepath = "Combination Template.csv"
# exp.load_platemap(combinations_filepath)
# Option 2: Calculate all permutations in the script
# Specify how many compounds to include in each combination
exp.generate_combinations(combination_max)
# 5. Set transfer volume or assay conditions
# Option 1: Set a static volume for all substances
# Volume is in nanoliters - All combinations will be
# the 1:1:1 volume ratios
# exp.set_transfer_volume(static_transfer_volume)
# Option 2: Set assay volume and assay concentration
# Assay volume is in microliters
# Assay concentration(s) must be supplied
exp.set_assay_volume(assay_volume)
# Set a constant concentration for all substances
# exp.set_assay_concentration(conc=50, unit="mM")
# Or set each concentration idependently with a csv file
exp.set_assay_concentration(file=concentration_file)
# 6. Configure assay plate layout
exp.reserve_control_wells([w[0] for w in control_wells])
# 7. Create the transfer list
exp.create_transfers()
# 8. Sort transfer list for optimized transfer speed
exp.sort_transfers()
# 9. Save transfer list - Echo formatted CSV file
exp.save_transfers(save_filepath)
# 10. Save *.cmt file - Screener Mapping File
# OPTIONAL - Set replicate number to create replicate
# plates with the same plate mapping and concentrations
exp.save_cmt(cmt_filepath, 3)
# IN A NEW SESSION
# After using the Echo CSV to transfer samples
#
# 11. Update CMT with barcodes after performing transfers
import src.combination_builder.Combinations as Combine
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
barcode_filepath = "Example_Files\\Barcode_List.csv"
# Update barcodes
Combine.update_CMT_barcodes(cmt_filepath, barcode_filepath)
| 0 | 0 | 0 |
4381ae45f56072ce96f10960b4e1cca233ea2aba | 877 | py | Python | src/ipaparser/_code/features/backness.py | danmysak/ipa-parser | bb4f5fc1a8f95ef87793d2ffd79430a9a0ffbeaf | [
"MIT"
] | null | null | null | src/ipaparser/_code/features/backness.py | danmysak/ipa-parser | bb4f5fc1a8f95ef87793d2ffd79430a9a0ffbeaf | [
"MIT"
] | null | null | null | src/ipaparser/_code/features/backness.py | danmysak/ipa-parser | bb4f5fc1a8f95ef87793d2ffd79430a9a0ffbeaf | [
"MIT"
] | null | null | null | from .feature import assert_feature_mapping, Feature
__all__ = [
'Backness',
'BacknessCategory',
]
class Backness(Feature):
"""
https://en.wikipedia.org/wiki/International_Phonetic_Alphabet#Vowels
"""
FRONT = 'front'
NEAR_FRONT = 'near-front'
CENTRAL = 'central'
NEAR_BACK = 'near-back'
BACK = 'back'
BACKNESS_TO_CATEGORY = assert_feature_mapping({
Backness.FRONT: BacknessCategory.ABOUT_FRONT,
Backness.NEAR_FRONT: BacknessCategory.ABOUT_FRONT,
Backness.CENTRAL: BacknessCategory.ABOUT_CENTRAL,
Backness.NEAR_BACK: BacknessCategory.ABOUT_BACK,
Backness.BACK: BacknessCategory.ABOUT_BACK,
})
| 23.702703 | 72 | 0.718358 | from .feature import assert_feature_mapping, Feature
__all__ = [
'Backness',
'BacknessCategory',
]
class BacknessCategory(Feature):
ABOUT_FRONT = 'about front'
ABOUT_CENTRAL = 'about central'
ABOUT_BACK = 'about back'
class Backness(Feature):
"""
https://en.wikipedia.org/wiki/International_Phonetic_Alphabet#Vowels
"""
FRONT = 'front'
NEAR_FRONT = 'near-front'
CENTRAL = 'central'
NEAR_BACK = 'near-back'
BACK = 'back'
def derived(self) -> BacknessCategory:
return BACKNESS_TO_CATEGORY[self]
BACKNESS_TO_CATEGORY = assert_feature_mapping({
Backness.FRONT: BacknessCategory.ABOUT_FRONT,
Backness.NEAR_FRONT: BacknessCategory.ABOUT_FRONT,
Backness.CENTRAL: BacknessCategory.ABOUT_CENTRAL,
Backness.NEAR_BACK: BacknessCategory.ABOUT_BACK,
Backness.BACK: BacknessCategory.ABOUT_BACK,
})
| 59 | 109 | 50 |
72fd4dfd20221814709dbfce3780d99650dbdc35 | 1,634 | py | Python | compare_speed_test.py | NobuyukiInoue/get_tw_follower | f3fa186e6fd528e83b12b4550622ee31dffb653d | [
"MIT"
] | 1 | 2018-08-24T08:54:49.000Z | 2018-08-24T08:54:49.000Z | compare_speed_test.py | NobuyukiInoue/get_tw_follower | f3fa186e6fd528e83b12b4550622ee31dffb653d | [
"MIT"
] | null | null | null | compare_speed_test.py | NobuyukiInoue/get_tw_follower | f3fa186e6fd528e83b12b4550622ee31dffb653d | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
import os
from datetime import datetime
import time
from mylibs import my_fileList
from mylibs import my_csv
if __name__ == "__main__":
main()
| 23.014085 | 59 | 0.5153 | # coding: utf-8
import sys
import os
from datetime import datetime
import time
from mylibs import my_fileList
from mylibs import my_csv
def main():
args = sys.argv
argc = len(args)
if argc <= 2:
print("Usage: python %s file1 file2" %(args[0]))
exit(0)
my_fileList.fileCheck(args[1])
my_fileList.fileCheck(args[2])
lines1 = my_csv.readCSVfile(args[1])
lines2 = my_csv.readCSVfile(args[2])
# ========== 二重ループで比較 ==========
time0 = time.time()
print("\n=== 二重ループ(i, j) Exist in [%s] ===" %(args[1]))
for user in my_csv.getDiff_oldversion(lines1, lines2):
print(user)
time1 = time.time()
print("time : %f[s]" %(time1 - time0))
# ========== 配列格納後に比較 ==========
time0 = time.time()
print("\n=== 配列に格納後にループ Exist in [%s] ===" %(args[1]))
for user in my_csv.getDiff(lines1, lines2):
print(user)
time1 = time.time()
print("time : %f[s]" %(time1 - time0))
# =====================================
# ========== 二重ループで比較 ==========
time0 = time.time()
print("\n=== 二重ループ(i, j) Exist in [%s] ===" %(args[2]))
for user in my_csv.getDiff_oldversion(lines2, lines1):
print(user)
time1 = time.time()
print("time : %f[s]" %(time1 - time0))
# ========== 配列格納後に比較 ==========
time0 = time.time()
print("\n=== 配列に格納後にループ Exist in [%s] ===" %(args[2]))
for user in my_csv.getDiff(lines2, lines1):
print(user)
time1 = time.time()
print("time : %f[s]" %(time1 - time0))
# =====================================
print()
if __name__ == "__main__":
main()
| 1,558 | 0 | 23 |
3a5e4f4a68ccd4bb0a7bda575d7f681b3cce5c29 | 203 | py | Python | example/logger_instance.py | Troppydash/tdlogging | eb9fdcd3851082528af4aecf167313291bff95aa | [
"MIT"
] | null | null | null | example/logger_instance.py | Troppydash/tdlogging | eb9fdcd3851082528af4aecf167313291bff95aa | [
"MIT"
] | null | null | null | example/logger_instance.py | Troppydash/tdlogging | eb9fdcd3851082528af4aecf167313291bff95aa | [
"MIT"
] | null | null | null | from tdlogging.tdlogger import TDLogger
from tdlogging.tdprinter import TDPrinter, BoxPrinter, OneLinerPrinter, CoolPrinter
logger = TDLogger(alias="My Custom Logger", printer=CoolPrinter()).config()
| 29 | 83 | 0.812808 | from tdlogging.tdlogger import TDLogger
from tdlogging.tdprinter import TDPrinter, BoxPrinter, OneLinerPrinter, CoolPrinter
logger = TDLogger(alias="My Custom Logger", printer=CoolPrinter()).config()
| 0 | 0 | 0 |
0e21f6dbc9f58de742fd12c5bcd16fbb9899d1d3 | 1,213 | py | Python | tapispy/clients/subscribe.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | tapispy/clients/subscribe.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | tapispy/clients/subscribe.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | """
subscribe.py
Subscribe to TACC apis
"""
import getpass
import requests
from .exceptions import AgaveClientError
from ..utils import handle_bad_response_status_code
def clients_subscribe(username, client_name, tenant_url,
                      api_name, api_version, api_provider):
    """ Subscribe an oauth client to an api.

    Validates the client name, prompts interactively for the user's API
    password, then POSTs the subscription (apiName/apiVersion/apiProvider)
    to the tenant's clients service and checks the response status.

    Returns the requests.Response on success.
    Raises AgaveClientError for an invalid client_name or any request
    failure.
    """
    # Make sure client_name is valid *before* prompting for a password,
    # so invalid input fails fast without an interactive prompt.
    if client_name == "" or client_name is None:
        raise AgaveClientError("Error creating client: invalid client_name")
    # Set request endpoint.
    endpoint = "{}/clients/v2/{}/subscriptions".format(tenant_url, client_name)
    # Get user's password.
    passwd = getpass.getpass(prompt="API password: ")
    # Make request.
    try:
        data = {
            "apiName": api_name,
            "apiVersion": api_version,
            "apiProvider": api_provider,
        }
        resp = requests.post(endpoint, data=data, auth=(username, passwd))
    except Exception as err:
        # Wrap any transport/library failure in the client error type.
        raise AgaveClientError(err)
    finally:
        # Drop the password reference regardless of outcome (replaces the
        # duplicated `del passwd` in both branches).
        del passwd
    # Handle bad status code.
    handle_bad_response_status_code(resp)
    # Bug fix: return the response so callers can inspect the result
    # (the original computed it and returned None).
    return resp
| 28.880952 | 80 | 0.605936 | """
subscribe.py
Subscribe to TACC apis
"""
import getpass
import requests
from .exceptions import AgaveClientError
from ..utils import handle_bad_response_status_code
def clients_subscribe(username, client_name, tenant_url,
                      api_name, api_version, api_provider):
    """ Subscribe an oauth client to an api

    Prompts interactively for the user's API password and POSTs the
    subscription (apiName/apiVersion/apiProvider) to the tenant's
    clients service. Raises AgaveClientError for an invalid client_name
    or any request failure. The response status is validated but the
    response itself is not returned.
    """
    # Set request endpoint.
    endpoint = "{}/clients/v2/{}/subscriptions".format(tenant_url, client_name)
    # Get user's password.
    # NOTE(review): the password is requested before client_name is
    # validated, so an invalid name still triggers an interactive prompt.
    passwd = getpass.getpass(prompt="API password: ")
    # Make sure client_name is valid.
    if client_name == "" or client_name is None:
        raise AgaveClientError("Error creating client: invalid client_name")
    # Make request.
    try:
        data = {
            "apiName": api_name,
            "apiVersion": api_version,
            "apiProvider": api_provider,
        }
        resp = requests.post(endpoint, data=data, auth=(username, passwd))
        del passwd  # drop the password reference as soon as possible
    except Exception as err:
        del passwd
        raise AgaveClientError(err)  # wrap any failure in the client error type
    # Handle bad status code.
    handle_bad_response_status_code(resp)
| 0 | 0 | 0 |
c94f84642ef0bd7f67524ef1a93130334a9ec7a5 | 5,046 | py | Python | avatar/sampling.py | Julian-Theis/AVATAR | 24fcd6eaa26f413be528a160d865d5d7e49a780b | [
"MIT"
] | 7 | 2020-12-22T12:09:14.000Z | 2022-03-29T12:50:35.000Z | avatar/sampling.py | ProminentLab/AVATAR | a20c767d8739a52f538927b4ec3d528952263d5a | [
"MIT"
] | 10 | 2020-11-13T17:45:59.000Z | 2022-02-10T00:50:38.000Z | avatar/sampling.py | ProminentLab/AVATAR | a20c767d8739a52f538927b4ec3d528952263d5a | [
"MIT"
] | 2 | 2020-03-26T22:27:27.000Z | 2020-07-07T22:36:41.000Z | import argparse
import os
import tensorflow as tf
import numpy as np
from avatar.util.LoadRelgan import LoadRelgan
from avatar.util.MHGAN import MHGAN
from avatar.util.util import writeToFile, readTraces
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
    # Reproducibility: fix the numpy seed before any sampling happens.
    np.random.seed(seed=1234)
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--system', help='System to evaluate (e.g. pa_system_2_3)', required=True)
    parser.add_argument('-j', '--job', help='0 = beta 100, 1 = beta 1000', required=True)
    parser.add_argument('-sfx', '--suffix', help='Which suffix, i.e. final epoch of the trained SGAN to use?', required=True)
    parser.add_argument('-gpu', '--gpu', help='GPU on which the training is performed. For example 0.', required=True)
    # Strategy selector (bare strings here were no-op statements; now real comments).
    parser.add_argument('-strategy', '--strategy', help='select "naive" or "mh"', required=True)
    # Parameter for naive sampling.
    parser.add_argument('-n_n', '--n_samples', help='(NAIVE ONLY) Number of samples to generate? (Default: 10000)', default=10000)
    # Parameters for Metropolis-Hastings (MH) sampling.
    parser.add_argument('-mh_c', '--mh_count', help='(MH ONLY) Number of samples per batch? (Default: 50)',
                        default=50)
    parser.add_argument('-mh_p', '--mh_patience',
                        help='(MH ONLY) Patience parameter (Default: 5)',
                        default=5)
    parser.add_argument('-mh_k', '--mh_k',
                        help='(MH ONLY) Length of Markov chain (Default: 500)',
                        default=500)
    parser.add_argument('-mh_mi', '--mh_maxiter',
                        help='(MH ONLY) Max sampling iterations? (Default: 200)',
                        default=200)
    args = parser.parse_args()
    system = args.system
    job = int(args.job)
    suffix = args.suffix
    strategy = args.strategy
    n_samples = int(args.n_samples)
    mh_count = int(args.mh_count)
    mh_patience = int(args.mh_patience)
    # BUG FIX: this previously read args.mh_count, so the -mh_k option was
    # silently ignored and the Markov-chain length always equalled the batch size.
    mh_k = int(args.mh_k)
    mh_maxiter = int(args.mh_maxiter)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if strategy == "naive":
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        if DATA_PATH is None:
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        else:
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        print("Start NAIVE SAMPLING")
        gen_samples = relgan.generate(n_samples=n_samples)
        print("Generated samples - shape:", gen_samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, gen_samples)
    elif strategy == "mh":
        if DATA_PATH is None:
            eval_path = os.path.join(WORK_PATH, "data", "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        else:
            eval_path = os.path.join(DATA_PATH, "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        # Calibrate the MH sampler against real (held-out) traces.
        calibrate = readTraces(eval_path)
        calibrate = relgan.prep(calibrate)
        mhgan = MHGAN(relgan, c=mh_count, k=mh_k, real_samples=calibrate)
        samples = None
        gen_size = 0
        iteration = 1  # renamed from 'iter' to avoid shadowing the builtin
        cnt_patience = 0
        continue_sampling = True
        print("Start MH SAMPLING")
        # Keep sampling until no new unique samples appear for mh_patience
        # consecutive iterations, or mh_maxiter iterations are reached.
        while continue_sampling:
            print("**** MH-GAN Iteration", iteration, ":")
            gen_samples, accepts, rejects = mhgan.generate_enhanced(
                relgan.sess,
                count=mh_count,
                k=mh_k
            )
            if samples is None:
                samples = gen_samples
            else:
                samples = np.concatenate([samples, gen_samples], axis=0)
            samples = np.unique(samples, axis=0)
            if gen_size != samples.shape[0]:
                cnt_patience = 0
            else:
                cnt_patience += 1
            gen_size = samples.shape[0]
            print("Generated samples (cumulative): ", gen_size)
            iteration += 1
            if cnt_patience >= mh_patience:
                continue_sampling = False
            if mh_maxiter != -1 and iteration >= mh_maxiter:
                continue_sampling = False
        print("Generated samples - shape:", samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, samples)
    else:
        raise ValueError("Unknown sampling strategy.")
| 39.421875 | 141 | 0.591954 | import argparse
import os
import tensorflow as tf
import numpy as np
from avatar.util.LoadRelgan import LoadRelgan
from avatar.util.MHGAN import MHGAN
from avatar.util.util import writeToFile, readTraces
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
    # Reproducibility: fix the numpy seed before any sampling happens.
    np.random.seed(seed=1234)
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--system', help='System to evaluate (e.g. pa_system_2_3)', required=True)
    parser.add_argument('-j', '--job', help='0 = beta 100, 1 = beta 1000', required=True)
    parser.add_argument('-sfx', '--suffix', help='Which suffix, i.e. final epoch of the trained SGAN to use?', required=True)
    parser.add_argument('-gpu', '--gpu', help='GPU on which the training is performed. For example 0.', required=True)
    # Strategy selector (bare strings here were no-op statements; now real comments).
    parser.add_argument('-strategy', '--strategy', help='select "naive" or "mh"', required=True)
    # Parameter for naive sampling.
    parser.add_argument('-n_n', '--n_samples', help='(NAIVE ONLY) Number of samples to generate? (Default: 10000)', default=10000)
    # Parameters for Metropolis-Hastings (MH) sampling.
    parser.add_argument('-mh_c', '--mh_count', help='(MH ONLY) Number of samples per batch? (Default: 50)',
                        default=50)
    parser.add_argument('-mh_p', '--mh_patience',
                        help='(MH ONLY) Patience parameter (Default: 5)',
                        default=5)
    parser.add_argument('-mh_k', '--mh_k',
                        help='(MH ONLY) Length of Markov chain (Default: 500)',
                        default=500)
    parser.add_argument('-mh_mi', '--mh_maxiter',
                        help='(MH ONLY) Max sampling iterations? (Default: 200)',
                        default=200)
    args = parser.parse_args()
    system = args.system
    job = int(args.job)
    suffix = args.suffix
    strategy = args.strategy
    n_samples = int(args.n_samples)
    mh_count = int(args.mh_count)
    mh_patience = int(args.mh_patience)
    # BUG FIX: this previously read args.mh_count, so the -mh_k option was
    # silently ignored and the Markov-chain length always equalled the batch size.
    mh_k = int(args.mh_k)
    mh_maxiter = int(args.mh_maxiter)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if strategy == "naive":
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        if DATA_PATH is None:
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        else:
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        print("Start NAIVE SAMPLING")
        gen_samples = relgan.generate(n_samples=n_samples)
        print("Generated samples - shape:", gen_samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, gen_samples)
    elif strategy == "mh":
        if DATA_PATH is None:
            eval_path = os.path.join(WORK_PATH, "data", "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        else:
            eval_path = os.path.join(DATA_PATH, "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        # Calibrate the MH sampler against real (held-out) traces.
        calibrate = readTraces(eval_path)
        calibrate = relgan.prep(calibrate)
        mhgan = MHGAN(relgan, c=mh_count, k=mh_k, real_samples=calibrate)
        samples = None
        gen_size = 0
        iteration = 1  # renamed from 'iter' to avoid shadowing the builtin
        cnt_patience = 0
        continue_sampling = True
        print("Start MH SAMPLING")
        # Keep sampling until no new unique samples appear for mh_patience
        # consecutive iterations, or mh_maxiter iterations are reached.
        while continue_sampling:
            print("**** MH-GAN Iteration", iteration, ":")
            gen_samples, accepts, rejects = mhgan.generate_enhanced(
                relgan.sess,
                count=mh_count,
                k=mh_k
            )
            if samples is None:
                samples = gen_samples
            else:
                samples = np.concatenate([samples, gen_samples], axis=0)
            samples = np.unique(samples, axis=0)
            if gen_size != samples.shape[0]:
                cnt_patience = 0
            else:
                cnt_patience += 1
            gen_size = samples.shape[0]
            print("Generated samples (cumulative): ", gen_size)
            iteration += 1
            if cnt_patience >= mh_patience:
                continue_sampling = False
            if mh_maxiter != -1 and iteration >= mh_maxiter:
                continue_sampling = False
        print("Generated samples - shape:", samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, samples)
    else:
        raise ValueError("Unknown sampling strategy.")
| 0 | 0 | 0 |
21a4d2fc23090f4d2bc7f32818872893c449fdaf | 2,273 | py | Python | bca4abm/processors/abm/auto_ownership.py | steventrev/in_midstates_bca | f762637a0b8976fa835320cb3b31a5eb5c423dfa | [
"BSD-3-Clause"
] | null | null | null | bca4abm/processors/abm/auto_ownership.py | steventrev/in_midstates_bca | f762637a0b8976fa835320cb3b31a5eb5c423dfa | [
"BSD-3-Clause"
] | null | null | null | bca4abm/processors/abm/auto_ownership.py | steventrev/in_midstates_bca | f762637a0b8976fa835320cb3b31a5eb5c423dfa | [
"BSD-3-Clause"
] | null | null | null | # bca4abm
# See full license in LICENSE.txt.
import logging
import os
import pandas as pd
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import assign
from bca4abm import bca4abm as bca
from ...util.misc import add_result_columns, add_summary_results
logger = logging.getLogger(__name__)
"""
auto ownership processor
"""
@inject.injectable()
@inject.injectable()
@inject.step()
def auto_ownership_processor(
        persons_merged,
        auto_ownership_spec,
        auto_ownership_settings,
        coc_column_names,
        chunk_size,
        trace_hh_id):
    """
    Compute auto ownership benefits.

    Evaluates the auto_ownership assignment expressions against the merged
    persons table, sums results grouped by the community-of-concern (COC)
    columns, and records them under 'AO_'-prefixed names via
    add_result_columns/add_summary_results.  When trace_hh_id is set,
    per-person trace output is written as CSV.
    """
    persons_df = persons_merged.to_frame()
    logger.info("Running auto_ownership_processor with %d persons (chunk size = %s)"
                % (len(persons_df), chunk_size))
    # Expression evaluation context: model constants overlaid with globals.
    locals_dict = config.get_model_constants(auto_ownership_settings)
    locals_dict.update(config.setting('globals'))
    # Boolean row mask for the traced household (falsy when tracing is off).
    trace_rows = trace_hh_id and persons_df['household_id'] == trace_hh_id
    coc_summary, trace_results, trace_assigned_locals = \
        bca.eval_and_sum(assignment_expressions=auto_ownership_spec,
                         df=persons_df,
                         locals_dict=locals_dict,
                         df_alias='persons',
                         group_by_column_names=coc_column_names,
                         chunk_size=chunk_size,
                         trace_rows=trace_rows)
    result_prefix = 'AO_'
    add_result_columns("coc_results", coc_summary, result_prefix)
    add_summary_results(coc_summary, prefix=result_prefix, spec=auto_ownership_spec)
    if trace_hh_id:
        if trace_results is not None:
            tracing.write_csv(trace_results,
                              file_name="auto_ownership",
                              index_label='person_id',
                              column_labels=['label', 'person'])
        if trace_assigned_locals:
            tracing.write_csv(trace_assigned_locals, file_name="auto_ownership_locals")
| 28.061728 | 87 | 0.671359 | # bca4abm
# See full license in LICENSE.txt.
import logging
import os
import pandas as pd
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import assign
from bca4abm import bca4abm as bca
from ...util.misc import add_result_columns, add_summary_results
logger = logging.getLogger(__name__)
"""
auto ownership processor
"""
@inject.injectable()
def auto_ownership_spec():
    """Injectable: the auto_ownership assignment-expression spec (CSV)."""
    return bca.read_assignment_spec('auto_ownership.csv')
@inject.injectable()
def auto_ownership_settings():
    """Injectable: model settings loaded from auto_ownership.yaml."""
    return config.read_model_settings('auto_ownership.yaml')
@inject.step()
def auto_ownership_processor(
        persons_merged,
        auto_ownership_spec,
        auto_ownership_settings,
        coc_column_names,
        chunk_size,
        trace_hh_id):
    """
    Compute auto ownership benefits.

    Evaluates the spec expressions over the merged persons table, sums them
    per community-of-concern group, stores the 'AO_'-prefixed results, and
    optionally writes CSV traces for the household identified by trace_hh_id.
    """
    persons = persons_merged.to_frame()
    logger.info("Running auto_ownership_processor with %d persons (chunk size = %s)"
                % (len(persons), chunk_size))
    # Build the expression-evaluation context from model constants + globals.
    eval_locals = config.get_model_constants(auto_ownership_settings)
    eval_locals.update(config.setting('globals'))
    # Row mask selecting the traced household (falsy when tracing disabled).
    traced_rows = trace_hh_id and persons['household_id'] == trace_hh_id
    summary, traced_results, traced_locals = bca.eval_and_sum(
        assignment_expressions=auto_ownership_spec,
        df=persons,
        locals_dict=eval_locals,
        df_alias='persons',
        group_by_column_names=coc_column_names,
        chunk_size=chunk_size,
        trace_rows=traced_rows)
    prefix = 'AO_'
    add_result_columns("coc_results", summary, prefix)
    add_summary_results(summary, prefix=prefix, spec=auto_ownership_spec)
    if trace_hh_id:
        if traced_results is not None:
            tracing.write_csv(traced_results,
                              file_name="auto_ownership",
                              index_label='person_id',
                              column_labels=['label', 'person'])
        if traced_locals:
            tracing.write_csv(traced_locals, file_name="auto_ownership_locals")
| 133 | 0 | 44 |
cc6d2bb9a90ecd3f421a08e69b531f77b0c6cbec | 59 | py | Python | tests/misc/pkg/test.py | QuantStack/memestra | cde5ef6fb85f98b36efea6262bd6f38215778a12 | [
"BSD-3-Clause"
] | 21 | 2020-03-10T13:10:17.000Z | 2021-06-14T17:54:01.000Z | tests/misc/pkg/test.py | QuantStack/memestra | cde5ef6fb85f98b36efea6262bd6f38215778a12 | [
"BSD-3-Clause"
] | 48 | 2020-03-28T21:35:40.000Z | 2022-02-14T15:14:47.000Z | tests/misc/pkg/test.py | QuantStack/memestra | cde5ef6fb85f98b36efea6262bd6f38215778a12 | [
"BSD-3-Clause"
] | 9 | 2020-03-18T12:57:19.000Z | 2021-11-21T09:40:25.000Z | from .helper import helper
| 14.75 | 26 | 0.694915 | from .helper import helper
def test():
    """Exercise the package-relative import by delegating to helper()."""
    return helper()
| 10 | 0 | 22 |
f7e070da0f23eb08611f8b580a86d465c08e6775 | 7,531 | py | Python | text_processing.py | HLTLUTB/TextAnalysis | 909c94f2b2b6ce4341fe696c69adfbdd8d7bbb4f | [
"MIT"
] | null | null | null | text_processing.py | HLTLUTB/TextAnalysis | 909c94f2b2b6ce4341fe696c69adfbdd8d7bbb4f | [
"MIT"
] | null | null | null | text_processing.py | HLTLUTB/TextAnalysis | 909c94f2b2b6ce4341fe696c69adfbdd8d7bbb4f | [
"MIT"
] | null | null | null | import re
import nltk
import spacy
import unicodedata
import requests
from spacy_syllables import SpacySyllables
from bs4 import BeautifulSoup
from nltk import TweetTokenizer
from spacy.lang.es import Spanish
from spacy.lang.en import English
from nltk.util import ngrams
if __name__ == '__main__':
    # Demo: run the spaCy-based tagger on one Spanish and one English sample
    # and print the per-token dictionaries produced by nlp().
    tp_es = TextProcessing(lang='es')
    result_es = tp_es.nlp(
        'Ahora a la gente todo le parece tóxico, más si dices lo que sientes o te molesta…y NO, tóxico es quedarse '
        'callado por miedo a arruinar algo. Hay que aprender a quererse primero.')
    for i in result_es:
        print(i)
    tp_en = TextProcessing(lang='en')
    result_en = tp_en.nlp("The data doesn’t lie: here's what one of our teams learned when they tried a 4-day workweek.")
    for i in result_en:
        print(i)
| 42.308989 | 121 | 0.519984 | import re
import nltk
import spacy
import unicodedata
import requests
from spacy_syllables import SpacySyllables
from bs4 import BeautifulSoup
from nltk import TweetTokenizer
from spacy.lang.es import Spanish
from spacy.lang.en import English
from nltk.util import ngrams
class TextProcessing(object):
    """Static helpers for text normalisation, tagging and tokenisation
    built on spaCy / NLTK, for Spanish ('es') and English ('en').
    """
    # Human-readable component name.
    name = 'Text Processing'
    # Class-level default language.  NOTE(review): the static methods read
    # TextProcessing.lang (the class attribute), not the instance attribute
    # set in __init__, so the constructor argument only affects self.lang.
    lang = 'es'
    def __init__(self, lang: str = 'es'):
        self.lang = lang
    @staticmethod
    def nlp(text: str) -> list:
        """Tag *text* token by token; returns a list of per-token dicts
        (text/lemma/pos/tag/dep/shape/flags/syllables), or None on error.
        NOTE(review): reloads the spaCy pipeline on every call via load_spacy.
        """
        try:
            list_tagger = []
            tp_nlp = TextProcessing.load_spacy(TextProcessing.lang)
            doc = tp_nlp(text.lower())
            print('original_text: {0}'.format(text))
            for token in doc:
                item = {'text': token.text, 'lemma': token.lemma_, 'pos': token.pos_, 'tag': token.tag_,
                        'dep': token.dep_, 'shape': token.shape_, 'is_alpha': token.is_alpha,
                        'is_stop': token.is_stop, 'is_digit': token.is_digit, 'is_punct': token.is_punct,
                        'syllables': token._.syllables}
                list_tagger.append(item)
            return list_tagger
        except Exception as e:
            print('Error nlp: {0}'.format(e))
    @staticmethod
    def load_spacy(lang: str) -> object:
        """Download (if needed) and load the spaCy model for *lang*, adding
        the 'syllables' pipe; returns the pipeline, or None on error.
        """
        try:
            spacy_model = {'es': 'es_core_news_sm', 'en': 'en_core_web_sm'}
            if not spacy.util.is_package(spacy_model[lang]):
                spacy.cli.download(spacy_model[lang])
            component = spacy.load(spacy_model[lang])
            SpacySyllables(component)
            component.add_pipe('syllables', last=True)
            print('- Text Processing: {0}'.format(component.pipe_names))
            return component
        except Exception as e:
            print('Error load spacy: {0}'.format(e))
    @staticmethod
    def proper_encoding(text: str) -> str:
        """Strip accents/diacritics by NFD-normalising and dropping non-ASCII."""
        try:
            text = unicodedata.normalize('NFD', text)
            text = text.encode('ascii', 'ignore')
            return text.decode("utf-8")
        except Exception as e:
            print('Error proper_encoding: {0}'.format(e))
    @staticmethod
    def stopwords(text: str) -> str:
        """Remove stopwords using the language picked by TextProcessing.lang."""
        try:
            nlp = Spanish() if TextProcessing.lang == 'es' else English()
            doc = nlp(text)
            token_list = [token.text for token in doc]
            sentence = []
            for word in token_list:
                lexeme = nlp.vocab[word]
                if not lexeme.is_stop:
                    sentence.append(word)
            return ' '.join(sentence)
        except Exception as e:
            print('Error stopwords: {0}'.format(e))
    @staticmethod
    def remove_patterns(text: str) -> str:
        """Delete punctuation/symbol characters and leading numeric tokens,
        then lowercase the result.
        """
        try:
            text = re.sub(r'\©|\×|\⇔|\_|\»|\«|\~|\#|\$|\€|\Â|\�|\¬', '', text)
            text = re.sub(r'\,|\;|\:|\!|\¡|\’|\‘|\”|\“|\"|\'|\`', '', text)
            text = re.sub(r'\}|\{|\[|\]|\(|\)|\<|\>|\?|\¿|\°|\|', '', text)
            text = re.sub(r'\/|\-|\+|\*|\=|\^|\%|\&|\$', '', text)
            text = re.sub(r'\b\d+(?:\.\d+)?\s+', '', text)
            return text.lower()
        except Exception as e:
            print('Error remove_patterns: {0}'.format(e))
    @staticmethod
    def transformer(text: str, stopwords: bool = False) -> str:
        """Full normalisation pipeline: de-accent, lowercase, replace emoji /
        URLs / @mentions / #hashtags with placeholder tags, strip punctuation,
        optionally remove stopwords, and collapse whitespace.
        Returns None when the result collapses to a single space.
        """
        try:
            text_out = TextProcessing.proper_encoding(text)
            text_out = text_out.lower()
            text_out = re.sub("[\U0001f000-\U000e007f]", '[EMOJI]', text_out)
            text_out = re.sub(
                r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+'
                r'|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
                '[URL]', text_out)
            text_out = re.sub("@([A-Za-z0-9_]{1,40})", '[MENTION]', text_out)
            text_out = re.sub("#([A-Za-z0-9_]{1,40})", '[HASTAG]', text_out)
            text_out = TextProcessing.remove_patterns(text_out)
            # text_out = TextAnalysis.lemmatization(text_out) if lemmatizer else text_out
            text_out = TextProcessing.stopwords(text_out) if stopwords else text_out
            text_out = re.sub(r'\s+', ' ', text_out).strip()
            text_out = text_out.rstrip()
            return text_out if text_out != ' ' else None
        except Exception as e:
            print('Error transformer: {0}'.format(e))
    @staticmethod
    def tokenizer(text: str) -> list:
        """Tokenise with NLTK's TweetTokenizer.
        NOTE(review): the error label says 'make_ngrams' — likely copy-paste.
        """
        try:
            text_tokenizer = TweetTokenizer()
            return text_tokenizer.tokenize(text)
        except Exception as e:
            print('Error make_ngrams: {0}'.format(e))
    @staticmethod
    def make_ngrams(text: str, num: int):
        """Return the *num*-grams of *text* as space-joined strings."""
        try:
            n_grams = ngrams(nltk.word_tokenize(text), num)
            return [' '.join(grams) for grams in n_grams]
        except Exception as e:
            print('Error make_ngrams: {0}'.format(e))
    @staticmethod
    def get_URL_title(text: str):
        """Fetch the <title> of each URL referenced by *text* and return the
        titles as a list split on '~' ('Null' marks missing pages).
        NOTE(review): transformer() returns a str, yet text.urls is read
        below — this looks written for a tweet-like object; confirm callers.
        """
        result = ''
        # Patterns used to extract and clean the URL list.  (Comments
        # translated from Spanish.)
        pattern = r'\([0-9]*:[0-9]*\) => '  # define the patterns to search for
        patern2 = r'\[|\]'  # used to massage the raw urls field
        patern3 = r'[\-\?\:\;\$\%\^\&\*\(\)\|\!\`\'\"\,\<\.\>]'
        URL_cont = ''
        try:
            text = TextProcessing.transformer(text)
            urx = re.sub(patern2, '', re.sub(pattern, '', str(text.urls)))
            if urx != "None":  # read the urls to fetch each page title
                if "," in urx:  # more than one url present?
                    tado = urx.split(",")
                else:
                    tado = urx + "," + "https://www.google.com"
                    tado = tado.split(",")  # otherwise append a default address
                for cor in tado:
                    link = cor  # avoid rebinding issues inside this loop
                    reqs = requests.get(link)
                    soup = BeautifulSoup(reqs.text, 'html.parser')
                    for title in soup.find_all('title'):
                        if title.getText() == "Google":
                            URL_cont += "Null"  # drop the default page
                        elif title.getText() != "Página no encontrada":
                            var = title.getText()  # page title obtained
                            temp0 = re.sub(patern3, '', var)  # normalise it
                            temp0 = temp0.lower()
                            URL_cont += "" + str(temp0)  # store in the output
                        else:
                            URL_cont += "Null"  # page not found
                    URL_cont += "~"
            elif urx == "None":
                URL_cont += "Null" + "~"  # no urls at all: record null
            result = URL_cont.split("~")
        except Exception as e:
            print('Error delete_special_patterns: {0}'.format(e))
        return result
if __name__ == '__main__':
    # Demo: tag one Spanish and one English sample sentence and print the
    # per-token dictionaries produced by TextProcessing.nlp().
    tp_es = TextProcessing(lang='es')
    result_es = tp_es.nlp(
        'Ahora a la gente todo le parece tóxico, más si dices lo que sientes o te molesta…y NO, tóxico es quedarse '
        'callado por miedo a arruinar algo. Hay que aprender a quererse primero.')
    for i in result_es:
        print(i)
    tp_en = TextProcessing(lang='en')
    result_en = tp_en.nlp("The data doesn’t lie: here's what one of our teams learned when they tried a 4-day workweek.")
    for i in result_en:
        print(i)
| 6,247 | 485 | 23 |
52ac87c232d657f3f64271b0f50b5de7541b8d38 | 887 | py | Python | migrations/versions/0208_fix_unique_index.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | migrations/versions/0208_fix_unique_index.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | migrations/versions/0208_fix_unique_index.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | """
Revision ID: 0208_fix_unique_index
Revises: 0207_set_callback_history_type
Create Date: 2018-07-25 13:55:24.941794
"""
from alembic import op
revision = '84c3b6eb16b3'
down_revision = '0207_set_callback_history_type'
| 36.958333 | 117 | 0.793687 | """
Revision ID: 0208_fix_unique_index
Revises: 0207_set_callback_history_type
Create Date: 2018-07-25 13:55:24.941794
"""
from alembic import op
revision = '84c3b6eb16b3'
down_revision = '0207_set_callback_history_type'
def upgrade():
    """Add a (service_id, callback_type) unique constraint, and rebuild the
    service_id index as non-unique (it was unique before)."""
    op.create_unique_constraint('uix_service_callback_type', 'service_callback_api', ['service_id', 'callback_type'])
    op.drop_index('ix_service_callback_api_service_id', table_name='service_callback_api')
    op.create_index(op.f('ix_service_callback_api_service_id'), 'service_callback_api', ['service_id'], unique=False)
def downgrade():
    """Reverse upgrade(): restore the unique service_id index and drop the
    composite unique constraint."""
    op.drop_index(op.f('ix_service_callback_api_service_id'), table_name='service_callback_api')
    op.create_index('ix_service_callback_api_service_id', 'service_callback_api', ['service_id'], unique=True)
    op.drop_constraint('uix_service_callback_type', 'service_callback_api', type_='unique')
| 615 | 0 | 46 |
93456449dc042f5f46459ee1062a64d1c629d322 | 291 | py | Python | 1768.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 1768.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | null | null | null | 1768.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z | while True:
    try:
        # One test case per stdin line: the pyramid size n.
        n = int(input())
    except EOFError: break
    # Leading spaces before the first (single-star) row.
    lim = int(n // 2)
    # Rows of 1, 3, 5, ..., n stars, each indented one column less.
    for i in range(1, n+1, 2):
        print(' ' * lim, end='')
        lim -= 1
        print('*' * i)
    # Below the pyramid: a centred '*' over a centred '***', then a blank line.
    print((int(n // 2) * ' ') + '*')
    print((int(n // 2) - 1) * ' ' + '***')
    print()
| 22.384615 | 42 | 0.378007 | while True:
    try:
        # One test case per stdin line: the pyramid size n.
        n = int(input())
    except EOFError: break
    # Leading spaces before the first (single-star) row.
    lim = int(n // 2)
    # Rows of 1, 3, 5, ..., n stars, each indented one column less.
    for i in range(1, n+1, 2):
        print(' ' * lim, end='')
        lim -= 1
        print('*' * i)
    # Below the pyramid: a centred '*' over a centred '***', then a blank line.
    print((int(n // 2) * ' ') + '*')
    print((int(n // 2) - 1) * ' ' + '***')
    print()
| 0 | 0 | 0 |
3e1c020bdac07485f2ec00f3cc69ee79f0b9de5d | 2,521 | py | Python | Natalie Assistant/utils/Hotword_Detector.py | JafarAbbas33/Natalie-Assistant-Windows-x86- | fb9cb716d4da5abae1cd5e1fe8bd7509b3068e9d | [
"Apache-2.0"
] | null | null | null | Natalie Assistant/utils/Hotword_Detector.py | JafarAbbas33/Natalie-Assistant-Windows-x86- | fb9cb716d4da5abae1cd5e1fe8bd7509b3068e9d | [
"Apache-2.0"
] | null | null | null | Natalie Assistant/utils/Hotword_Detector.py | JafarAbbas33/Natalie-Assistant-Windows-x86- | fb9cb716d4da5abae1cd5e1fe8bd7509b3068e9d | [
"Apache-2.0"
] | null | null | null |
import sys, os, time
from selenium import webdriver
from selenium.webdriver.chrome.options import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import time
from pynput import keyboard as kb
from pynput.keyboard import Key, Listener
from pynput.keyboard import Controller as kb2
kb = kb2()
driver = None
listener = Listener(on_release=wait_for_esc)
listener.start()
if __name__ == "__main__":
initialize_driver()
while True:
start_detection()
else:
import app_terminator
| 27.107527 | 128 | 0.645775 |
import sys, os, time
from selenium import webdriver
from selenium.webdriver.chrome.options import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import time
from pynput import keyboard as kb
from pynput.keyboard import Key, Listener
from pynput.keyboard import Controller as kb2
kb = kb2()
driver = None
def initialize_driver():
    """Start a headless Chrome session on the picovoice lamp demo page.

    Stores the WebDriver in the module-level `driver`.  If Chrome fails to
    start, falls back to downloading a chromedriver first and retries.
    """
    global driver
    options = Options()
    options.add_argument("--log-level=3")
    #options.add_argument("--silent")
    options.add_argument("--headless")
    options.add_argument("--disable-logging")
    options.add_argument("disable-gpu")
    options.add_argument("--use-fake-ui-for-media-stream")
    # Persist the Chrome profile next to the CWD so media permissions stick.
    options.add_argument("--user-data-dir=" + os.getcwd() + "\\CacheUserData")
    try:
        driver = webdriver.Chrome(options=options)
    except:
        # NOTE(review): bare except — any failure (not just a missing
        # chromedriver) triggers the download-and-retry path.
        from ChromeDriverDownloader import ChromeDriverDownloader
        ChromeDriverDownloader.download(os.getcwd())
        driver = webdriver.Chrome(options=options)
    driver.implicitly_wait(10)
    #driver.maximize_window()
    driver.get("https://picovoice.ai/demos/lamp/")
def start_detection(root=None):
    """Block until the demo page reports a hotword ('Say a color' appears).

    Clicks the demo's microphone button, waits for the listening state
    ('Say “Hey Edison”'), and polls until detection.  Returns False if the
    Esc listener has stopped, after quitting the driver (and, when run
    embedded, terminating the app).  `root` is presumably a Tk-like window
    that gets minimised when not run as __main__ — TODO confirm.
    """
    global driver
    #while 'Press the microphone button to activate the demo' not in driver.page_source:
    #    pass
    element = driver.find_element_by_xpath('//*[@id="gatsby-focus-wrapper"]/div/div[1]/div[2]/div/div[1]/div[1]/div[1]/button')
    while 'Say “Hey Edison”' not in driver.page_source:
        time.sleep(0.2)
    driver.execute_script("arguments[0].click();", element)
    print('Listening...')
    # NOTE(review): app_terminator is only imported in the non-__main__
    # branch at module level, yet this call is unconditional — confirm.
    app_terminator.update_text('Listening')
    time.sleep(0.8)
    if __name__ != "__main__":
        root.state('iconic')
    # Poll until the page reports a detection, or the Esc listener dies.
    while 'Say a color' not in driver.page_source:
        if not listener.is_alive():
            if __name__ == "__main__":
                driver.quit()
                sys.exit()
            driver.quit()
            app_terminator.terminate()
            return False
    print('Hotword detected\n')
    # Re-click the button so the demo is disarmed until the next call.
    driver.execute_script("arguments[0].click();", element)
def wait_for_esc(key):
    """pynput on_release callback: returning False stops the Listener
    once the Esc key is released; any other key keeps it running."""
    return False if key == Key.esc else None
# Background keyboard listener: lets the user abort with Esc at any time.
listener = Listener(on_release=wait_for_esc)
listener.start()
if __name__ == "__main__":
    # Standalone mode: loop detections forever (exit happens via Esc).
    initialize_driver()
    while True:
        start_detection()
else:
    # Embedded mode: the hosting app drives detection and owns shutdown.
    import app_terminator
| 1,793 | 0 | 75 |
ef5f75ca1c767d4e4fadeec1ebc03deb6de10e33 | 3,600 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
import numpy
import math
@add_metaclass(ABCMeta)
| 35.294118 | 79 | 0.644722 | from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
import numpy
import math
@add_metaclass(ABCMeta)
class AbstractSynapseDynamics(object):
    """ Abstract interface for synapse dynamics implementations: executable
        naming, SDRAM sizing, parameter writing, weight/delay statistics
        delegated to a connector, and helpers for packing per-connection
        data into row-based words.  (Python 2/3 compatible via six.)
    """
    # numpy dtype of one connection record: pre/post indices + weight/delay.
    NUMPY_CONNECTORS_DTYPE = [("source", "uint32"), ("target", "uint32"),
                              ("weight", "float64"), ("delay", "float64")]
    @abstractmethod
    def is_same_as(self, synapse_dynamics):
        """ Determines if this synapse dynamics is the same as another
        """
    @abstractmethod
    def are_weights_signed(self):
        """ Determines if the weights are signed values
        """
    @abstractmethod
    def get_vertex_executable_suffix(self):
        """ Get the executable suffix for a vertex for this dynamics
        """
    @abstractmethod
    def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types):
        """ Get the SDRAM usage of the synapse dynamics parameters in bytes
        """
    @abstractmethod
    def write_parameters(self, spec, region, machine_time_step, weight_scales):
        """ Write the synapse parameters to the spec
        """
    def get_provenance_data(self, pre_population_label, post_population_label):
        """ Get the provenance data from this synapse dynamics object
        """
        # Default: no provenance; subclasses may override.
        return list()
    def get_delay_maximum(self, connector):
        """ Get the maximum delay for the synapses
        """
        return connector.get_delay_maximum()
    def get_weight_mean(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the mean weight for the synapses
        """
        return connector.get_weight_mean(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)
    def get_weight_maximum(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the maximum weight for the synapses
        """
        return connector.get_weight_maximum(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)
    def get_weight_variance(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the variance in weight for the synapses
        """
        return connector.get_weight_variance(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)
    def convert_per_connection_data_to_rows(
            self, connection_row_indices, n_rows, data):
        """ Converts per-connection data generated from connections into\
            row-based data to be returned from get_synaptic_data
        """
        # One flat array per row, selecting the connections whose row index
        # matches that row.
        return [
            data[connection_row_indices == i].reshape(-1)
            for i in range(n_rows)
        ]
    def get_n_items(self, rows, item_size):
        """ Get the number of items in each row as 4-byte values, given the\
            item size
        """
        # Count of item_size-byte items per row, rounded up, shaped (n, 1).
        return numpy.array([
            int(math.ceil(float(row.size) / float(item_size)))
            for row in rows], dtype="uint32").reshape((-1, 1))
    def get_words(self, rows):
        """ Convert the row data to words
        """
        # Zero-pad each row to a multiple of 4 entries ((4 - size % 4) & 0x3
        # extra zeros) and reinterpret the buffer as 32-bit words.
        words = [numpy.pad(
            row, (0, (4 - (row.size % 4)) & 0x3), mode="constant",
            constant_values=0).view("uint32") for row in rows]
        return words
| 0 | 3,441 | 22 |
65d097834349dee08312b5bd4433faeac557c837 | 3,664 | py | Python | problems/12/Solver.py | tmct/adventOfCode2016 | bd5699ca179b873f9da01514903b1dd493a46b7b | [
"MIT"
] | null | null | null | problems/12/Solver.py | tmct/adventOfCode2016 | bd5699ca179b873f9da01514903b1dd493a46b7b | [
"MIT"
] | null | null | null | problems/12/Solver.py | tmct/adventOfCode2016 | bd5699ca179b873f9da01514903b1dd493a46b7b | [
"MIT"
] | null | null | null | import re
ABCD = 'abcd'
copy_regex = r'cpy (\S+) (\S+)'
inc_regex = r'inc ([abcd])'
dec_regex = r'dec ([abcd])'
jump_regex = r'jnz (\S+) (-?\d+)'
| 33.925926 | 96 | 0.580513 | import re
ABCD = 'abcd'
copy_regex = r'cpy (\S+) (\S+)'
inc_regex = r'inc ([abcd])'
dec_regex = r'dec ([abcd])'
jump_regex = r'jnz (\S+) (-?\d+)'
class Solver:
    """Interpreter for a tiny cpy/inc/dec/jnz assembly over four registers
    a-d (indexed through the module-level ABCD string).

    Each source line is pre-compiled by get_instruction_method into a
    closure that mutates self.registers and self.current_instruction_index
    when invoked; run_program then dispatches until the instruction pointer
    runs off the end.
    """
    def __init__(self):
        self.registers = [0, 0, 0, 0]  # a, b, c, d
        self.instructions = None  # tuple of closures, set by parse_instructions
        self.current_instruction_index = 0
        self.number_of_instructions = None
    def get_a_register_value(self, input_file_name):
        """Parse the program in input_file_name, run it, return register a."""
        self.parse_instructions(input_file_name)
        # then run function starting with first instruction
        self.run_program()
        return self.registers[0]
    def get_a_register_value_with_c_bodge(self, input_file_name):
        """Same as get_a_register_value but with register c preset to 1."""
        self.registers[2] = 1
        return self.get_a_register_value(input_file_name)
    def parse_instructions(self, input_file_name):
        """Compile every line of the input file into a closure, storing the
        resulting tuple and its length on the instance."""
        instructions = []
        with open(input_file_name, 'r') as input_file:
            for line in input_file:
                instruction = self.get_instruction_method(line.strip())
                instructions.append(instruction)
        self.instructions = tuple(instructions)
        self.number_of_instructions = len(self.instructions)
    def get_instruction_method(self, instruction_string):
        """Return a zero-argument closure implementing one instruction.

        Closures capture self plus the decoded operands; each one also
        advances current_instruction_index (by 1, or by the jump offset).
        Raises Exception for an unrecognised line.
        """
        m = re.search(copy_regex, instruction_string)
        if m:
            origin = m.group(1)
            destination = ABCD.index(m.group(2))
            if origin in ABCD:
                register = ABCD.index(origin)
                def copy_from_register_to_register():
                    self.registers[destination] = self.registers[register]
                    self.current_instruction_index += 1
                return copy_from_register_to_register
            else:
                value = int(origin)
                def copy_int_value_to_register():
                    self.registers[destination] = value
                    self.current_instruction_index += 1
                return copy_int_value_to_register
        m = re.search(inc_regex, instruction_string)
        if m:
            value = int(ABCD.index(m.group(1)))
            def increment_register():
                self.registers[value] += 1
                self.current_instruction_index += 1
            return increment_register
        m = re.search(dec_regex, instruction_string)
        if m:
            value = int(ABCD.index(m.group(1)))
            def decrement_register():
                self.registers[value] -= 1
                self.current_instruction_index += 1
            return decrement_register
        m = re.search(jump_regex, instruction_string)
        if m:
            check_value = m.group(1)
            jump_size = int(m.group(2))
            if check_value in ABCD:
                register = ABCD.index(check_value)
                def jump():
                    # jnz on a register: re-test the register at execution
                    # time; a zero value just steps to the next instruction.
                    _jump_size = int(jump_size)
                    if not self.registers[register]:
                        _jump_size = 1
                    self.current_instruction_index += _jump_size
                return jump
            else:
                value = int(check_value)
                # jnz on a constant: decidable at compile time, so rebind
                # jump_size to 1 *before* the closure captures it.
                if not value:
                    jump_size = 1
                def jump():
                    self.current_instruction_index += jump_size
                return jump
        raise Exception('Line not parsed as a known instruction: {}'.format(instruction_string))
    def run_program(self):
        """Execute closures until the instruction pointer leaves the program,
        then report how many instructions were executed."""
        counter = 0
        while self.current_instruction_index < self.number_of_instructions:
            next_instruction = self.instructions[self.current_instruction_index]
            next_instruction()
            counter += 1
        print('Program terminated after {} instructions'.format(counter))
| 3,340 | -8 | 184 |
ad6eb6abd34ea03303229fcbef80df0fde370882 | 8,810 | py | Python | app/user/routes.py | NiketanG/TF_Quiz | 0dc91db5df4cae3035aea84e9baf969309b6ad93 | [
"MIT"
] | 1 | 2020-07-25T03:26:45.000Z | 2020-07-25T03:26:45.000Z | app/user/routes.py | NiketanG/TF_Quiz | 0dc91db5df4cae3035aea84e9baf969309b6ad93 | [
"MIT"
] | 26 | 2020-05-29T23:22:57.000Z | 2021-05-25T19:57:02.000Z | app/user/routes.py | NiketanG/TF_Quiz | 0dc91db5df4cae3035aea84e9baf969309b6ad93 | [
"MIT"
] | null | null | null | from flask import (
render_template,
jsonify,
Blueprint,
redirect,
url_for,
flash,
request,
session,
send_file,
send_from_directory,
current_app as app,
)
from flask_socketio import leave_room, join_room
from flask_login import login_required, current_user, logout_user, login_user
from app.models import users, questions, events
from app.user.forms import RegisterForm, LoginForm
from app import db, socketio, bcrypt
from sqlalchemy import and_, func
import random
import json
import os
user = Blueprint("user", __name__)
@user.route("/")
@user.route("/register", methods=["GET", "POST"])
@user.route("/login", methods=["GET", "POST"])
@user.route("/quiz")
@login_required
@socketio.on("connect", namespace="/quiz")
@socketio.on("disconnect", namespace="/quiz")
@socketio.on("fetch_questions", namespace="/quiz")
@socketio.on("update_time", namespace="/time")
@socketio.on("submit_answer", namespace="/quiz")
@user.route("/logout")
@login_required
@user.route("/finish")
@login_required
| 30.37931 | 88 | 0.600795 | from flask import (
render_template,
jsonify,
Blueprint,
redirect,
url_for,
flash,
request,
session,
send_file,
send_from_directory,
current_app as app,
)
from flask_socketio import leave_room, join_room
from flask_login import login_required, current_user, logout_user, login_user
from app.models import users, questions, events
from app.user.forms import RegisterForm, LoginForm
from app import db, socketio, bcrypt
from sqlalchemy import and_, func
import random
import json
import os
user = Blueprint("user", __name__)
@user.route("/")
def index():
return render_template("index.html")
@user.route("/register", methods=["GET", "POST"])
def register():
form = RegisterForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode(
"utf-8"
)
event = events.query.filter_by(id=form.quiz.data).first()
user_login = users(
email=form.email.data,
name=form.name.data,
clgname=form.clgname.data,
phno=str(form.phno.data),
password=hashed_password,
quiz_name=form.quiz.data,
timeleft=event.time,
)
try:
db.session.add(user_login)
db.session.commit()
flash("Signed up successfully.")
next = request.args.get("next")
socketio.emit(
"stats_updated",
{
"user_id": user_login.user_id,
"name": user_login.name,
"phno": user_login.phno,
"clgname": user_login.clgname,
"score": user_login.score,
},
namespace="/leaderboard/{}".format(event.event_name),
)
return redirect(next or url_for("user.login"))
except Exception as e:
print(e)
db.session.rollback()
else:
print(form.errors)
return render_template("register.html", form=form)
@user.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = users.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get("next")
if current_user.admin is True:
return (
redirect(next_page)
if next_page
else redirect(url_for("admin.ldrbrd"))
)
else:
return (
redirect(next_page) if next_page else redirect(url_for("user.quiz"))
)
else:
flash("Login Unsuccessful. Please check email and password")
else:
print(form.errors)
return render_template("login.html", form=form)
@user.route("/quiz")
@login_required
def quiz():
if current_user.admin is True:
return redirect(url_for("admin.ldrbrd"))
curr_user = users.query.filter_by(email=current_user.email).first()
print(curr_user.quiz_name)
attempted_ques = curr_user.attempted_index
db_answers = curr_user.answers
question_count = (
db.session.query(questions).filter_by(event_id=curr_user.quiz_name).count()
)
event = events.query.filter_by(id=curr_user.quiz_name).first()
question_list = random.sample(range(1, question_count), event.question_count)
if curr_user.question_ids == None:
curr_user.question_ids = question_list
try:
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
session["question_list"] = question_list
else:
session["question_list"] = curr_user.question_ids
for question_id in question_list:
question = questions.query.filter_by(
question_id=question_id, event_id=curr_user.quiz_name
).first()
if db_answers == None:
db_answers = []
if attempted_ques == None:
attempted_ques = []
return render_template(
"quiz.html",
time=curr_user.timeleft,
user=current_user,
attempted=attempted_ques,
dbanswers=db_answers,
quiz_name=curr_user.quiz_name,
ques_count=int(event.question_count),
)
@socketio.on("connect", namespace="/quiz")
def connect_handler():
if current_user.is_authenticated:
join_room(str(current_user.user_id))
@socketio.on("disconnect", namespace="/quiz")
def disconnect_handler():
if current_user.is_authenticated:
leave_room(str(current_user.user_id))
@socketio.on("fetch_questions", namespace="/quiz")
def fetch_questions():
curr_user = users.query.filter_by(email=current_user.email).first()
questions_dict = []
for question_id in curr_user.question_ids:
# To prevent automatic sorting of question_ids
ques = questions.query.filter_by(
question_id=question_id, event_id=curr_user.quiz_name
).first()
questions_dict.append(
{
"question_id": ques.question_id,
"question": ques.question,
"option_a": ques.option_a,
"option_b": ques.option_b,
"option_c": ques.option_c,
"option_d": ques.option_d,
}
)
return questions_dict
@socketio.on("update_time", namespace="/time")
def update_time(time):
curr_user = users.query.filter_by(email=current_user.email).first()
curr_user.timeleft = data.get("time")
try:
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
@socketio.on("submit_answer", namespace="/quiz")
def submit_answer(data):
curr_user = users.query.filter_by(email=current_user.email).first()
event = events.query.filter_by(id=curr_user.quiz_name).first()
question_id = int(data.get("question_id", 0))
answer = str(data.get("answer", 0))
db_question_id = session["question_list"][question_id]
ques = questions.query.filter_by(
question_id=db_question_id, event_id=curr_user.quiz_name
).first()
attempted_ques = curr_user.attempted
attempted_index_ques = curr_user.attempted_index
answers = curr_user.answers
if ques.answer == answer:
if curr_user.attempted != None:
if db_question_id not in curr_user.attempted:
curr_user.score = int(curr_user.score) + 1
socketio.emit(
"stats_updated",
{
"user_id": curr_user.user_id,
"name": curr_user.name,
"phno": curr_user.phno,
"clgname": curr_user.clgname,
"score": curr_user.score,
},
namespace="/leaderboard/{}".format(event.event_name),
)
else:
curr_user.score = int(curr_user.score) + 1
socketio.emit(
"stats_updated",
{
"user_id": curr_user.user_id,
"name": curr_user.name,
"phno": curr_user.phno,
"clgname": curr_user.clgname,
"score": curr_user.score,
},
namespace="/leaderboard/{}".format(event.event_name),
)
if attempted_ques == None:
attempted_ques = [db_question_id]
curr_user.attempted = attempted_ques
answers = [answer]
curr_user.answers = answers
else:
if db_question_id not in curr_user.attempted:
curr_user.attempted.append(db_question_id)
curr_user.answers.append(answer)
if attempted_index_ques == None:
attempted_index_ques = [question_id]
curr_user.attempted_index = attempted_index_ques
else:
if question_id not in curr_user.attempted_index:
curr_user.attempted_index.append(question_id)
curr_user.timeleft = data.get("time")
try:
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
data = {
"attempted_ques": attempted_index_ques,
"db_answers": answers,
"attempted_qa": dict(zip(attempted_index_ques, answers)),
}
return data
@user.route("/logout")
@login_required
def logout():
logout_user()
flash("You have been successfully logged out")
return redirect(url_for("user.register"))
@user.route("/finish")
@login_required
def finish():
flash("Quiz Finished")
return redirect(url_for("user.logout"))
| 7,512 | 0 | 242 |
fe618e7e320db3a034eb4b4abc24eafce8284bbf | 11,435 | py | Python | isympy.py | venimaster/sympy | d68f2996b04ab7aa214c05d8a1ae86a83ac43cc4 | [
"BSD-3-Clause"
] | 2 | 2018-12-05T02:30:43.000Z | 2020-11-14T01:43:15.000Z | isympy.py | venimaster/sympy | d68f2996b04ab7aa214c05d8a1ae86a83ac43cc4 | [
"BSD-3-Clause"
] | 1 | 2017-10-23T06:56:43.000Z | 2017-10-23T06:56:43.000Z | isympy.py | venimaster/sympy | d68f2996b04ab7aa214c05d8a1ae86a83ac43cc4 | [
"BSD-3-Clause"
] | 1 | 2020-01-01T19:49:22.000Z | 2020-01-01T19:49:22.000Z | # XXX: Don't put a newline here, or it will add an extra line with
# isympy --help
# |
# v
"""Python shell for SymPy.
This is just a normal Python shell (IPython shell if you have the
IPython package installed), that executes the following commands for
the user:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
So starting 'isympy' is equivalent to starting Python (or IPython) and
executing the above commands by hand. It is intended for easy and quick
experimentation with SymPy. isympy is a good way to use SymPy as an
interactive calculator. If you have IPython and Matplotlib installed, then
interactive plotting is enabled by default.
COMMAND LINE OPTIONS
--------------------
-c CONSOLE, --console=CONSOLE
Use the specified shell (Python or IPython) shell as the console
backend instead of the default one (IPython if present, Python
otherwise), e.g.:
$isympy -c python
CONSOLE must be one of 'ipython' or 'python'
-p PRETTY, --pretty PRETTY
Setup pretty-printing in SymPy. When pretty-printing is enabled,
expressions can be printed with Unicode or ASCII. The default is
to use pretty-printing (with Unicode if the terminal supports it).
When this option is 'no', expressions will not be pretty-printed
and ASCII will be used:
$isympy -p no
PRETTY must be one of 'unicode', 'ascii', or 'no'
-t TYPES, --types=TYPES
Setup the ground types for the polys. By default, gmpy ground types
are used if gmpy2 or gmpy is installed, otherwise it falls back to python
ground types, which are a little bit slower. You can manually
choose python ground types even if gmpy is installed (e.g., for
testing purposes):
$isympy -t python
TYPES must be one of 'gmpy', 'gmpy1' or 'python'
Note that the ground type gmpy1 is primarily intended for testing; it
forces the use of gmpy version 1 even if gmpy2 is available.
This is the same as setting the environment variable
SYMPY_GROUND_TYPES to the given ground type (e.g.,
SYMPY_GROUND_TYPES='gmpy')
The ground types can be determined interactively from the variable
sympy.polys.domains.GROUND_TYPES.
-o ORDER, --order ORDER
Setup the ordering of terms for printing. The default is lex, which
orders terms lexicographically (e.g., x**2 + x + 1). You can choose
other orderings, such as rev-lex, which will use reverse
lexicographic ordering (e.g., 1 + x + x**2):
$isympy -o rev-lex
ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex',
'grevlex', 'rev-grevlex', 'old', or 'none'.
Note that for very large expressions, ORDER='none' may speed up
printing considerably but the terms will have no canonical order.
-q, --quiet
Print only Python's and SymPy's versions to stdout at startup.
-d, --doctest
Use the same format that should be used for doctests. This is
equivalent to -c python -p no.
-C, --no-cache
Disable the caching mechanism. Disabling the cache may slow certain
operations down considerably. This is useful for testing the cache,
or for benchmarking, as the cache can result in deceptive timings.
This is equivalent to setting the environment variable
SYMPY_USE_CACHE to 'no'.
-a, --auto-symbols (requires at least IPython 0.11)
Automatically create missing symbols. Normally, typing a name of a
Symbol that has not been instantiated first would raise NameError,
but with this option enabled, any undefined name will be
automatically created as a Symbol.
Note that this is intended only for interactive, calculator style
usage. In a script that uses SymPy, Symbols should be instantiated
at the top, so that it's clear what they are.
This will not override any names that are already defined, which
includes the single character letters represented by the mnemonic
QCOSINE (see the "Gotchas and Pitfalls" document in the
documentation). You can delete existing names by executing "del
name". If a name is defined, typing "'name' in dir()" will return True.
The Symbols that are created using this have default assumptions.
If you want to place assumptions on symbols, you should create them
using symbols() or var().
Finally, this only works in the top level namespace. So, for
example, if you define a function in isympy with an undefined
Symbol, it will not work.
See also the -i and -I options.
-i, --int-to-Integer (requires at least IPython 0.11)
Automatically wrap int literals with Integer. This makes it so that
things like 1/2 will come out as Rational(1, 2), rather than 0.5. This
works by preprocessing the source and wrapping all int literals with
Integer. Note that this will not change the behavior of int literals
assigned to variables, and it also won't change the behavior of functions
that return int literals.
If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2)
gives 1.5 (with division imported from __future__).
-I, --interactive (requires at least IPython 0.11)
This is equivalent to --auto-symbols --int-to-Integer. Future options
designed for ease of interactive use may be added to this.
-D, --debug
Enable debugging output. This is the same as setting the
environment variable SYMPY_DEBUG to 'True'. The debug status is set
in the variable SYMPY_DEBUG within isympy.
-- IPython options
Additionally you can pass command line options directly to the IPython
interpreter (the standard Python shell is not supported). However you
need to add the '--' separator between two types of options, e.g the
startup banner option and the colors option. You need to enter the
options as required by the version of IPython that you are using, too:
in IPython 0.11,
$isympy -q -- --colors=NoColor
or older versions of IPython,
$isympy -q -- -colors NoColor
See also isympy --help.
"""
import os
import sys
# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables
# by the command line will break.
if __name__ == "__main__":
main()
| 33.049133 | 110 | 0.65282 | # XXX: Don't put a newline here, or it will add an extra line with
# isympy --help
# |
# v
"""Python shell for SymPy.
This is just a normal Python shell (IPython shell if you have the
IPython package installed), that executes the following commands for
the user:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
So starting 'isympy' is equivalent to starting Python (or IPython) and
executing the above commands by hand. It is intended for easy and quick
experimentation with SymPy. isympy is a good way to use SymPy as an
interactive calculator. If you have IPython and Matplotlib installed, then
interactive plotting is enabled by default.
COMMAND LINE OPTIONS
--------------------
-c CONSOLE, --console=CONSOLE
Use the specified shell (Python or IPython) shell as the console
backend instead of the default one (IPython if present, Python
otherwise), e.g.:
$isympy -c python
CONSOLE must be one of 'ipython' or 'python'
-p PRETTY, --pretty PRETTY
Setup pretty-printing in SymPy. When pretty-printing is enabled,
expressions can be printed with Unicode or ASCII. The default is
to use pretty-printing (with Unicode if the terminal supports it).
When this option is 'no', expressions will not be pretty-printed
and ASCII will be used:
$isympy -p no
PRETTY must be one of 'unicode', 'ascii', or 'no'
-t TYPES, --types=TYPES
Setup the ground types for the polys. By default, gmpy ground types
are used if gmpy2 or gmpy is installed, otherwise it falls back to python
ground types, which are a little bit slower. You can manually
choose python ground types even if gmpy is installed (e.g., for
testing purposes):
$isympy -t python
TYPES must be one of 'gmpy', 'gmpy1' or 'python'
Note that the ground type gmpy1 is primarily intended for testing; it
forces the use of gmpy version 1 even if gmpy2 is available.
This is the same as setting the environment variable
SYMPY_GROUND_TYPES to the given ground type (e.g.,
SYMPY_GROUND_TYPES='gmpy')
The ground types can be determined interactively from the variable
sympy.polys.domains.GROUND_TYPES.
-o ORDER, --order ORDER
Setup the ordering of terms for printing. The default is lex, which
orders terms lexicographically (e.g., x**2 + x + 1). You can choose
other orderings, such as rev-lex, which will use reverse
lexicographic ordering (e.g., 1 + x + x**2):
$isympy -o rev-lex
ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex',
'grevlex', 'rev-grevlex', 'old', or 'none'.
Note that for very large expressions, ORDER='none' may speed up
printing considerably but the terms will have no canonical order.
-q, --quiet
Print only Python's and SymPy's versions to stdout at startup.
-d, --doctest
Use the same format that should be used for doctests. This is
equivalent to -c python -p no.
-C, --no-cache
Disable the caching mechanism. Disabling the cache may slow certain
operations down considerably. This is useful for testing the cache,
or for benchmarking, as the cache can result in deceptive timings.
This is equivalent to setting the environment variable
SYMPY_USE_CACHE to 'no'.
-a, --auto-symbols (requires at least IPython 0.11)
Automatically create missing symbols. Normally, typing a name of a
Symbol that has not been instantiated first would raise NameError,
but with this option enabled, any undefined name will be
automatically created as a Symbol.
Note that this is intended only for interactive, calculator style
usage. In a script that uses SymPy, Symbols should be instantiated
at the top, so that it's clear what they are.
This will not override any names that are already defined, which
includes the single character letters represented by the mnemonic
QCOSINE (see the "Gotchas and Pitfalls" document in the
documentation). You can delete existing names by executing "del
name". If a name is defined, typing "'name' in dir()" will return True.
The Symbols that are created using this have default assumptions.
If you want to place assumptions on symbols, you should create them
using symbols() or var().
Finally, this only works in the top level namespace. So, for
example, if you define a function in isympy with an undefined
Symbol, it will not work.
See also the -i and -I options.
-i, --int-to-Integer (requires at least IPython 0.11)
Automatically wrap int literals with Integer. This makes it so that
things like 1/2 will come out as Rational(1, 2), rather than 0.5. This
works by preprocessing the source and wrapping all int literals with
Integer. Note that this will not change the behavior of int literals
assigned to variables, and it also won't change the behavior of functions
that return int literals.
If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2)
gives 1.5 (with division imported from __future__).
-I, --interactive (requires at least IPython 0.11)
This is equivalent to --auto-symbols --int-to-Integer. Future options
designed for ease of interactive use may be added to this.
-D, --debug
Enable debugging output. This is the same as setting the
environment variable SYMPY_DEBUG to 'True'. The debug status is set
in the variable SYMPY_DEBUG within isympy.
-- IPython options
Additionally you can pass command line options directly to the IPython
interpreter (the standard Python shell is not supported). However you
need to add the '--' separator between two types of options, e.g the
startup banner option and the colors option. You need to enter the
options as required by the version of IPython that you are using, too:
in IPython 0.11,
$isympy -q -- --colors=NoColor
or older versions of IPython,
$isympy -q -- -colors NoColor
See also isympy --help.
"""
import os
import sys
# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables
# by the command line will break.
def main():
from optparse import OptionParser
if '-h' in sys.argv or '--help' in sys.argv:
# XXX: We can't use description=__doc__ in the OptionParser call
# below because optparse line wraps it weird. The argparse module
# allows you to disable this, though.
print(__doc__) # the docstring of this module above
VERSION = None
if '--version' in sys.argv:
# We cannot import sympy before this is run, because flags like -C and
# -t set environment variables that must be set before SymPy is
# imported. The only thing we need to import it for is to get the
# version, which only matters with the --version flag.
import sympy
VERSION = sympy.__version__
usage = 'usage: isympy [options] -- [ipython options]'
parser = OptionParser(
usage=usage,
version=VERSION,
# XXX: We need a more centralized place to store the version.
# It is currently stored in sympy.__version__, but we can't yet
# import sympy at this point.
)
parser.add_option(
'-c', '--console',
dest='console',
action='store',
default=None,
choices=['ipython', 'python'],
help='select type of interactive session: ipython | python; defaults '
'to ipython if IPython is installed, otherwise python')
parser.add_option(
'-p', '--pretty',
dest='pretty',
action='store',
default=None,
choices=['unicode', 'ascii', 'no'],
help='setup pretty printing: unicode | ascii | no; defaults to '
'unicode printing if the terminal supports it, otherwise ascii')
parser.add_option(
'-t', '--types',
dest='types',
action='store',
default=None,
choices=['gmpy', 'gmpy1', 'python'],
help='setup ground types: gmpy | gmpy1 | python; defaults to gmpy if gmpy2 '
'or gmpy is installed, otherwise python')
parser.add_option(
'-o', '--order',
dest='order',
action='store',
default=None,
choices=['lex', 'grlex', 'grevlex', 'rev-lex', 'rev-grlex', 'rev-grevlex', 'old', 'none'],
help='setup ordering of terms: [rev-]lex | [rev-]grlex | [rev-]grevlex | old | none; defaults to lex')
parser.add_option(
'-q', '--quiet',
dest='quiet',
action='store_true',
default=False,
help='print only version information at startup')
parser.add_option(
'-d', '--doctest',
dest='doctest',
action='store_true',
default=False,
help='use the doctest format for output (you can just copy and paste it)')
parser.add_option(
'-C', '--no-cache',
dest='cache',
action='store_false',
default=True,
help='disable caching mechanism')
parser.add_option(
'-a', '--auto-symbols',
dest='auto_symbols',
action='store_true',
default=False,
help='automatically construct missing symbols')
parser.add_option(
'-i', '--int-to-Integer',
dest='auto_int_to_Integer',
action='store_true',
default=False,
help="automatically wrap int literals with Integer")
parser.add_option(
'-I', '--interactive',
dest='interactive',
action='store_true',
default=False,
help="equivalent to -a -i")
parser.add_option(
'-D', '--debug',
dest='debug',
action='store_true',
default=False,
help='enable debugging output')
(options, ipy_args) = parser.parse_args()
if not options.cache:
os.environ['SYMPY_USE_CACHE'] = 'no'
if options.types:
os.environ['SYMPY_GROUND_TYPES'] = options.types
if options.debug:
os.environ['SYMPY_DEBUG'] = str(options.debug)
if options.doctest:
options.pretty = 'no'
options.console = 'python'
session = options.console
if session is not None:
ipython = session == 'ipython'
else:
try:
import IPython
ipython = True
except ImportError:
if not options.quiet:
from sympy.interactive.session import no_ipython
print(no_ipython)
ipython = False
args = {
'pretty_print': True,
'use_unicode': None,
'use_latex': None,
'order': None,
'argv': ipy_args,
}
if options.pretty == 'unicode':
args['use_unicode'] = True
elif options.pretty == 'ascii':
args['use_unicode'] = False
elif options.pretty == 'no':
args['pretty_print'] = False
if options.order is not None:
args['order'] = options.order
args['quiet'] = options.quiet
args['auto_symbols'] = options.auto_symbols or options.interactive
args['auto_int_to_Integer'] = options.auto_int_to_Integer or options.interactive
from sympy.interactive import init_session
init_session(ipython, **args)
if __name__ == "__main__":
main()
| 5,044 | 0 | 23 |
3e9f24e06981b0ecd4283ddd8131951990eded76 | 252 | py | Python | src/zulu/__init__.py | dgilland/zulu | f911dd34d3ad2487edf4bb8b5c751eaef25e4f9f | [
"MIT"
] | 53 | 2016-07-18T03:13:36.000Z | 2022-02-08T01:57:15.000Z | src/zulu/__init__.py | dgilland/zulu | f911dd34d3ad2487edf4bb8b5c751eaef25e4f9f | [
"MIT"
] | 25 | 2016-07-26T17:49:17.000Z | 2020-01-15T02:46:32.000Z | src/zulu/__init__.py | dgilland/zulu | f911dd34d3ad2487edf4bb8b5c751eaef25e4f9f | [
"MIT"
] | 2 | 2016-07-21T02:01:05.000Z | 2020-01-14T11:46:43.000Z | """The zulu library."""
__version__ = "2.0.0"
from .api import create, now, parse, parse_delta, range, span_range
from .delta import Delta, to_seconds
from .parser import ISO8601, TIMESTAMP, ParseError
from .timer import Timer
from .zulu import Zulu
| 25.2 | 67 | 0.757937 | """The zulu library."""
__version__ = "2.0.0"
from .api import create, now, parse, parse_delta, range, span_range
from .delta import Delta, to_seconds
from .parser import ISO8601, TIMESTAMP, ParseError
from .timer import Timer
from .zulu import Zulu
| 0 | 0 | 0 |
f0244ab14393f7f1bd09215e73d20e7b1078d171 | 775 | py | Python | drl/agents/architectures/stateless/test_dueling.py | lucaslingle/pytorch_drl | 6b2c1142a36553ce5dcb0a5768767579676d5791 | [
"MIT"
] | null | null | null | drl/agents/architectures/stateless/test_dueling.py | lucaslingle/pytorch_drl | 6b2c1142a36553ce5dcb0a5768767579676d5791 | [
"MIT"
] | null | null | null | drl/agents/architectures/stateless/test_dueling.py | lucaslingle/pytorch_drl | 6b2c1142a36553ce5dcb0a5768767579676d5791 | [
"MIT"
] | null | null | null | import torch as tc
from drl.agents.architectures.stateless.dueling import DuelingArchitecture
from drl.utils.initializers import get_initializer
| 33.695652 | 76 | 0.707097 | import torch as tc
from drl.agents.architectures.stateless.dueling import DuelingArchitecture
from drl.utils.initializers import get_initializer
def test_dueling():
batch_size = 32
input_dim, num_actions = 512, 18
input_shape = [input_dim]
img_batch = tc.zeros(size=(batch_size, *input_shape), dtype=tc.float32)
dueling = DuelingArchitecture(
input_dim=input_dim,
output_dim=num_actions,
widening=1,
w_init=get_initializer(('zeros_', {})),
b_init=get_initializer(('zeros_', {})))
assert dueling.input_shape == input_shape
assert dueling.output_dim == num_actions
tc.testing.assert_close(
actual=dueling(img_batch),
expected=tc.zeros(size=[batch_size, num_actions], dtype=tc.float32))
| 605 | 0 | 23 |
4b049ccd70302ff5e5b678152937325762c83e11 | 2,363 | py | Python | data/PerspectiveAPI/api.py | andy-techen/better-social-media | 4cd85afe0f39447de5313cc3d70789982da18dd9 | [
"MIT"
] | null | null | null | data/PerspectiveAPI/api.py | andy-techen/better-social-media | 4cd85afe0f39447de5313cc3d70789982da18dd9 | [
"MIT"
] | null | null | null | data/PerspectiveAPI/api.py | andy-techen/better-social-media | 4cd85afe0f39447de5313cc3d70789982da18dd9 | [
"MIT"
] | null | null | null | from googleapiclient import discovery
import json
import os
from dotenv import load_dotenv
import time
load_dotenv()
# # # Testing connection
# def implicit():
# from google.cloud import storage
# # If you don't specify credentials when constructing the client, the
# # client library will look for credentials in the environment.
# storage_client = storage.Client()
# # Make an authenticated API request
# buckets = list(storage_client.list_buckets())
# print(buckets)
#Test API KEY and Response Analysis
API_KEY = os.getenv('PERSPECTIVE_API_KEY')
client = discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=API_KEY,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
if __name__ == '__main__':
pass
sample = '''We are not in a trade war with China,
that war was lost many years ago by the foolish,
or incompetent, people who represented the U.S.
Now we have a Trade Deficit of $500 Billion a year,
with Intellectual Property Theft of another $300 Billion.
We cannot let this continue!'''
print(score_text(text=sample)) | 31.506667 | 107 | 0.635633 | from googleapiclient import discovery
import json
import os
from dotenv import load_dotenv
import time
load_dotenv()
# # # Testing connection
# def implicit():
# from google.cloud import storage
# # If you don't specify credentials when constructing the client, the
# # client library will look for credentials in the environment.
# storage_client = storage.Client()
# # Make an authenticated API request
# buckets = list(storage_client.list_buckets())
# print(buckets)
#Test API KEY and Response Analysis
API_KEY = os.getenv('PERSPECTIVE_API_KEY')
client = discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=API_KEY,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
def score_text(text):
analyze_request = {
'comment': { 'text': text},
'languages': ['en'],
'requestedAttributes': {
'TOXICITY': {},
'INSULT': {},
'PROFANITY': {},
'THREAT': {},
'SEXUALLY_EXPLICIT': {},
## Additional Sentiment Analysis:
## - Depressive content
},
}
# time.sleep(1)
english = 'en'
response = client.comments().analyze(body=analyze_request).execute()
if english in response['languages']:
results = {
'TOXICITY': response['attributeScores']['TOXICITY']['summaryScore']['value'],
'INSULT': response['attributeScores']['INSULT']['summaryScore']['value'],
'PROFANITY': response['attributeScores']['PROFANITY']['summaryScore']['value'],
'THREAT': response['attributeScores']['THREAT']['summaryScore']['value'],
'SEXUALLY_EXPLICIT': response['attributeScores']['SEXUALLY_EXPLICIT']['summaryScore']['value'],
}
return results
else:
return {'TOXICITY': 0, 'INSULT': 0, 'PROFANITY': 0, 'THREAT': 0, 'SEXUALLY_EXPLICIT': 0}
if __name__ == '__main__':
pass
sample = '''We are not in a trade war with China,
that war was lost many years ago by the foolish,
or incompetent, people who represented the U.S.
Now we have a Trade Deficit of $500 Billion a year,
with Intellectual Property Theft of another $300 Billion.
We cannot let this continue!'''
print(score_text(text=sample)) | 1,145 | 0 | 23 |
dcedffa3d3a79e33199c8be73411410ff7e27faa | 15,896 | py | Python | genshin/client/components/base.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 63 | 2021-10-04T19:53:54.000Z | 2022-03-30T07:21:03.000Z | genshin/client/components/base.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 17 | 2021-11-16T20:42:52.000Z | 2022-03-31T10:11:52.000Z | genshin/client/components/base.py | thesadru/genshin.py | 806b8d0dd059a06605e66dead917fdf550a552bc | [
"MIT"
] | 10 | 2021-10-16T22:41:41.000Z | 2022-02-19T17:55:23.000Z | """Base ABC Client."""
import abc
import asyncio
import base64
import json
import logging
import os
import typing
import urllib.parse
import aiohttp.typedefs
import yarl
from genshin import constants, errors, types, utility
from genshin.client import cache as client_cache
from genshin.client import manager, routes
from genshin.models import hoyolab as hoyolab_models
from genshin.models import model as base_model
from genshin.utility import concurrency, deprecation, ds
__all__ = ["BaseClient"]
class BaseClient(abc.ABC):
"""Base ABC Client."""
__slots__ = ("cookie_manager", "cache", "_authkey", "_lang", "_region", "_default_game", "uids")
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36" # noqa: E501
logger: logging.Logger = logging.getLogger(__name__)
cookie_manager: manager.BaseCookieManager
cache: client_cache.BaseCache
_authkey: typing.Optional[str]
_lang: str
_region: types.Region
_default_game: typing.Optional[types.Game]
uids: typing.Dict[types.Game, int]
@property
def hoyolab_uid(self) -> typing.Optional[int]:
"""The logged-in user's hoyolab uid.
Returns None if not found or not applicable.
"""
return self.cookie_manager.user_id
@property
def lang(self) -> str:
"""The default language, defaults to "en-us" """
return self._lang
@lang.setter
@property
def region(self) -> types.Region:
"""The default region."""
return self._region
@region.setter
@property
def default_game(self) -> typing.Optional[types.Game]:
"""The default game."""
return self._default_game
@default_game.setter
game = default_game
@property
def uid(self) -> typing.Optional[int]:
"""UID of the default game."""
if self.default_game is None:
if len(self.uids) != 1:
return None
(self.default_game,) = self.uids.keys()
return self.uids.get(self.default_game)
@uid.setter
@property
def authkey(self) -> typing.Optional[str]:
"""The default genshin authkey used for paginators."""
return self._authkey
@authkey.setter
@property
def debug(self) -> bool:
"""Whether the debug logs are being shown in stdout"""
return logging.getLogger("genshin").level == logging.DEBUG
@debug.setter
def set_cookies(self, cookies: typing.Optional[manager.AnyCookieOrHeader] = None, **kwargs: typing.Any) -> None:
"""Parse and set cookies."""
if not bool(cookies) ^ bool(kwargs):
raise TypeError("Cannot use both positional and keyword arguments at once")
self.cookie_manager = manager.BaseCookieManager.from_cookies(cookies or kwargs)
def set_browser_cookies(self, browser: typing.Optional[str] = None) -> None:
"""Extract cookies from your browser and set them as client cookies.
Available browsers: chrome, chromium, opera, edge, firefox.
"""
self.cookie_manager = manager.BaseCookieManager.from_browser_cookies(browser)
def set_authkey(self, authkey: typing.Optional[str] = None) -> None:
"""Set an authkey for wish & transaction logs.
Accepts an authkey, a url containing an authkey or a path towards a logfile.
"""
if authkey is None or os.path.isfile(authkey):
authkey = utility.get_authkey(authkey)
else:
authkey = utility.extract_authkey(authkey) or authkey
self.authkey = authkey
def set_cache(
self,
maxsize: int = 1024,
*,
ttl: int = client_cache.HOUR,
static_ttl: int = client_cache.DAY,
) -> None:
"""Create and set a new cache."""
self.cache = client_cache.Cache(maxsize, ttl=ttl, static_ttl=static_ttl)
def set_redis_cache(
self,
url: str,
*,
ttl: int = client_cache.HOUR,
static_ttl: int = client_cache.DAY,
**redis_kwargs: typing.Any,
) -> None:
"""Create and set a new redis cache."""
import aioredis
redis = aioredis.Redis.from_url(url, **redis_kwargs) # pyright: ignore[reportUnknownMemberType]
self.cache = client_cache.RedisCache(redis, ttl=ttl, static_ttl=static_ttl)
@property
def proxy(self) -> typing.Optional[str]:
"""Proxy for http requests."""
if self.cookie_manager.proxy is None:
return None
return str(self.cookie_manager.proxy)
@proxy.setter
async def _request_hook(
self,
method: str,
url: aiohttp.typedefs.StrOrURL,
*,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
**kwargs: typing.Any,
) -> None:
"""Perform an action before a request.
Debug logging by default.
"""
url = yarl.URL(url)
if params:
params = {k: v for k, v in params.items() if k != "authkey"}
url = url.update_query(params)
if data:
self.logger.debug("%s %s\n%s", method, url, json.dumps(data, separators=(",", ":")))
else:
self.logger.debug("%s %s", method, url)
async def request(
self,
url: aiohttp.typedefs.StrOrURL,
*,
method: typing.Optional[str] = None,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
cache: typing.Any = None,
static_cache: typing.Any = None,
**kwargs: typing.Any,
) -> typing.Mapping[str, typing.Any]:
"""Make a request and return a parsed json response."""
if cache is not None:
value = await self.cache.get(cache)
if value is not None:
return value
elif static_cache is not None:
value = await self.cache.get_static(static_cache)
if value is not None:
return value
# actual request
headers = dict(headers or {})
headers["User-Agent"] = self.USER_AGENT
if method is None:
method = "POST" if data else "GET"
if "json" in kwargs:
raise TypeError("Use data instead of json in request.")
await self._request_hook(method, url, params=params, data=data, headers=headers, **kwargs)
response = await self.cookie_manager.request(
url,
method=method,
params=params,
json=data,
headers=headers,
**kwargs,
)
# cache
if cache is not None:
await self.cache.set(cache, response)
elif static_cache is not None:
await self.cache.set_static(static_cache, response)
return response
async def request_webstatic(
self,
url: aiohttp.typedefs.StrOrURL,
*,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
cache: typing.Any = None,
**kwargs: typing.Any,
) -> typing.Any:
"""Request a static json file."""
if cache is not None:
value = await self.cache.get_static(cache)
if value is not None:
return value
url = routes.WEBSTATIC_URL.get_url().join(yarl.URL(url))
headers = dict(headers or {})
headers["User-Agent"] = self.USER_AGENT
await self._request_hook("GET", url, headers=headers, **kwargs)
async with self.cookie_manager.create_session() as session:
async with session.get(url, headers=headers, proxy=self.proxy, **kwargs) as r:
r.raise_for_status()
data = await r.json()
if cache is not None:
await self.cache.set_static(cache, data)
return data
async def request_hoyolab(
self,
url: aiohttp.typedefs.StrOrURL,
*,
lang: typing.Optional[str] = None,
region: typing.Optional[types.Region] = None,
method: typing.Optional[str] = None,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
**kwargs: typing.Any,
) -> typing.Mapping[str, typing.Any]:
"""Make a request any hoyolab endpoint."""
if lang is not None and lang not in constants.LANGS:
raise ValueError(f"{lang} is not a valid language, must be one of: " + ", ".join(constants.LANGS))
lang = lang or self.lang
region = region or self.region
url = routes.TAKUMI_URL.get_url(region).join(yarl.URL(url))
if region == types.Region.OVERSEAS:
headers = {
"x-rpc-app_version": "1.5.0",
"x-rpc-client_type": "4",
"x-rpc-language": lang,
"ds": ds.generate_dynamic_secret(),
}
elif region == types.Region.CHINESE:
headers = {
"x-rpc-app_version": "2.11.1",
"x-rpc-client_type": "5",
"ds": ds.generate_cn_dynamic_secret(data, params),
}
else:
raise TypeError(f"{region!r} is not a valid region.")
data = await self.request(url, method=method, params=params, data=data, headers=headers, **kwargs)
return data
@manager.no_multi
async def get_game_accounts(
self,
*,
lang: typing.Optional[str] = None,
) -> typing.Sequence[hoyolab_models.GenshinAccount]:
"""Get the game accounts of the currently logged-in user."""
data = await self.request_hoyolab(
"binding/api/getUserGameRolesByCookie",
lang=lang,
cache=client_cache.cache_key("accounts", hoyolab_uid=self.hoyolab_uid),
)
return [hoyolab_models.GenshinAccount(**i) for i in data["list"]]
@deprecation.deprecated("get_game_accounts")
async def genshin_accounts(
self,
*,
lang: typing.Optional[str] = None,
) -> typing.Sequence[hoyolab_models.GenshinAccount]:
"""Get the genshin accounts of the currently logged-in user."""
accounts = await self.get_game_accounts(lang=lang)
return [account for account in accounts if account.game == types.Game.GENSHIN]
async def _update_cached_uids(self) -> None:
"""Update cached fallback uids."""
mixed_accounts = await self.get_game_accounts()
game_accounts: typing.Dict[types.Game, typing.List[hoyolab_models.GenshinAccount]] = {}
for account in mixed_accounts:
if not isinstance(account.game, types.Game): # pyright: ignore[reportUnnecessaryIsInstance]
continue
game_accounts.setdefault(account.game, []).append(account)
self.uids = {game: max(accounts, key=lambda a: a.level).uid for game, accounts in game_accounts.items()}
if len(self.uids) == 1 and self.default_game is None:
(self.default_game,) = self.uids.keys()
@concurrency.prevent_concurrency
async def _get_uid(self, game: types.Game) -> int:
"""Get a cached fallback uid."""
# TODO: use lock
if uid := self.uids.get(game):
return uid
if self.cookie_manager.multi:
raise RuntimeError("UID must be provided when using multi-cookie managers.")
await self._update_cached_uids()
if uid := self.uids.get(game):
return uid
raise errors.AccountNotFound(msg="No UID provided and account has no game account bound to it.")
async def _fetch_mi18n(self, key: str, lang: str, *, force: bool = False) -> None:
"""Update mi18n for a single url."""
if not force:
if key in base_model.APIModel._mi18n:
return
base_model.APIModel._mi18n[key] = {}
url = routes.MI18N[key]
cache_key = client_cache.cache_key("mi18n", mi18n=key, lang=lang)
data = await self.request_webstatic(url.format(lang=lang), cache=cache_key)
for k, v in data.items():
actual_key = str.lower(key + "/" + k)
base_model.APIModel._mi18n.setdefault(actual_key, {})[lang] = v
async def update_mi18n(self, langs: typing.Iterable[str] = constants.LANGS, *, force: bool = False) -> None:
"""Fetch mi18n for partially localized endpoints."""
if not force:
if base_model.APIModel._mi18n:
return
langs = tuple(langs)
coros: typing.List[typing.Awaitable[None]] = []
for key in routes.MI18N:
for lang in langs:
coros.append(self._fetch_mi18n(key, lang, force=force))
await asyncio.gather(*coros)
| 33.821277 | 148 | 0.606631 | """Base ABC Client."""
import abc
import asyncio
import base64
import json
import logging
import os
import typing
import urllib.parse
import aiohttp.typedefs
import yarl
from genshin import constants, errors, types, utility
from genshin.client import cache as client_cache
from genshin.client import manager, routes
from genshin.models import hoyolab as hoyolab_models
from genshin.models import model as base_model
from genshin.utility import concurrency, deprecation, ds
__all__ = ["BaseClient"]
class BaseClient(abc.ABC):
"""Base ABC Client."""
__slots__ = ("cookie_manager", "cache", "_authkey", "_lang", "_region", "_default_game", "uids")
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36" # noqa: E501
logger: logging.Logger = logging.getLogger(__name__)
cookie_manager: manager.BaseCookieManager
cache: client_cache.BaseCache
_authkey: typing.Optional[str]
_lang: str
_region: types.Region
_default_game: typing.Optional[types.Game]
uids: typing.Dict[types.Game, int]
def __init__(
self,
cookies: typing.Optional[manager.AnyCookieOrHeader] = None,
*,
authkey: typing.Optional[str] = None,
lang: str = "en-us",
region: types.Region = types.Region.OVERSEAS,
proxy: typing.Optional[str] = None,
game: typing.Optional[types.Game] = None,
uid: typing.Optional[int] = None,
cache: typing.Optional[client_cache.Cache] = None,
debug: bool = False,
) -> None:
self.cookie_manager = manager.BaseCookieManager.from_cookies(cookies)
self.cache = cache or client_cache.StaticCache()
self.authkey = authkey
self.lang = lang
self.region = region
self.default_game = game
self.debug = debug
self.proxy = proxy
self.uids = {}
self.uid = uid
def __repr__(self) -> str:
kwargs = dict(
lang=self.lang,
region=self.region.value,
default_game=self.default_game and self.default_game.value,
hoyolab_uid=self.hoyolab_uid,
uid=self.default_game and self.uid,
authkey=self.authkey and self.authkey[:12] + "...",
proxy=self.proxy,
debug=self.debug,
)
return f"<{type(self).__name__} {', '.join(f'{k}={v!r}' for k, v in kwargs.items() if v)}>"
@property
def hoyolab_uid(self) -> typing.Optional[int]:
"""The logged-in user's hoyolab uid.
Returns None if not found or not applicable.
"""
return self.cookie_manager.user_id
@property
def lang(self) -> str:
"""The default language, defaults to "en-us" """
return self._lang
@lang.setter
def lang(self, lang: str) -> None:
if lang not in constants.LANGS:
raise ValueError(f"{lang} is not a valid language, must be one of: " + ", ".join(constants.LANGS))
self._lang = lang
@property
def region(self) -> types.Region:
"""The default region."""
return self._region
@region.setter
def region(self, region: str) -> None:
self._region = types.Region(region)
if region == types.Region.CHINESE:
self.lang = "zh-cn"
@property
def default_game(self) -> typing.Optional[types.Game]:
"""The default game."""
return self._default_game
@default_game.setter
def default_game(self, game: typing.Optional[str]) -> None:
self._default_game = types.Game(game) if game else None
game = default_game
@property
def uid(self) -> typing.Optional[int]:
"""UID of the default game."""
if self.default_game is None:
if len(self.uids) != 1:
return None
(self.default_game,) = self.uids.keys()
return self.uids.get(self.default_game)
@uid.setter
def uid(self, uid: typing.Optional[int]) -> None:
if uid is None:
self.uids.clear()
return
self._default_game = self._default_game or utility.recognize_game(uid, region=self.region)
if self.default_game is None:
raise RuntimeError("No default game set. Cannot set uid.")
self.uids[self.default_game] = uid
@property
def authkey(self) -> typing.Optional[str]:
"""The default genshin authkey used for paginators."""
return self._authkey
@authkey.setter
def authkey(self, authkey: typing.Optional[str]) -> None:
if authkey is not None:
authkey = urllib.parse.unquote(authkey)
try:
base64.b64decode(authkey, validate=True)
except Exception as e:
raise ValueError("authkey is not a valid base64 encoded string") from e
self._authkey = authkey
@property
def debug(self) -> bool:
"""Whether the debug logs are being shown in stdout"""
return logging.getLogger("genshin").level == logging.DEBUG
@debug.setter
def debug(self, debug: bool) -> None:
logging.basicConfig()
level = logging.DEBUG if debug else logging.NOTSET
logging.getLogger("genshin").setLevel(level)
def set_cookies(self, cookies: typing.Optional[manager.AnyCookieOrHeader] = None, **kwargs: typing.Any) -> None:
"""Parse and set cookies."""
if not bool(cookies) ^ bool(kwargs):
raise TypeError("Cannot use both positional and keyword arguments at once")
self.cookie_manager = manager.BaseCookieManager.from_cookies(cookies or kwargs)
def set_browser_cookies(self, browser: typing.Optional[str] = None) -> None:
"""Extract cookies from your browser and set them as client cookies.
Available browsers: chrome, chromium, opera, edge, firefox.
"""
self.cookie_manager = manager.BaseCookieManager.from_browser_cookies(browser)
def set_authkey(self, authkey: typing.Optional[str] = None) -> None:
"""Set an authkey for wish & transaction logs.
Accepts an authkey, a url containing an authkey or a path towards a logfile.
"""
if authkey is None or os.path.isfile(authkey):
authkey = utility.get_authkey(authkey)
else:
authkey = utility.extract_authkey(authkey) or authkey
self.authkey = authkey
def set_cache(
self,
maxsize: int = 1024,
*,
ttl: int = client_cache.HOUR,
static_ttl: int = client_cache.DAY,
) -> None:
"""Create and set a new cache."""
self.cache = client_cache.Cache(maxsize, ttl=ttl, static_ttl=static_ttl)
def set_redis_cache(
self,
url: str,
*,
ttl: int = client_cache.HOUR,
static_ttl: int = client_cache.DAY,
**redis_kwargs: typing.Any,
) -> None:
"""Create and set a new redis cache."""
import aioredis
redis = aioredis.Redis.from_url(url, **redis_kwargs) # pyright: ignore[reportUnknownMemberType]
self.cache = client_cache.RedisCache(redis, ttl=ttl, static_ttl=static_ttl)
@property
def proxy(self) -> typing.Optional[str]:
"""Proxy for http requests."""
if self.cookie_manager.proxy is None:
return None
return str(self.cookie_manager.proxy)
@proxy.setter
def proxy(self, proxy: typing.Optional[aiohttp.typedefs.StrOrURL]) -> None:
self.cookie_manager.proxy = yarl.URL(proxy) if proxy else None
async def _request_hook(
self,
method: str,
url: aiohttp.typedefs.StrOrURL,
*,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
**kwargs: typing.Any,
) -> None:
"""Perform an action before a request.
Debug logging by default.
"""
url = yarl.URL(url)
if params:
params = {k: v for k, v in params.items() if k != "authkey"}
url = url.update_query(params)
if data:
self.logger.debug("%s %s\n%s", method, url, json.dumps(data, separators=(",", ":")))
else:
self.logger.debug("%s %s", method, url)
async def request(
self,
url: aiohttp.typedefs.StrOrURL,
*,
method: typing.Optional[str] = None,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
cache: typing.Any = None,
static_cache: typing.Any = None,
**kwargs: typing.Any,
) -> typing.Mapping[str, typing.Any]:
"""Make a request and return a parsed json response."""
if cache is not None:
value = await self.cache.get(cache)
if value is not None:
return value
elif static_cache is not None:
value = await self.cache.get_static(static_cache)
if value is not None:
return value
# actual request
headers = dict(headers or {})
headers["User-Agent"] = self.USER_AGENT
if method is None:
method = "POST" if data else "GET"
if "json" in kwargs:
raise TypeError("Use data instead of json in request.")
await self._request_hook(method, url, params=params, data=data, headers=headers, **kwargs)
response = await self.cookie_manager.request(
url,
method=method,
params=params,
json=data,
headers=headers,
**kwargs,
)
# cache
if cache is not None:
await self.cache.set(cache, response)
elif static_cache is not None:
await self.cache.set_static(static_cache, response)
return response
async def request_webstatic(
self,
url: aiohttp.typedefs.StrOrURL,
*,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
cache: typing.Any = None,
**kwargs: typing.Any,
) -> typing.Any:
"""Request a static json file."""
if cache is not None:
value = await self.cache.get_static(cache)
if value is not None:
return value
url = routes.WEBSTATIC_URL.get_url().join(yarl.URL(url))
headers = dict(headers or {})
headers["User-Agent"] = self.USER_AGENT
await self._request_hook("GET", url, headers=headers, **kwargs)
async with self.cookie_manager.create_session() as session:
async with session.get(url, headers=headers, proxy=self.proxy, **kwargs) as r:
r.raise_for_status()
data = await r.json()
if cache is not None:
await self.cache.set_static(cache, data)
return data
async def request_hoyolab(
self,
url: aiohttp.typedefs.StrOrURL,
*,
lang: typing.Optional[str] = None,
region: typing.Optional[types.Region] = None,
method: typing.Optional[str] = None,
params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
data: typing.Any = None,
headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
**kwargs: typing.Any,
) -> typing.Mapping[str, typing.Any]:
"""Make a request any hoyolab endpoint."""
if lang is not None and lang not in constants.LANGS:
raise ValueError(f"{lang} is not a valid language, must be one of: " + ", ".join(constants.LANGS))
lang = lang or self.lang
region = region or self.region
url = routes.TAKUMI_URL.get_url(region).join(yarl.URL(url))
if region == types.Region.OVERSEAS:
headers = {
"x-rpc-app_version": "1.5.0",
"x-rpc-client_type": "4",
"x-rpc-language": lang,
"ds": ds.generate_dynamic_secret(),
}
elif region == types.Region.CHINESE:
headers = {
"x-rpc-app_version": "2.11.1",
"x-rpc-client_type": "5",
"ds": ds.generate_cn_dynamic_secret(data, params),
}
else:
raise TypeError(f"{region!r} is not a valid region.")
data = await self.request(url, method=method, params=params, data=data, headers=headers, **kwargs)
return data
@manager.no_multi
async def get_game_accounts(
self,
*,
lang: typing.Optional[str] = None,
) -> typing.Sequence[hoyolab_models.GenshinAccount]:
"""Get the game accounts of the currently logged-in user."""
data = await self.request_hoyolab(
"binding/api/getUserGameRolesByCookie",
lang=lang,
cache=client_cache.cache_key("accounts", hoyolab_uid=self.hoyolab_uid),
)
return [hoyolab_models.GenshinAccount(**i) for i in data["list"]]
@deprecation.deprecated("get_game_accounts")
async def genshin_accounts(
self,
*,
lang: typing.Optional[str] = None,
) -> typing.Sequence[hoyolab_models.GenshinAccount]:
"""Get the genshin accounts of the currently logged-in user."""
accounts = await self.get_game_accounts(lang=lang)
return [account for account in accounts if account.game == types.Game.GENSHIN]
async def _update_cached_uids(self) -> None:
"""Update cached fallback uids."""
mixed_accounts = await self.get_game_accounts()
game_accounts: typing.Dict[types.Game, typing.List[hoyolab_models.GenshinAccount]] = {}
for account in mixed_accounts:
if not isinstance(account.game, types.Game): # pyright: ignore[reportUnnecessaryIsInstance]
continue
game_accounts.setdefault(account.game, []).append(account)
self.uids = {game: max(accounts, key=lambda a: a.level).uid for game, accounts in game_accounts.items()}
if len(self.uids) == 1 and self.default_game is None:
(self.default_game,) = self.uids.keys()
@concurrency.prevent_concurrency
async def _get_uid(self, game: types.Game) -> int:
"""Get a cached fallback uid."""
# TODO: use lock
if uid := self.uids.get(game):
return uid
if self.cookie_manager.multi:
raise RuntimeError("UID must be provided when using multi-cookie managers.")
await self._update_cached_uids()
if uid := self.uids.get(game):
return uid
raise errors.AccountNotFound(msg="No UID provided and account has no game account bound to it.")
async def _fetch_mi18n(self, key: str, lang: str, *, force: bool = False) -> None:
"""Update mi18n for a single url."""
if not force:
if key in base_model.APIModel._mi18n:
return
base_model.APIModel._mi18n[key] = {}
url = routes.MI18N[key]
cache_key = client_cache.cache_key("mi18n", mi18n=key, lang=lang)
data = await self.request_webstatic(url.format(lang=lang), cache=cache_key)
for k, v in data.items():
actual_key = str.lower(key + "/" + k)
base_model.APIModel._mi18n.setdefault(actual_key, {})[lang] = v
async def update_mi18n(self, langs: typing.Iterable[str] = constants.LANGS, *, force: bool = False) -> None:
"""Fetch mi18n for partially localized endpoints."""
if not force:
if base_model.APIModel._mi18n:
return
langs = tuple(langs)
coros: typing.List[typing.Awaitable[None]] = []
for key in routes.MI18N:
for lang in langs:
coros.append(self._fetch_mi18n(key, lang, force=force))
await asyncio.gather(*coros)
| 2,715 | 0 | 236 |
480820a2e79a81a714cb12022eafa163d41b1b42 | 1,424 | py | Python | tests/test_kotoba_player_py.py | eteeeeeerminal/kotoba-player-py | 7eaf97fd62114fdf744e1442b6267a4bcf931bff | [
"MIT"
] | null | null | null | tests/test_kotoba_player_py.py | eteeeeeerminal/kotoba-player-py | 7eaf97fd62114fdf744e1442b6267a4bcf931bff | [
"MIT"
] | 1 | 2021-05-16T13:58:21.000Z | 2021-05-16T14:01:54.000Z | tests/test_kotoba_player_py.py | eteeeeeerminal/kotoba-player-py | 7eaf97fd62114fdf744e1442b6267a4bcf931bff | [
"MIT"
] | null | null | null | from kotoba_player_py.exceptions import InputFormatError
import pytest
from kotoba_player_py import (
__version__, KotobaPlayer, InputFormatError
)
from kotoba_player_py.api import (
mask_noun_word
)
@pytest.fixture | 33.116279 | 91 | 0.693118 | from kotoba_player_py.exceptions import InputFormatError
import pytest
from kotoba_player_py import (
__version__, KotobaPlayer, InputFormatError
)
from kotoba_player_py.api import (
mask_noun_word
)
def test_version():
    """The package reports the version pinned by this release."""
    expected_version = '0.1.0'
    assert __version__ == expected_version
@pytest.fixture
def player():
    """Provide a fresh KotobaPlayer instance for each test."""
    return KotobaPlayer()
def test_parrot(player: KotobaPlayer):
    """Empty/None input raises; valid text echoes a word twice."""
    for bad_input in ("", None):
        with pytest.raises(InputFormatError):
            player.parrot(bad_input)
    assert player.parrot("お宝はいただくぜ") == "いただく! いただく!"
    assert player.parrot("キトさんは、とっても可愛いです。") == "可愛い! 可愛い!"
def test_mask_noun_word(player: KotobaPlayer):
    """Noun tokens are replaced by the mask word; non-nouns pass through."""
    noun_token = player.nlp("東京")[0]
    assert mask_noun_word(noun_token, "hogehoge") == "hogehoge"
    adjective_token = player.nlp("美しい")[0]
    assert mask_noun_word(adjective_token, "hogehoge") == "美しい"
    # Third argument True: mask appears once per character of the noun
    # ("東京" has two characters -> "xx").
    noun_token = player.nlp("東京")[0]
    assert mask_noun_word(noun_token, "x", True) == "xx"
def test_masquerade(player: KotobaPlayer):
    """Nouns inside a sentence get masked; empty/None input raises."""
    for bad_text in ("", None):
        with pytest.raises(InputFormatError):
            player.masquerade(bad_text, bad_text)
    assert player.masquerade("東京タワーは綺麗です。", "hoge") == "hogehogeは綺麗です。"
    # With the repeat flag, the mask repeats per character of each noun.
    repeat_cases = [
        ("東京タワーは綺麗です。", "x", "xxxxxは綺麗です。"),
        ("東京タワーは綺麗です。私の家はあっちです。", "x", "xxxxxは綺麗です。私のxはあっちです。"),
    ]
    for text, mask, expected in repeat_cases:
        assert player.masquerade(text, mask, True) == expected
    # A sentence without nouns is returned unchanged.
    assert player.masquerade("美しすぎる。", "hoge") == "美しすぎる。"
0631b9526d2ea5ec291656b3480b1fabbc14772e | 4,572 | py | Python | Etc/dt_example.py | wahur666/VisualLogo | 36ff0e0108882a4c45e1dd7d4454d573d4439dcd | [
"Apache-2.0"
] | null | null | null | Etc/dt_example.py | wahur666/VisualLogo | 36ff0e0108882a4c45e1dd7d4454d573d4439dcd | [
"Apache-2.0"
] | null | null | null | Etc/dt_example.py | wahur666/VisualLogo | 36ff0e0108882a4c45e1dd7d4454d573d4439dcd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
This example mirrors the 8-direction movement example here:
https://github.com/Mekire/meks-pygame-samples/blob/master/eight_dir_move.py
The difference is that this example uses delta time.
Delta time is a method of assuring that updates are unaffected by
changes in framerate.
"""
import os
import sys
import pygame as pg
CAPTION = "Delta Time"
SCREEN_SIZE = (500, 500)
TRANSPARENT = (0, 0, 0, 0)
BACKGROUND_COLOR = pg.Color("darkslategrey")
DIRECT_DICT = {pg.K_LEFT : (-1, 0),
pg.K_RIGHT : ( 1, 0),
pg.K_UP : ( 0,-1),
pg.K_DOWN : ( 0, 1)}
class Player(object):
"""This class will represent our user controlled character."""
SIZE = (100, 100)
def __init__(self, pos, speed):
"""
Aside from setting up our image and rect as seen previously,
in this example we create a new variable called true_pos.
Rects can only hold integers, so in order to preserve fractional
changes we need this new variable to hold the exact float position.
Without it, a body that moved slower than 1 pixel per frame would
never move.
"""
self.image = self.make_image()
self.rect = self.image.get_rect(center=pos)
self.true_pos = list(self.rect.center) # Exact float position.
self.speed = speed # Speed in pixels per second.
def make_image(self):
"""
Create player image. No differences from previous.
"""
image = pg.Surface(Player.SIZE).convert_alpha()
image.fill(TRANSPARENT)
rect = image.get_rect()
pg.draw.ellipse(image, pg.Color("black"), rect)
pg.draw.ellipse(image, pg.Color("tomato"), rect.inflate(-12, -12))
return image
def update(self, keys, screen_rect, dt):
"""
Update must accept a new argument dt (time delta between frames).
Adjustments to position must be multiplied by this delta.
Set the rect to true_pos once adjusted (automatically converts to int).
"""
for key in DIRECT_DICT:
if keys[key]:
self.true_pos[0] += DIRECT_DICT[key][0]*self.speed*dt
self.true_pos[1] += DIRECT_DICT[key][1]*self.speed*dt
self.rect.center = self.true_pos
self.clamp(screen_rect)
def clamp(self, screen_rect):
"""
Clamp the rect to the screen if needed and reset true_pos to the
rect position so they don't lose sync.
"""
if not screen_rect.contains(self.rect):
self.rect.clamp_ip(screen_rect)
self.true_pos = list(self.rect.center)
def draw(self, surface):
"""
Basic draw function.
"""
surface.blit(self.image, self.rect)
class App(object):
"""
Class responsible for program control flow.
"""
def event_loop(self):
"""
Basic event loop.
"""
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
elif event.type in (pg.KEYDOWN, pg.KEYUP):
self.keys = pg.key.get_pressed()
def update(self, dt):
"""
Update must acccept and pass dt to all elements that need to update.
"""
self.player.update(self.keys, self.screen_rect, dt)
def render(self):
"""
Render all needed elements and update the display.
"""
self.screen.fill(BACKGROUND_COLOR)
self.player.draw(self.screen)
pg.display.update()
def main_loop(self):
"""
We now use the return value of the call to self.clock.tick to
get the time delta between frames.
"""
dt = 0
self.clock.tick(self.fps)
while not self.done:
self.event_loop()
self.update(dt)
self.render()
dt = self.clock.tick(self.fps)/1000.0
def main():
"""
Initialize; create an App; and start the main loop.
"""
os.environ['SDL_VIDEO_CENTERED'] = '1'
pg.init()
pg.display.set_caption(CAPTION)
pg.display.set_mode(SCREEN_SIZE)
App().main_loop()
pg.quit()
sys.exit()
if __name__ == "__main__":
main()
| 29.882353 | 79 | 0.594269 | #!/usr/bin/env python
"""
This example mirrors the 8-direction movement example here:
https://github.com/Mekire/meks-pygame-samples/blob/master/eight_dir_move.py
The difference is that this example uses delta time.
Delta time is a method of assuring that updates are unaffected by
changes in framerate.
"""
import os
import sys
import pygame as pg
CAPTION = "Delta Time"
SCREEN_SIZE = (500, 500)
TRANSPARENT = (0, 0, 0, 0)
BACKGROUND_COLOR = pg.Color("darkslategrey")
DIRECT_DICT = {pg.K_LEFT : (-1, 0),
pg.K_RIGHT : ( 1, 0),
pg.K_UP : ( 0,-1),
pg.K_DOWN : ( 0, 1)}
class Player(object):
    """The user-controlled character."""
    SIZE = (100, 100)
    def __init__(self, pos, speed):
        """
        Build the sprite and its rect, and additionally keep the exact
        position in self.true_pos.  Rects store integers only, so
        fractional per-frame movement (common with delta time) would
        otherwise be lost entirely.
        """
        self.image = self.make_image()
        self.rect = self.image.get_rect(center=pos)
        self.true_pos = list(self.rect.center)  # Float-precision center.
        self.speed = speed  # Movement speed in pixels per second.
    def make_image(self):
        """Draw the two-tone ellipse sprite on a transparent surface."""
        surface = pg.Surface(Player.SIZE).convert_alpha()
        surface.fill(TRANSPARENT)
        bounds = surface.get_rect()
        pg.draw.ellipse(surface, pg.Color("black"), bounds)
        pg.draw.ellipse(surface, pg.Color("tomato"), bounds.inflate(-12, -12))
        return surface
    def update(self, keys, screen_rect, dt):
        """
        Move according to the held keys, scaling each step by dt (the
        seconds elapsed since the previous frame) so speed is framerate
        independent; then sync the integer rect from the float position.
        """
        for key, (dx, dy) in DIRECT_DICT.items():
            if keys[key]:
                self.true_pos[0] += dx * self.speed * dt
                self.true_pos[1] += dy * self.speed * dt
        self.rect.center = self.true_pos
        self.clamp(screen_rect)
    def clamp(self, screen_rect):
        """
        Keep the rect inside the screen; whenever clamping moves it,
        reset true_pos from the rect so the two never drift apart.
        """
        if not screen_rect.contains(self.rect):
            self.rect.clamp_ip(screen_rect)
            self.true_pos = list(self.rect.center)
    def draw(self, surface):
        """Blit the player's image at its rect onto the given surface."""
        surface.blit(self.image, self.rect)
class App(object):
    """Owns program control flow: events, updates, rendering."""
    def __init__(self):
        """Grab the display surface and set up the clock and player."""
        self.screen = pg.display.get_surface()
        self.screen_rect = self.screen.get_rect()
        self.clock = pg.time.Clock()
        self.fps = 60
        self.done = False
        self.keys = pg.key.get_pressed()
        self.player = Player(self.screen_rect.center, 300)
    def event_loop(self):
        """Drain the event queue: handle quit and refresh key state."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.done = True
            elif event.type in (pg.KEYDOWN, pg.KEYUP):
                self.keys = pg.key.get_pressed()
    def update(self, dt):
        """Propagate dt to every element that needs per-frame updates."""
        self.player.update(self.keys, self.screen_rect, dt)
    def render(self):
        """Clear the screen, draw everything, and flip the display."""
        self.screen.fill(BACKGROUND_COLOR)
        self.player.draw(self.screen)
        pg.display.update()
    def main_loop(self):
        """
        Run until done.  clock.tick returns the milliseconds elapsed
        since the previous call, which we convert to seconds and pass
        into update() as the frame's time delta.
        """
        delta = 0
        self.clock.tick(self.fps)
        while not self.done:
            self.event_loop()
            self.update(delta)
            self.render()
            delta = self.clock.tick(self.fps) / 1000.0
def main():
    """Center and open the SDL window, run the app, and exit cleanly."""
    # Must be set before pg.init() for the window to be centered.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pg.init()
    pg.display.set_caption(CAPTION)
    pg.display.set_mode(SCREEN_SIZE)
    app = App()
    app.main_loop()
    pg.quit()
    sys.exit()
# Start the game only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 280 | 0 | 27 |
67b276d02afb9dfab6debdf3eeb0db6e07d1f6eb | 9,290 | py | Python | src/test/test_translator_task.py | systemslab/popper | aa3f3b5d7e1374fcd4ae2bd3b12ec9db936d0e33 | [
"MIT"
] | 179 | 2016-11-19T22:38:07.000Z | 2020-05-24T10:42:30.000Z | src/test/test_translator_task.py | systemslab/popper | aa3f3b5d7e1374fcd4ae2bd3b12ec9db936d0e33 | [
"MIT"
] | 739 | 2016-10-05T21:31:13.000Z | 2020-05-22T20:42:55.000Z | src/test/test_translator_task.py | systemslab/popper | aa3f3b5d7e1374fcd4ae2bd3b12ec9db936d0e33 | [
"MIT"
] | 51 | 2016-10-14T05:42:10.000Z | 2020-05-15T19:05:33.000Z | from box import Box
from popper.translators.translator_task import TaskTranslator
from .test_common import PopperTest
| 34.664179 | 240 | 0.376964 | from box import Box
from popper.translators.translator_task import TaskTranslator
from .test_common import PopperTest
class TestTaskTranslator(PopperTest):
    """Unit tests for TaskTranslator, which converts Popper workflow steps
    into go-task Taskfile fragments (``sh`` and ``docker://`` step types)."""
    # Taskfile `vars` entries that shell out to git for build metadata;
    # every command falls back to an empty string outside a git repo.
    GIT_VARS = {
        "GIT_COMMIT": {"sh": 'git rev-parse HEAD || echo ""'},
        "GIT_BRANCH": {"sh": 'git branch --show-current 2>/dev/null || echo ""'},
        "GIT_SHA_SHORT": {"sh": 'git rev-parse --short HEAD 2>/dev/null || echo ""'},
        "GIT_REMOTE_ORIGIN_URL": {
            "sh": 'git config --get remote.origin.url || echo ""'
        },
        "GIT_TAG": {"sh": "git tag -l --contains HEAD 2>/dev/null | head -n 1"},
    }
    # Taskfile `env` mapping exposing the vars above as environment
    # variables via go-task template expansion.
    GIT_ENV = {
        "GIT_COMMIT": "{{.GIT_COMMIT}}",
        "GIT_BRANCH": "{{.GIT_BRANCH}}",
        "GIT_SHA_SHORT": "{{.GIT_SHA_SHORT}}",
        "GIT_REMOTE_ORIGIN_URL": "{{.GIT_REMOTE_ORIGIN_URL}}",
        "GIT_TAG": "{{.GIT_TAG}}",
    }
    # The `docker run --env ...` flags expected for the GIT_ENV mapping,
    # sorted alphabetically by variable name.
    GIT_ENV_FLAGS = "--env GIT_BRANCH --env GIT_COMMIT --env GIT_REMOTE_ORIGIN_URL --env GIT_SHA_SHORT --env GIT_TAG"
    def test_detect_type(self):
        """`_detect_type` maps `uses` strings to a step type or raises."""
        tt = TaskTranslator()
        self.assertEqual(tt._detect_type("docker://alpine"), "docker")
        self.assertEqual(tt._detect_type("docker://alpine:latest"), "docker")
        self.assertEqual(tt._detect_type("sh"), "sh")
        with self.assertRaises(AttributeError):
            tt._detect_type("unknown")
    def test_translate_sh_step(self):
        """`_translate_sh_step` joins runs+args into a quoted shell cmd."""
        tt = TaskTranslator()
        # basic
        self.assertEqual(
            tt._translate_sh_step(
                Box(
                    {
                        "id": "id",
                        "uses": "sh",
                        "runs": ["echo"],
                        "args": ["hello world"],
                    }
                )
            ),
            Box({"cmds": ["echo 'hello world'"]}),
        )
        # with env
        self.assertEqual(
            tt._translate_sh_step(
                Box(
                    {
                        "id": "id",
                        "uses": "sh",
                        "runs": ["echo"],
                        "args": ["hello world"],
                        "env": {"FOO": "foo", "BAR": "bar"},
                    }
                )
            ),
            Box({"cmds": ["echo 'hello world'"], "env": {"FOO": "foo", "BAR": "bar"},}),
        )
        # missing `runs`
        with self.assertRaises(AttributeError):
            tt._translate_sh_step(Box({"id": "id", "uses": "sh", "args": ["hello"]}))
        # without args
        self.assertEqual(
            tt._translate_sh_step(Box(id="id", uses="sh", runs=["echo"])),
            Box({"cmds": ["echo"]}),
        )
    def test_get_docker_image(self):
        """`_get_docker_image` strips the docker:// prefix, keeping any tag."""
        tt = TaskTranslator()
        # image name only
        self.assertEqual(tt._get_docker_image("docker://alpine"), "alpine")
        # image name + tag
        self.assertEqual(
            tt._get_docker_image("docker://alpine:latest"), "alpine:latest"
        )
        # path to a directory (not supported)
        with self.assertRaises(AttributeError):
            tt._get_docker_image("./path/to/dir")
    def test_translate_docker_step(self):
        """`_translate_docker_step` builds a full `docker run` command line."""
        tt = TaskTranslator()
        # minimum
        self.assertEqual(
            tt._translate_docker_step(
                Box({"id": "id", "uses": "docker://hello-world",}), {},
            ),
            Box(
                {
                    "cmds": [
                        "docker run --rm -i --volume {{.PWD}}:/workspace --workdir /workspace hello-world"
                    ],
                }
            ),
        )
        # args + env
        self.assertEqual(
            tt._translate_docker_step(
                Box(
                    {
                        "id": "id",
                        "uses": "docker://node:14",
                        "args": ["index.js"],
                        "env": {"FOO": "foo", "BAR": "bar"},
                    }
                ),
                TestTaskTranslator.GIT_ENV,
            ),
            Box(
                {
                    "cmds": [
                        f"docker run --env BAR --env FOO {TestTaskTranslator.GIT_ENV_FLAGS} --rm -i --volume {{{{.PWD}}}}:/workspace --workdir /workspace node:14 index.js"
                    ],
                    "env": {"FOO": "foo", "BAR": "bar"},
                }
            ),
        )
        # runs
        self.assertEqual(
            tt._translate_docker_step(
                Box(
                    {
                        "id": "id",
                        "uses": "docker://alpine",
                        "runs": ["echo"],
                        "args": ["hello world"],
                    }
                ),
                {},
            ),
            Box(
                {
                    "cmds": [
                        "docker run --rm -i --volume {{.PWD}}:/workspace --workdir /workspace --entrypoint echo alpine 'hello world'"
                    ],
                }
            ),
        )
        # runs (two or more elements)
        self.assertEqual(
            tt._translate_docker_step(
                Box(
                    {
                        "id": "id",
                        "uses": "docker://alpine",
                        "runs": ["/bin/sh", "-c"],
                        "args": ["echo hello world"],
                    }
                ),
                {},
            ),
            Box(
                {
                    "cmds": [
                        "docker run --rm -i --volume {{.PWD}}:/workspace --workdir /workspace --entrypoint /bin/sh alpine -c 'echo hello world'"
                    ],
                }
            ),
        )
        # workdir
        self.assertEqual(
            tt._translate_docker_step(
                Box({"id": "id", "uses": "docker://hello-world", "dir": "/tmp"}), {}
            ),
            Box(
                {
                    "cmds": [
                        "docker run --rm -i --volume {{.PWD}}:/workspace --workdir /tmp hello-world"
                    ],
                }
            ),
        )
    def test_translate(self):
        """`translate` emits a complete Taskfile; a step named 'default' raises."""
        tt = TaskTranslator()
        popper_wf_with_step_default = Box(
            {
                "steps": [
                    {
                        "id": "default",
                        "uses": "sh",
                        "runs": ["curl"],
                        "args": [
                            "-LO",
                            "https://github.com/datasets/co2-fossil-global/raw/master/global.csv",
                        ],
                    }
                ],
            }
        )
        with self.assertRaises(AttributeError):
            tt.translate(popper_wf_with_step_default)
        popper_wf_sh = Box(
            {
                "steps": [
                    {
                        "id": "download",
                        "uses": "sh",
                        "runs": ["curl"],
                        "args": [
                            "-LO",
                            "https://github.com/datasets/co2-fossil-global/raw/master/global.csv",
                        ],
                    }
                ],
            }
        )
        task_sh = tt.translate(popper_wf_sh)
        self.assertEqual(
            Box.from_yaml(task_sh),
            Box(
                {
                    "version": "3",
                    "vars": {"PWD": {"sh": "pwd"}, **TestTaskTranslator.GIT_VARS},
                    "env": {**TestTaskTranslator.GIT_ENV},
                    "tasks": {
                        "default": {"cmds": [{"task": "download"}]},
                        "download": {
                            "cmds": [
                                "curl -LO https://github.com/datasets/co2-fossil-global/raw/master/global.csv"
                            ]
                        },
                    },
                }
            ),
        )
        popper_wf_docker = Box(
            {
                "steps": [
                    {
                        "id": "download",
                        "uses": "docker://byrnedo/alpine-curl:0.1.8",
                        "args": [
                            "-LO",
                            "https://github.com/datasets/co2-fossil-global/raw/master/global.csv",
                        ],
                    }
                ],
            }
        )
        task_docker = tt.translate(popper_wf_docker)
        self.assertEqual(
            Box.from_yaml(task_docker),
            Box(
                {
                    "version": "3",
                    "vars": {"PWD": {"sh": "pwd"}, **TestTaskTranslator.GIT_VARS},
                    "env": {**TestTaskTranslator.GIT_ENV},
                    "tasks": {
                        "default": {"cmds": [{"task": "download"}]},
                        "download": {
                            "cmds": [
                                f"docker run {TestTaskTranslator.GIT_ENV_FLAGS} --rm -i --volume {{{{.PWD}}}}:/workspace --workdir /workspace byrnedo/alpine-curl:0.1.8 -LO https://github.com/datasets/co2-fossil-global/raw/master/global.csv"
                            ]
                        },
                    },
                }
            ),
        )
| 8,182 | 965 | 23 |
7e2e4fd2a627180d3f3c6d71c557a343f69f0a53 | 34 | py | Python | app/contests/tests/code_test_files/trash.py | uva-slp/pico | 3a4f20ea5e9359e2e4b770442fa59ae8e0bf30ed | [
"MIT"
] | 1 | 2017-09-20T23:29:59.000Z | 2017-09-20T23:29:59.000Z | app/contests/tests/code_test_files/trash.py | uva-slp/pico | 3a4f20ea5e9359e2e4b770442fa59ae8e0bf30ed | [
"MIT"
] | null | null | null | app/contests/tests/code_test_files/trash.py | uva-slp/pico | 3a4f20ea5e9359e2e4b770442fa59ae8e0bf30ed | [
"MIT"
] | null | null | null | print(
if __name == "man:
BANANA
| 6.8 | 18 | 0.647059 | print(
if __name == "man:
BANANA
| 0 | 0 | 0 |
fb4dd0ead0a76ea3d1213b56dbed70f0035fc2b0 | 1,808 | py | Python | projectparallelprogrammeren/montecarlo_v0.py | fury106/ProjectParallelProgrammeren | fd3c198edaca5bcb19d8e665561e8cd14824e894 | [
"MIT"
] | null | null | null | projectparallelprogrammeren/montecarlo_v0.py | fury106/ProjectParallelProgrammeren | fd3c198edaca5bcb19d8e665561e8cd14824e894 | [
"MIT"
] | null | null | null | projectparallelprogrammeren/montecarlo_v0.py | fury106/ProjectParallelProgrammeren | fd3c198edaca5bcb19d8e665561e8cd14824e894 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.montecarlo_v0
=================================================================
simulatie v0: alles in Python (op genereren van de getallen na)
"""
import math
import numpy as np
from statistics import stdev
import projectparallelprogrammeren
from projectparallelprogrammeren import montecarlo_v1
from projectparallelprogrammeren import montecarlo_v2
from projectparallelprogrammeren import montecarlo_v3
from projectparallelprogrammeren import atomen
from et_stopwatch import Stopwatch
def simulatie(n=20, m=10):
    """
    Simulate ``m`` random conformations of ``n`` atoms and report the run
    with the lowest total Lennard-Jones potential, plus the mean and the
    sample standard deviation over all runs (pure-Python version, timed).

    :param int n: number of atoms per conformation.
    :param int m: number of conformations to simulate.
    """
    with Stopwatch(message="v0: Python"):
        best_coords = 0
        best_run = 0
        best_energy = math.inf
        total = 0
        mean = 0
        potentials = []
        for i in range(m):
            conformation = atomen.Atomen(n)
            pot = conformation.berekenLJPot()
            potentials.append(pot)
            total = total + pot
            mean = total / (i + 1)
            # Track the lowest-energy conformation seen so far.
            if pot < best_energy:
                best_energy = pot
                best_run = i
                best_coords = conformation.getCoordinaten()
        print(" ")
        print("----------RESULTATEN----------")
        print("Run", best_run + 1, "van", m, "had de laagste totale Lennard Jones Potentiaal, namelijk:", best_energy)
        print("De gemiddelde potentiaal:", mean)
        print("De standaardafwijking is:", stdev(potentials))
"""montecarlo_v1.simulatie(n, m)
montecarlo_v2.simulatie(n, m)
montecarlo_v3.simulatie(n, m)"""
#eof
| 31.172414 | 116 | 0.714049 | # -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.montecarlo_v0
=================================================================
simulatie v0: alles in Python (op genereren van de getallen na)
"""
import math
import numpy as np
from statistics import stdev
import projectparallelprogrammeren
from projectparallelprogrammeren import montecarlo_v1
from projectparallelprogrammeren import montecarlo_v2
from projectparallelprogrammeren import montecarlo_v3
from projectparallelprogrammeren import atomen
from et_stopwatch import Stopwatch
def simulatie(n=20, m=10):
    """
    Simulate ``m`` random conformations of ``n`` atoms and report the run
    with the lowest total Lennard-Jones potential, plus the mean and the
    sample standard deviation over all runs (pure-Python version, timed).

    :param int n: number of atoms per conformation.
    :param int m: number of conformations to simulate.
    """
    with Stopwatch(message="v0: Python"):
        best_coords = 0
        best_run = 0
        best_energy = math.inf
        total = 0
        mean = 0
        potentials = []
        for i in range(m):
            conformation = atomen.Atomen(n)
            pot = conformation.berekenLJPot()
            potentials.append(pot)
            total = total + pot
            mean = total / (i + 1)
            # Track the lowest-energy conformation seen so far.
            if pot < best_energy:
                best_energy = pot
                best_run = i
                best_coords = conformation.getCoordinaten()
        print(" ")
        print("----------RESULTATEN----------")
        print("Run", best_run + 1, "van", m, "had de laagste totale Lennard Jones Potentiaal, namelijk:", best_energy)
        print("De gemiddelde potentiaal:", mean)
        print("De standaardafwijking is:", stdev(potentials))
"""montecarlo_v1.simulatie(n, m)
montecarlo_v2.simulatie(n, m)
montecarlo_v3.simulatie(n, m)"""
#eof
| 0 | 0 | 0 |
fb6e37f128873af57493c80c3f8cfa9a2e7b7cbc | 5,467 | py | Python | qiniuManager/test/command_test.py | hellflame/qiniu | 8f40c70fd6eb3178ac1410ca46f3584661cf994b | [
"MIT"
] | null | null | null | qiniuManager/test/command_test.py | hellflame/qiniu | 8f40c70fd6eb3178ac1410ca46f3584661cf994b | [
"MIT"
] | null | null | null | qiniuManager/test/command_test.py | hellflame/qiniu | 8f40c70fd6eb3178ac1410ca46f3584661cf994b | [
"MIT"
] | null | null | null | # coding=utf8
import unittest
import string
import random
import shlex
from qiniuManager.run import *
if __name__ == '__main__':
unittest.main(verbosity=2)
| 41.416667 | 113 | 0.659777 | # coding=utf8
import unittest
import string
import random
import shlex
from qiniuManager.run import *
class ParserTest(unittest.TestCase):
    """Unit tests for the qiniuManager command-line argument parser."""
    def setUp(self):
        # A fresh parser per test; the first element of the returned
        # tuple is unused here.
        _, self.parser = parser()
    @staticmethod
    def generate_random_target(length):
        # Random string of letters/digits/spaces, with quotes escaped so
        # it can be embedded in a shlex-split command line.
        target = string.ascii_letters + string.digits + ' '
        return ''.join([random.choice(target) for _ in range(length)]).replace("'", "\\\'").replace("\"", "\\\"")
    def test_version(self):
        self.assertTrue(self.parser.parse_args(['-v']).version)
        self.assertTrue(self.parser.parse_args(['--version']).version)
    def test_export(self):
        # Bare -x exports with no explicit target.
        args = self.parser.parse_args(shlex.split("-x"))
        self.assertEqual(args.export, [None])
    def test_export_with_space(self):
        rand = self.generate_random_target(20)
        self.assertListEqual(self.parser.parse_args(shlex.split("-x '{}'".format(rand))).export, [rand])
    def test_file(self):
        rand = self.generate_random_target(50)
        self.assertEqual(self.parser.parse_args(shlex.split("'{}'".format(rand))).file, rand)
    def test_file_space(self):
        # Two positionals parse as (file, space).
        ran1, ran2 = self.generate_random_target(50), self.generate_random_target(50)
        parse = self.parser.parse_args(shlex.split("'{}' '{}'".format(ran1, ran2)))
        self.assertListEqual([parse.file, parse.space], [ran1, ran2])
    def test_key(self):
        # -k/--key takes the access key and secret key as a pair.
        ak, sk = self.generate_random_target(50), self.generate_random_target(50)
        parse = self.parser.parse_args(shlex.split("-k '{}' '{}'".format(ak, sk)))
        self.assertEqual(parse.key, [ak, sk])
        parse = self.parser.parse_args(shlex.split("--key '{}' '{}'".format(ak, sk)))
        self.assertEqual(parse.key, [ak, sk])
    def test_remove(self):
        name = self.generate_random_target(10)
        parse = self.parser.parse_args(shlex.split("-r '{}'".format(name)))
        self.assertTrue(parse.remove)
        parse = self.parser.parse_args(shlex.split("--remove '{}'".format(name)))
        self.assertTrue(parse.remove)
    def test_force_remove(self):
        name = self.generate_random_target(10)
        parse = self.parser.parse_args(shlex.split("-rf '{}'".format(name)))
        self.assertTrue(parse.force_remove)
        parse = self.parser.parse_args(shlex.split("--force-remove '{}'".format(name)))
        self.assertTrue(parse.force_remove)
    def test_revert(self):
        self.assertFalse(self.parser.parse_args(["--revert"]).revert)
    def test_size(self):
        self.assertTrue(self.parser.parse_args(["--size"]).size)
    def test_list(self):
        # -l/--list accepts an optional space name ([None] when omitted).
        space = self.generate_random_target(10)
        parse = self.parser.parse_args(shlex.split("-l '{}'".format(space)))
        self.assertListEqual(parse.list, [space])
        parse = self.parser.parse_args(["-l"])
        self.assertListEqual(parse.list, [None])
        parse = self.parser.parse_args(shlex.split("--list '{}'".format(space)))
        self.assertListEqual(parse.list, [space])
        parse = self.parser.parse_args(["--list"])
        self.assertListEqual(parse.list, [None])
    def test_gt_lt(self):
        self.assertTrue(self.parser.parse_args(['-gt']).greater)
        self.assertTrue(self.parser.parse_args(['-lt']).littler)
    def test_find_list(self):
        self.assertTrue(self.parser.parse_args(['-f']).find)
        self.assertTrue(self.parser.parse_args(['--find']).find)
    def test_list_all(self):
        self.assertTrue(self.parser.parse_args(['-la']).list_all)
        self.assertTrue(self.parser.parse_args(['--list-all']).list_all)
    def test_list_debug(self):
        space = self.generate_random_target(10)
        self.assertListEqual(self.parser.parse_args(['-ld']).list_debug, [None])
        self.assertListEqual(self.parser.parse_args(['-ld', space]).list_debug, [space])
    def test_check_space(self):
        space = self.generate_random_target(10)
        self.assertListEqual(self.parser.parse_args(['-s']).space_check, [None])
        self.assertListEqual(self.parser.parse_args(['-s', space]).space_check, [space])
    def test_space_alias(self):
        alias = self.generate_random_target(10)
        self.assertEqual(self.parser.parse_args(shlex.split("--alias '{}'".format(alias))).alias, alias)
    def test_remove_space(self):
        target = self.generate_random_target(10)
        self.assertEqual(self.parser.parse_args(shlex.split("-sr '{}'".format(target))).space_remove, target)
    def test_rename(self):
        # -rn without a name defaults to None.
        self.assertEqual(self.parser.parse_args(['-rn']).rename, None)
        name = self.generate_random_target(10)
        self.assertEqual(self.parser.parse_args(shlex.split("-rn '{}'".format(name))).rename, name)
    def test_rename_debug(self):
        name = self.generate_random_target(10)
        self.assertEqual(self.parser.parse_args(['-rd']).rename_debug, None)
        self.assertEqual(self.parser.parse_args(shlex.split("-rd '{}'".format(name))).rename_debug, name)
    def test_download(self):
        self.assertTrue(self.parser.parse_args(['-d']).download)
        self.assertTrue(self.parser.parse_args(['--download']).download)
    def test_download_debug(self):
        self.assertTrue(self.parser.parse_args(['-dd']).download_debug)
    def test_directory_choice(self):
        d = self.generate_random_target(10)
        self.assertEqual(self.parser.parse_args(shlex.split("-t '{}'".format(d))).target, d)
# Run the parser test suite with per-test verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 4,573 | 707 | 23 |
c95a2270f100d93dd3f3e5f2ca7cac7015dd6a7a | 173 | py | Python | phashlib/__init__.py | fake-name/phashlib | 80acf8a8358452952782c120f063f767bf703196 | [
"BSD-2-Clause"
] | null | null | null | phashlib/__init__.py | fake-name/phashlib | 80acf8a8358452952782c120f063f767bf703196 | [
"BSD-2-Clause"
] | null | null | null | phashlib/__init__.py | fake-name/phashlib | 80acf8a8358452952782c120f063f767bf703196 | [
"BSD-2-Clause"
] | null | null | null | # scanner init
from .hashFile import ImageHash
from .hashFile import phash
from .hashFile import hashFile
from .hashFile import getHashDict
from .hashFile import getMd5Hash | 24.714286 | 33 | 0.83237 | # scanner init
from .hashFile import ImageHash
from .hashFile import phash
from .hashFile import hashFile
from .hashFile import getHashDict
from .hashFile import getMd5Hash | 0 | 0 | 0 |
36df6bbfb43aa0bf86f90c7e3f073796ecb7d4af | 1,054 | py | Python | Q071.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q071.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q071.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | """
71
medium
simplify path
"""
# Sample paths: trailing slash, root '..', duplicated slashes, mixed
# '.'/'..' segments, and a literal '...' (a valid directory name).
path1 = "/home/"
path2 = "/../"
path3 = "/home//foo/"
path4 = "/a/./b/../../c/"
path5 = "/../"
path6 = "/..."
path7 = "/a//b////c/d//././/.."
# NOTE(review): Solution is not defined in this excerpt -- presumably the
# LeetCode Solution class from the companion copy; confirm before running.
sol = Solution()
print(sol.simplifyPath(path7))
| 20.269231 | 53 | 0.393738 | """
71
medium
simplify path
"""
class Solution:
    """LeetCode 71 (medium): simplify an absolute Unix-style file path."""
    def simplifyPath(self, path: str) -> str:
        """
        Return the canonical form of ``path``.

        Repeated slashes collapse, ``.`` segments are dropped, ``..`` pops
        the previous segment (and is ignored at the root), and any other
        segment -- including ``...`` or names containing digits, dots, or
        symbols -- is an ordinary directory name kept as-is.

        The original character scan (self-described as giving "wrong
        answers") only accepted letters and ``_`` in names, so digits were
        silently dropped, and its in-loop ``len(dots) == 2`` check popped
        the stack repeatedly while scanning ``..``.  Splitting on ``/``
        sidesteps all of those corner cases.
        """
        stack = []
        for segment in path.split("/"):
            if segment == "" or segment == ".":
                continue  # '' comes from '//' or a leading/trailing '/'
            if segment == "..":
                if stack:
                    stack.pop()  # go up one level, never above root
            else:
                stack.append(segment)
        return "/" + "/".join(stack)
# Sample paths: trailing slash, root '..', duplicated slashes, mixed
# '.'/'..' segments, and a literal '...' (a valid directory name).
path1 = "/home/"
path2 = "/../"
path3 = "/home//foo/"
path4 = "/a/./b/../../c/"
path5 = "/../"
path6 = "/..."
path7 = "/a//b////c/d//././/.."
sol = Solution()
# The canonical form of path7 is /a/b/c.
print(sol.simplifyPath(path7))
| 782 | -6 | 49 |
965862a2e7a222a0fac93bad4cede9a434791a2c | 3,711 | py | Python | test/integration/045_test_severity_tests/test_severity.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | [
"Apache-2.0"
] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | test/integration/045_test_severity_tests/test_severity.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | [
"Apache-2.0"
] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | test/integration/045_test_severity_tests/test_severity.py | tomasfarias/dbt-core | ed5df342ca5d99c5e6971ee6d11c8cf3e6e263b3 | [
"Apache-2.0"
] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z | from test.integration.base import DBTIntegrationTest, use_profile
| 39.478723 | 100 | 0.621126 | from test.integration.base import DBTIntegrationTest, use_profile
class TestSeverity(DBTIntegrationTest):
    """Integration tests for dbt test severity: generic and singular tests
    should report `warn` or `fail` depending on the `strict` var."""
    @property
    def schema(self):
        # Unique schema suffix for this test case (project 045).
        return "severity_045"
    @property
    def models(self):
        # Directory holding the models under test.
        return "models"
    @property
    def project_config(self):
        # Minimal config-version 2 project; seed columns are not quoted.
        return {
            'config-version': 2,
            'seed-paths': ['seeds'],
            'test-paths': ['tests'],
            'seeds': {
                'quote_columns': False,
            },
        }
    def run_dbt_with_vars(self, cmd, strict_var, *args, **kwargs):
        # Inject the run schema and the `strict` var (consumed by the test
        # definitions to choose their severity) before invoking dbt.
        cmd.extend(['--vars',
                    '{{test_run_schema: {}, strict: {}}}'.format(self.unique_schema(), strict_var)])
        return self.run_dbt(cmd, *args, **kwargs)
    @use_profile('postgres')
    def test_postgres_severity_warnings(self):
        # strict=false: both generic tests warn (2 failures each).
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:generic'], 'false')
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].status, 'warn')
        self.assertEqual(results[0].failures, 2)
        self.assertEqual(results[1].status, 'warn')
        self.assertEqual(results[1].failures, 2)
    @use_profile('postgres')
    def test_postgres_severity_rendered_errors(self):
        # strict=true: the same tests render severity=error and fail.
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:generic'], 'true', expect_pass=False)
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].status, 'fail')
        self.assertEqual(results[0].failures, 2)
        self.assertEqual(results[1].status, 'fail')
        self.assertEqual(results[1].failures, 2)
    @use_profile('postgres')
    def test_postgres_severity_warnings_strict(self):
        # NOTE(review): despite the `_strict` name this passes strict='false'
        # (and expects warnings); confirm the intended scenario.
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:generic'], 'false', expect_pass=True)
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].status, 'warn')
        self.assertEqual(results[0].failures, 2)
        self.assertEqual(results[1].status, 'warn')
        self.assertEqual(results[1].failures, 2)
    @use_profile('postgres')
    def test_postgres_data_severity_warnings(self):
        # Singular ("data") test, strict=false: one warning result.
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:singular'], 'false')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].status, 'warn')
        self.assertEqual(results[0].failures, 2)
    @use_profile('postgres')
    def test_postgres_data_severity_rendered_errors(self):
        # Singular test, strict=true: the run fails with status 'fail'.
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:singular'], 'true', expect_pass=False)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].status, 'fail')
        self.assertEqual(results[0].failures, 2)
    @use_profile('postgres')
    def test_postgres_data_severity_warnings_strict(self):
        # NOTE(review): `_strict` but strict='false' again, and the
        # assertTrue below is vacuous -- assertTrue(x, msg) never compares
        # x against 'fail'; it only uses 'fail' as the failure message.
        # Confirm whether assertEqual(status, 'warn') was intended.
        self.run_dbt_with_vars(['seed'], 'false')
        self.run_dbt_with_vars(['run'], 'false')
        results = self.run_dbt_with_vars(
            ['test', '--select', 'test_type:singular'], 'false', expect_pass=True)
        self.assertEqual(len(results), 1)
        self.assertTrue(results[0].status, 'fail')
        self.assertEqual(results[0].failures, 2)
| 3,118 | 503 | 23 |
984fde5f3dfef9e521720e9272a43d4f529ce4f7 | 24,768 | py | Python | tests/test_31_auth_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | 7 | 2021-11-02T05:58:58.000Z | 2022-03-04T22:16:20.000Z | tests/test_31_auth_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | 5 | 2021-01-27T14:18:01.000Z | 2022-03-04T22:03:49.000Z | tests/test_31_auth_service.py | asymworks/jadetree-backend | 5764d9971ef3fdc85b0b9cd51fad82076f464ae4 | [
"BSD-3-Clause"
] | null | null | null | """Test Authentication and Authorization Service."""
import datetime
import re
from arrow import utcnow
from flask import current_app
import pytest # noqa: F401
from jadetree.domain.models import User
from jadetree.exc import AuthError, DomainError, JwtPayloadError, NoResults
from jadetree.mail import mail
from jadetree.service import auth as auth_service
from jadetree.service.auth import JWT_SUBJECT_BEARER_TOKEN
def test_register_user_adds_user(session):
    """A successful registration persists one new, inactive user."""
    user = auth_service.register_user(
        session, 'test@jadetree.io', 'hunter2JT', 'Test User'
    )
    # A real User instance came back and exactly one row was stored.
    assert user is not None
    assert isinstance(user, User)
    assert user.id > 0
    assert len(session.query(User).all()) == 1
    # Credentials are stored hashed, never as the raw password.
    assert user.email == 'test@jadetree.io'
    assert user.pw_hash is not None
    assert user.pw_hash != 'hunter2JT'
    assert user.uid_hash is not None
    # Fresh accounts have no currency yet and await confirmation.
    assert user.currency is None
    assert user.active is False
    assert user.confirmed is False
def test_register_user_throws_duplicate_email(session):
    """Registering the same email twice raises and keeps a single user."""
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(
            session, 'test@jadetree.io', 'hunter2JTa', 'Test User'
        )
    assert len(session.query(User).all()) == 1
    assert 'already exists' in str(exc_info.value)
def test_register_user_throws_bad_email(session):
    """A malformed email address is rejected and nothing is stored."""
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'jadetree', 'hunter2JT', 'Test User')
    assert len(session.query(User).all()) == 0
    assert 'Invalid Email Address' in str(exc_info.value)
def test_register_user_throws_bad_pw_short(session):
    """Passwords shorter than 8 characters are rejected; nothing stored."""
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', 'aBc5', 'Test User')
    assert len(session.query(User).all()) == 0
    message = str(exc_info.value)
    assert 'Password' in message
    assert 'at least 8 characters' in message


def test_register_user_throws_bad_pw_lowercase(session):
    """Passwords without a lower-case letter are rejected."""
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', 'HUNTER2JT', 'Test User')
    assert len(session.query(User).all()) == 0
    assert str(exc_info.value) == 'Password must contain a lower-case letter'


def test_register_user_throws_bad_pw_uppercase(session):
    """Passwords without an upper-case letter are rejected."""
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', 'hunter2jt', 'Test User')
    assert len(session.query(User).all()) == 0
    assert str(exc_info.value) == 'Password must contain an upper-case letter'


def test_register_user_throws_bad_pw_number(session):
    """Passwords without a digit are rejected."""
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
    assert len(session.query(User).all()) == 0
    assert str(exc_info.value) == 'Password must contain a number'
def test_register_user_throws_no_pw_public(app, session, monkeypatch):
    """Public servers require a password at registration time."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
    assert len(session.query(User).all()) == 0
    assert 'Password must be provided' in str(exc_info.value)


def test_register_user_no_pw_personal(app, session, monkeypatch):
    """Personal servers may register a user with an empty password."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    user = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
    assert user is not None
    assert user.id > 0


def test_register_user_no_pw_family(app, session, monkeypatch):
    """Family servers may register a user with an empty password."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
    user = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
    assert user is not None
    assert user.id > 0


def test_register_user_throws_bad_pw_personal(app, session, monkeypatch):
    """A non-empty password is still validated on personal servers."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    with pytest.raises(ValueError) as exc_info:
        auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
    assert len(session.query(User).all()) == 0
    assert str(exc_info.value) == 'Password must contain a number'
def test_register_user_throws_personal_mode(app, session, monkeypatch):
    """A personal-mode server refuses to register a second account."""
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with app.app_context():
        monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'personal')
        with pytest.raises(DomainError) as exc_info:
            auth_service.register_user(
                session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2'
            )
        # Only the first user remains in the database.
        assert len(session.query(User).all()) == 1
        assert str(exc_info.value) == 'Cannot register users when the server mode is set to Personal'
def test_register_user_confirmed_family_mode(app, session, monkeypatch):
    """Users registered on a family-mode server are confirmed at once."""
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with app.app_context():
        monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'family')
        second = auth_service.register_user(
            session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2'
        )
        # No email round-trip needed: the account is live immediately.
        assert second.active is True
        assert second.confirmed is True
def test_register_user_sends_email_public(app, session, monkeypatch):
    """Public registration emails carry valid confirm and cancel JWTs."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    confirm_re = r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)'
    cancel_re = r'cancel\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)'
    with app.app_context():
        with mail.record_messages() as outbox:
            user = auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
        # Exactly one confirmation message was sent.
        assert len(outbox) == 1
        assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
        # Pull both action tokens out of the message body.
        confirm_match = re.search(confirm_re, outbox[0].body)
        assert confirm_match is not None
        cancel_match = re.search(cancel_re, outbox[0].body)
        assert cancel_match is not None
        # The confirm token encodes the user's email and uid hash.
        confirm_payload = auth_service.decodeJwt(
            current_app, confirm_match.group(1), leeway=10
        )
        assert confirm_payload.get('email') == user.email
        assert confirm_payload.get('uid') == user.uid_hash
        # The cancel token only needs the email.
        cancel_payload = auth_service.decodeJwt(
            current_app, cancel_match.group(1), leeway=10
        )
        assert cancel_payload.get('email') == user.email
def test_resend_email_changes_uid_hash(app, session, monkeypatch):
    """Resending the confirmation email rotates the uid hash and token."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    confirm_re = r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)'

    def extract_token(message):
        # Pull the confirmation JWT out of an outgoing message body.
        match = re.search(confirm_re, message.body)
        assert match is not None
        return match.group(1)

    with app.app_context():
        with mail.record_messages() as outbox:
            user = auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
            original_hash = user.uid_hash
            resent = auth_service.resend_confirmation(session, 'test@jadetree.io')
            # The resend rotated the user's uid hash.
            assert resent.uid_hash != original_hash
        # Both the original and the resent message were captured.
        assert len(outbox) == 2
        assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
        assert outbox[1].subject == '[Jade Tree] Confirm Your Registration'
        first_token = extract_token(outbox[0])
        second_token = extract_token(outbox[1])
        # A fresh uid hash implies a fresh confirmation token.
        assert first_token != second_token
        payload = auth_service.decodeJwt(current_app, second_token, leeway=10)
        assert payload.get('email') == user.email
        assert payload.get('uid') == resent.uid_hash
def test_register_user_no_email_personal(app, session, monkeypatch):
    """Ensure registration in Personal mode sends no confirmation email."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    with app.app_context():
        with mail.record_messages() as outbox:
            auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
        # No message should have been queued at all
        assert not outbox
def test_register_user_no_email_family(app, session, monkeypatch):
    """Ensure registration in Family mode sends no confirmation email."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
    with app.app_context():
        with mail.record_messages() as outbox:
            auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
        # No message should have been queued at all
        assert not outbox
def test_register_user_no_email_config(app, session, monkeypatch):
    """Ensure no email is sent when ``CONFIRM_REGISTRATION_EMAIL`` is disabled."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
    with app.app_context():
        with mail.record_messages() as outbox:
            auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
        # Public mode normally emails the user; the config flag suppresses it
        assert not outbox
def test_confirm_user(session):
    """Ensure a registered user can be confirmed and becomes active.

    A freshly registered (public-mode) user starts inactive and unconfirmed
    with no confirmation timestamp; confirming with the correct UID hash and
    email must flip all three.
    """
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert u.id == 1
    assert u.uid_hash is not None
    assert u.active is False
    assert u.confirmed is False
    assert u.confirmed_at is None
    u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
    assert u2 == u
    assert u2.active is True
    assert u2.confirmed is True
    # Fixed: original asserted ``u2.confirmed is not None`` (always true and
    # redundant with the line above); the confirmation timestamp is the value
    # that should now be populated, mirroring the ``confirmed_at is None``
    # precondition checked before confirmation.
    assert u2.confirmed_at is not None
def test_confirm_user_sends_email(app, session, monkeypatch):
    """Ensure confirming a registration triggers the Welcome email."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    with app.app_context():
        with mail.record_messages() as outbox:
            user = auth_service.register_user(
                session, 'test@jadetree.io', 'hunter2JT', 'Test User'
            )
            auth_service.confirm_user(session, user.uid_hash, 'test@jadetree.io')
        # First the confirmation request, then the post-confirmation welcome
        assert len(outbox) == 2
        assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
        assert outbox[1].subject == '[Jade Tree] Welcome to Jade Tree'
def test_confirm_user_personal(app, session, monkeypatch):
    """Ensure registration auto-confirms the user in Personal mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert user.uid_hash is not None
    # No separate confirmation step is needed in Personal mode
    assert user.active is True
    assert user.confirmed is True
    assert user.confirmed_at is not None
def test_confirm_user_family(app, session, monkeypatch):
    """Ensure registration auto-confirms the user in Family mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert user.uid_hash is not None
    # No separate confirmation step is needed in Family mode
    assert user.active is True
    assert user.confirmed is True
    assert user.confirmed_at is not None
def test_confirm_user_auto(app, session, monkeypatch):
    """Ensure Public mode auto-confirms when confirmation emails are disabled."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert user.uid_hash is not None
    # With the email step suppressed, registration confirms immediately
    assert user.active is True
    assert user.confirmed is True
    assert user.confirmed_at is not None
def test_confirm_user_not_exists(session):
    """Ensure confirmation fails for an unknown UID hash."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    with pytest.raises(NoResults, match='Could not find a user with the given hash'):
        auth_service.confirm_user(session, '0000', None)
    # The registered user must be left untouched
    assert user.active is False
    assert user.confirmed is False
def test_confirm_user_wrong_email(session):
    """Ensure confirmation fails when the email does not match the user."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    with pytest.raises(ValueError, match='Email address does not match'):
        auth_service.confirm_user(session, user.uid_hash, 'test@bad.io')
    # The user must remain unconfirmed after the failed attempt
    assert user.active is False
    assert user.confirmed is False
def test_confirm_user_already_confirmed(session):
    """Ensure confirming an already-confirmed user raises an error."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert user.active is False
    assert user.confirmed is False

    # Simulate a user who confirmed previously but was later deactivated
    user.confirmed = True
    user.confirmed_at = utcnow()

    with pytest.raises(DomainError, match='already confirmed'):
        auth_service.confirm_user(session, user.uid_hash, 'test@jadetree.io')

    # Still deactivated, still flagged confirmed
    assert user.active is False
    assert user.confirmed is True
def test_confirm_user_bad_token_no_uid(app, session):
    """Ensure a confirmation token missing the ``uid`` claim is rejected."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    # Hand-build a token that carries the email claim but omits uid
    token = auth_service.encodeJwt(
        app,
        subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
        email='test@jadetree.io'
    )
    with pytest.raises(JwtPayloadError, match='uid claim'):
        auth_service.confirm_user_with_token(session, token)
    assert user.active is False
    assert user.confirmed is False
def test_confirm_user_bad_token_no_email(app, session):
    """Ensure a confirmation token missing the ``email`` claim is rejected."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    # Hand-build a token that carries the uid claim but omits email
    token = auth_service.encodeJwt(
        app,
        subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
        uid=user.uid_hash
    )
    with pytest.raises(JwtPayloadError, match='email claim'):
        auth_service.confirm_user_with_token(session, token)
    assert user.active is False
    assert user.confirmed is False
def test_cancel_user_bad_token_no_email(app, session):
    """Ensure a cancellation token missing the ``email`` claim is rejected."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    # Hand-build a cancellation token with no claims beyond the subject
    token = auth_service.encodeJwt(
        app,
        subject=auth_service.JWT_SUBJECT_CANCEL_EMAIL,
    )
    with pytest.raises(JwtPayloadError, match='email claim'):
        auth_service.cancel_registration_with_token(session, token)
    assert user.active is False
    assert user.confirmed is False
def test_get_user(session):
    """Ensure a user can be retrieved by primary key."""
    created = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert created.id == 1
    # Re-load the row directly and look it up through the service
    stored = session.query(User).filter(User.id == created.id).one_or_none()
    assert auth_service.get_user(session, stored.id) == created
def test_get_user_not_exists(session):
    """Ensure looking up a missing User ID yields None."""
    missing = auth_service.get_user(session, 1)
    assert missing is None
def test_get_user_invalid(session):
    """Ensure a non-integer User ID yields None rather than raising."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert auth_service.get_user(session, 'one') is None
def test_load_user_by_hash(session):
    """Ensure a user can be retrieved by UID hash."""
    created = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert created.id == 1
    # Re-load the row directly to read its persisted hash
    stored = session.query(User).filter(User.id == created.id).one_or_none()
    assert stored.uid_hash is not None
    assert auth_service.load_user_by_hash(session, stored.uid_hash) == created
def test_load_user_by_hash_not_exists(session):
    """Ensure an unknown (but well-formed) UID hash yields None."""
    unknown_hash = '0' * 32
    assert auth_service.load_user_by_hash(session, unknown_hash) is None
def test_load_user_by_hash_invalid(session):
    """Ensure a malformed UID hash yields None rather than raising."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert user.id == 1
    assert auth_service.load_user_by_hash(session, 'one') is None
def test_invalidate_uid_hash(session):
    """Ensure invalidation rotates the UID hash while keeping the same user."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    previous_id, previous_hash = user.id, user.uid_hash
    refreshed = auth_service.invalidate_uid_hash(session, previous_hash)
    # Same record, new hash — existing sessions keyed on the old hash die
    assert refreshed.id == previous_id
    assert refreshed.uid_hash != previous_hash
def test_invalidate_uid_hash_invalid(session):
    """Ensure invalidating an unknown UID hash raises NoResults."""
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with pytest.raises(NoResults, match='Could not find a user'):
        auth_service.invalidate_uid_hash(session, 'xxx')
def test_load_user_by_email(session):
    """Ensure a user can be retrieved by email address."""
    created = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert created.id == 1
    assert auth_service.load_user_by_email(session, 'test@jadetree.io') == created
def test_load_user_by_token(session):
    """Ensure a user can be retrieved from a bearer JSON Web Token."""
    created = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert created.id == 1
    # load_user_by_token only accepts JWT_SUBJECT_BEARER_TOKEN subjects
    bearer = auth_service.generate_user_token(created, JWT_SUBJECT_BEARER_TOKEN)
    assert auth_service.load_user_by_token(session, bearer) == created
def test_load_user_by_token_no_uid(app, session):
    """Ensure a bearer token lacking the ``uid`` claim is rejected."""
    created = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert created.id == 1
    # Build a bearer token by hand so the uid claim can be omitted
    expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
    token = auth_service.encodeJwt(
        app,
        subject=JWT_SUBJECT_BEARER_TOKEN,
        exp=expiry,
    )
    with pytest.raises(JwtPayloadError, match='Missing uid key') as excinfo:
        auth_service.load_user_by_token(session, token)
    assert excinfo.value.payload_key == 'uid'
def test_change_user_password(app, session, monkeypatch):
    """Ensure changing the password works and rotates the UID hash."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    original_hash = user.uid_hash
    assert user.check_password('hunter2JT')

    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    confirmed = auth_service.confirm_user(session, user.uid_hash, 'test@jadetree.io')
    result = auth_service.change_password(
        session,
        confirmed.uid_hash,
        'hunter2JT',
        'aSecu43Pa55w0rd'
    )
    assert 'token' in result
    assert 'user' in result

    reloaded = auth_service.load_user_by_email(session, 'test@jadetree.io')
    assert reloaded.check_password('aSecu43Pa55w0rd')
    # Default behavior logs out other sessions by rotating the hash
    assert original_hash != reloaded.uid_hash
def test_change_user_password_keep_hash(app, session, monkeypatch):
    """Ensure a password change can preserve the UID hash (keep sessions)."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    original_hash = user.uid_hash
    assert user.check_password('hunter2JT')

    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    confirmed = auth_service.confirm_user(session, user.uid_hash, 'test@jadetree.io')
    result = auth_service.change_password(
        session,
        confirmed.uid_hash,
        'hunter2JT',
        'aSecu43Pa55w0rd',
        logout_sessions=False,
    )
    assert 'token' in result
    assert 'user' in result

    reloaded = auth_service.load_user_by_email(session, 'test@jadetree.io')
    assert reloaded.check_password('aSecu43Pa55w0rd')
    # logout_sessions=False must leave the hash (and live sessions) intact
    assert original_hash == reloaded.uid_hash
def test_change_user_password_invalid_hash(app, session, monkeypatch):
    """Ensure a password change with an unknown UID hash raises NoResults."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    with pytest.raises(NoResults, match='Could not find'):
        auth_service.change_password(
            session,
            '0',
            'hunter2JT',
            'aSecu43Pa55w0rd',
            logout_sessions=False,
        )
def test_change_user_password_inactive(app, session, monkeypatch):
    """Ensure an inactive (unconfirmed) user cannot change their password."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    # User was never confirmed, so it is still inactive
    with pytest.raises(AuthError, match='is not active'):
        auth_service.change_password(
            session,
            user.uid_hash,
            'hunter2JT',
            'aSecu43Pa55w0rd',
            logout_sessions=False,
        )
def test_user_list_personal(app, session, monkeypatch):
    """Ensure the authorized-user list works in Personal mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    listed = auth_service.auth_user_list(session)
    assert listed == [user]
def test_user_list_family(app, session, monkeypatch):
    """Ensure the authorized-user list works in Family mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
    first = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    second = auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
    listed = auth_service.auth_user_list(session)
    # Both registered users appear, in registration order
    assert listed == [first, second]
def test_user_list_public(app, session, monkeypatch):
    """Ensure the authorized-user list is refused in Public mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    # Enumerating users would leak registration data on a public server
    with pytest.raises(DomainError):
        auth_service.auth_user_list(session)
def test_unconfirmed_user_cannot_log_in(app, session, monkeypatch):
    """Ensure login is refused until the user confirms registration."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with pytest.raises(AuthError, match='confirmed registration'):
        auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
def test_inactive_user_cannot_log_in(app, session, monkeypatch):
    """Ensure login is refused for a confirmed but deactivated user."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    auth_service.confirm_user(session, user.uid_hash, 'test@jadetree.io')
    # Deactivate the account after confirmation
    monkeypatch.setattr(user, 'active', False)
    with pytest.raises(AuthError, match='active'):
        auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
| 37.81374 | 116 | 0.691497 | """Test Authentication and Authorization Service."""
import datetime
import re
from arrow import utcnow
from flask import current_app
import pytest # noqa: F401
from jadetree.domain.models import User
from jadetree.exc import AuthError, DomainError, JwtPayloadError, NoResults
from jadetree.mail import mail
from jadetree.service import auth as auth_service
from jadetree.service.auth import JWT_SUBJECT_BEARER_TOKEN
def test_register_user_adds_user(session):
"""Ensure user is added when register_user is called."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u is not None
assert isinstance(u, User)
assert u.id > 0
assert len(session.query(User).all()) == 1
assert u.email == 'test@jadetree.io'
assert u.pw_hash is not None
assert u.pw_hash != 'hunter2JT'
assert u.uid_hash is not None
assert u.currency is None
assert u.active is False
assert u.confirmed is False
def test_register_user_throws_duplicate_email(session):
"""Ensure two users with the same email cannot be registered."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JTa', 'Test User')
assert len(session.query(User).all()) == 1
assert 'already exists' in str(exc_data.value)
def test_register_user_throws_bad_email(session):
"""Ensure invalid email addresses are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'jadetree', 'hunter2JT', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Invalid Email Address' in str(exc_data.value)
def test_register_user_throws_bad_pw_short(session):
"""Ensure passwords which are too short are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'aBc5', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Password' in str(exc_data.value)
assert 'at least 8 characters' in str(exc_data.value)
def test_register_user_throws_bad_pw_lowercase(session):
"""Ensure passwords with no lowercase letter are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'HUNTER2JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a lower-case letter'
def test_register_user_throws_bad_pw_uppercase(session):
"""Ensure passwords with no uppercase letter are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter2jt', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain an upper-case letter'
def test_register_user_throws_bad_pw_number(session):
"""Ensure passwords with no number are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a number'
def test_register_user_throws_no_pw_public(app, session, monkeypatch):
"""Ensure a password is required in public mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Password must be provided' in str(exc_data.value)
def test_register_user_no_pw_personal(app, session, monkeypatch):
"""Ensure a password is not required in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
u = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert u is not None
assert u.id > 0
def test_register_user_no_pw_family(app, session, monkeypatch):
"""Ensure a password is not required in family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert u is not None
assert u.id > 0
def test_register_user_throws_bad_pw_personal(app, session, monkeypatch):
"""Ensure bad passwords are rejected in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a number'
def test_register_user_throws_personal_mode(app, session, monkeypatch):
"""Ensure a second user cannot be registered in Personal mode."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with app.app_context():
monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'personal')
with pytest.raises(DomainError) as exc_data:
auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
assert len(session.query(User).all()) == 1
assert str(exc_data.value) == 'Cannot register users when the server mode is set to Personal'
def test_register_user_confirmed_family_mode(app, session, monkeypatch):
"""Ensure new users are automatically confirmed in Family mode."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with app.app_context():
monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
assert u.active is True
assert u.confirmed is True
def test_register_user_sends_email_public(app, session, monkeypatch):
"""Ensure a Registration Confirmation email is sent to the user."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
u = None
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 1
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
# Load Tokens from Email
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
confirm_token = str(m.group(1))
m = re.search(r'cancel\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
cancel_token = str(m.group(1))
# Check Tokens
confirm_payload = auth_service.decodeJwt(current_app, confirm_token, leeway=10)
assert 'email' in confirm_payload
assert 'uid' in confirm_payload
assert confirm_payload['email'] == u.email
assert confirm_payload['uid'] == u.uid_hash
cancel_payload = auth_service.decodeJwt(current_app, cancel_token, leeway=10)
assert 'email' in cancel_payload
assert cancel_payload['email'] == u.email
def test_resend_email_changes_uid_hash(app, session, monkeypatch):
"""Ensure resending a Confirmation Email changes the UID hash."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
u = None
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
uid_hash_1 = u.uid_hash
u2 = auth_service.resend_confirmation(session, 'test@jadetree.io')
assert u2.uid_hash != uid_hash_1
assert len(outbox) == 2
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
assert outbox[1].subject == '[Jade Tree] Confirm Your Registration'
# Load Tokens from Emails
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
confirm_token_1 = str(m.group(1))
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[1].body)
assert m is not None
confirm_token_2 = str(m.group(1))
# Check Tokens
assert confirm_token_1 != confirm_token_2
confirm_payload = auth_service.decodeJwt(current_app, confirm_token_2, leeway=10)
assert 'email' in confirm_payload
assert 'uid' in confirm_payload
assert confirm_payload['email'] == u.email
assert confirm_payload['uid'] == u2.uid_hash
def test_register_user_no_email_personal(app, session, monkeypatch):
"""Ensure a confirmation email is not sent in Personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_register_user_no_email_family(app, session, monkeypatch):
"""Ensure a confirmation email is not sent in Family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_register_user_no_email_config(app, session, monkeypatch):
"""Ensure a confirmation email is not sent when CONFIRM_REGISTRATION_EMAIL is set."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_confirm_user(session):
"""Ensure a user can be confirmed."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is False
assert u.confirmed is False
assert u.confirmed_at is None
u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
assert u2 == u
assert u2.active is True
assert u2.confirmed is True
assert u2.confirmed is not None
def test_confirm_user_sends_email(app, session, monkeypatch):
"""Ensure a Welcome email is sent after confirmation."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
assert len(outbox) == 2
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
assert outbox[1].subject == '[Jade Tree] Welcome to Jade Tree'
def test_confirm_user_personal(app, session, monkeypatch):
"""Ensure a user is automatically confirmed in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_family(app, session, monkeypatch):
"""Ensure a user is automatically confirmed in family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_auto(app, session, monkeypatch):
"""Ensure a user can be confirmed in public mode with no email."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_not_exists(session):
"""Ensure a non-existent user is not confirmed."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
with pytest.raises(NoResults) as exc_data:
auth_service.confirm_user(session, '0000', None)
assert str(exc_data.value) == 'Could not find a user with the given hash'
assert u.active is False
assert u.confirmed is False
def test_confirm_user_wrong_email(session):
"""Ensure a user cannot be confirmed with a mismatched email."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
with pytest.raises(ValueError) as exc_data:
auth_service.confirm_user(session, u.uid_hash, 'test@bad.io')
assert 'Email address does not match' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_confirm_user_already_confirmed(session):
"""Ensure a user already confirmed cannot be confirmed again."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.active is False
assert u.confirmed is False
# Set confirmed to True as in a user who had confirmed but then was
# deactivated
u.confirmed = True
u.confirmed_at = utcnow()
with pytest.raises(DomainError) as exc_data:
auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
assert 'already confirmed' in str(exc_data.value)
assert u.active is False
assert u.confirmed is True
def test_confirm_user_bad_token_no_uid(app, session):
"""Ensure a token without a uid subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
email='test@jadetree.io'
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.confirm_user_with_token(session, token)
assert 'uid claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_confirm_user_bad_token_no_email(app, session):
"""Ensure a token without an email subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
uid=u.uid_hash
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.confirm_user_with_token(session, token)
assert 'email claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_cancel_user_bad_token_no_email(app, session):
"""Ensure a token without an email subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CANCEL_EMAIL,
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.cancel_registration_with_token(session, token)
assert 'email claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_get_user(session):
"""Ensure a user can be looked up by ID."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
u = session.query(User).filter(User.id == nu.id).one_or_none()
assert auth_service.get_user(session, u.id) == nu
def test_get_user_not_exists(session):
"""Ensure a non-existent User ID returns None."""
assert auth_service.get_user(session, 1) is None
def test_get_user_invalid(session):
"""Ensure an invalid User ID returns None."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert auth_service.get_user(session, 'one') is None
def test_load_user_by_hash(session):
"""Ensure a user can be looked up by ID Hash."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
u = session.query(User).filter(User.id == nu.id).one_or_none()
assert u.uid_hash is not None
assert auth_service.load_user_by_hash(session, u.uid_hash) == nu
def test_load_user_by_hash_not_exists(session):
"""Ensure an ID hash which does not exist returns None."""
assert auth_service.load_user_by_hash(session, '00000000000000000000000000000000') is None
def test_load_user_by_hash_invalid(session):
"""Ensure an invalid ID hash returns None."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert auth_service.load_user_by_hash(session, 'one') is None
def test_invalidate_uid_hash(session):
"""Ensure the ID hash can be changed to invalidate login sessions."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
old_id = u.id
old_hash = u.uid_hash
nu = auth_service.invalidate_uid_hash(session, old_hash)
assert nu.id == old_id
assert nu.uid_hash != old_hash
def test_invalidate_uid_hash_invalid(session):
"""Ensure an error is raised when invalidating a non-existent ID hash."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(NoResults, match='Could not find a user'):
auth_service.invalidate_uid_hash(session, 'xxx')
def test_load_user_by_email(session):
"""Ensure a user can be looked up by email address."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
assert auth_service.load_user_by_email(session, 'test@jadetree.io') == nu
def test_load_user_by_token(session):
"""Ensure a user can be looked up by JSON Web Token."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
# load_user_by_token is hardcoded for JWT_SUBJECT_BEARER_TOKEN
token = auth_service.generate_user_token(nu, JWT_SUBJECT_BEARER_TOKEN)
assert auth_service.load_user_by_token(session, token) == nu
def test_load_user_by_token_no_uid(app, session):
"""Ensure a JWT missing the user ID hash key is rejected."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
# load_user_by_token is hardcoded for JWT_SUBJECT_BEARER_TOKEN
token = auth_service.encodeJwt(
app,
subject=JWT_SUBJECT_BEARER_TOKEN,
exp=datetime.datetime.utcnow() + datetime.timedelta(minutes=1),
)
with pytest.raises(JwtPayloadError, match='Missing uid key') as excinfo:
auth_service.load_user_by_token(session, token)
assert excinfo.value.payload_key == 'uid'
def test_change_user_password(app, session, monkeypatch):
    """Ensure a user password can be changed."""
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    u_hash = u.uid_hash
    assert u.check_password('hunter2JT')
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
    rv = auth_service.change_password(
        session,
        u2.uid_hash,
        'hunter2JT',
        'aSecu43Pa55w0rd'
    )
    assert 'token' in rv
    assert 'user' in rv
    u3 = auth_service.load_user_by_email(session, 'test@jadetree.io')
    assert u3.check_password('aSecu43Pa55w0rd')
    # By default the uid_hash rotates on password change, logging out sessions.
    assert u_hash != u3.uid_hash
def test_change_user_password_keep_hash(app, session, monkeypatch):
    """Ensure a user password can be changed without changing ID hash."""
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    u_hash = u.uid_hash
    assert u.check_password('hunter2JT')
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
    rv = auth_service.change_password(
        session,
        u2.uid_hash,
        'hunter2JT',
        'aSecu43Pa55w0rd',
        logout_sessions=False,
    )
    assert 'token' in rv
    assert 'user' in rv
    u3 = auth_service.load_user_by_email(session, 'test@jadetree.io')
    assert u3.check_password('aSecu43Pa55w0rd')
    # logout_sessions=False preserves the uid_hash, keeping sessions alive.
    assert u_hash == u3.uid_hash
def test_change_user_password_invalid_hash(app, session, monkeypatch):
    """Ensure a user password with an invalid User ID hash is rejected."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    # No user is registered, so the hash '0' cannot match anyone.
    with pytest.raises(NoResults) as excinfo:
        auth_service.change_password(
            session,
            '0',
            'hunter2JT',
            'aSecu43Pa55w0rd',
            logout_sessions=False,
        )
    assert 'Could not find' in str(excinfo.value)
def test_change_user_password_inactive(app, session, monkeypatch):
    """Ensure an inactive user cannot change their password."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    # The user is registered but never confirmed, so they are not yet active.
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with pytest.raises(AuthError) as excinfo:
        auth_service.change_password(
            session,
            u.uid_hash,
            'hunter2JT',
            'aSecu43Pa55w0rd',
            logout_sessions=False,
        )
    assert 'is not active' in str(excinfo.value)
def test_user_list_personal(app, session, monkeypatch):
    """Ensure a list of authorized users can be generated in Personal mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    users = auth_service.auth_user_list(session)
    # Personal mode exposes the single registered user.
    assert len(users) == 1
    assert users[0] == u
def test_user_list_family(app, session, monkeypatch):
    """Ensure a list of authorized users can be generated in Family mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
    u1 = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    u2 = auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
    users = auth_service.auth_user_list(session)
    # Family mode lists every registered user, in registration order.
    assert len(users) == 2
    assert users[0] == u1
    assert users[1] == u2
def test_user_list_public(app, session, monkeypatch):
    """Ensure the authorized user list is not available in Public mode."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    # Enumerating users is a privacy leak on a public server, so it must raise.
    with pytest.raises(DomainError):
        auth_service.auth_user_list(session)
def test_unconfirmed_user_cannot_log_in(app, session, monkeypatch):
    """Ensure an unconfirmed user cannot log in."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    # Register without calling confirm_user, then attempt a login.
    auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    with pytest.raises(AuthError) as exc_info:
        auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
    assert 'confirmed registration' in str(exc_info.value)
def test_inactive_user_cannot_log_in(app, session, monkeypatch):
    """Ensure an inactivated user cannot log in."""
    monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
    # Deactivate the confirmed user directly, then attempt a login.
    monkeypatch.setattr(u, 'active', False)
    with pytest.raises(AuthError) as exc_info:
        auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
    assert 'active' in str(exc_info.value)
| 0 | 0 | 0 |
5a3558a4e4e85fe8c19de89983db6a8b9d4b653a | 168 | py | Python | Others/nikkei/nikkei2019-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/nikkei/nikkei2019-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/nikkei/nikkei2019-qual/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| 15.272727 | 40 | 0.446429 | # -*- coding: utf-8 -*-
def main():
    """Read three integers ``n a b`` from stdin and print two values:
    ``min(a, b)`` and ``max(0, a + b - n)``.
    """
    n, first, second = (int(token) for token in input().split())
    overlap_floor = first + second - n
    print(min(first, second), overlap_floor if overlap_floor > 0 else 0)


if __name__ == '__main__':
    main()
| 72 | 0 | 25 |
2cecf13f2e03af177c86a12e714fa35166b94c11 | 227 | py | Python | teams/admin.py | alex-phillips/snipt | e47a632316362b4df76bf389e7c74acdee5ba4e1 | [
"MIT"
] | null | null | null | teams/admin.py | alex-phillips/snipt | e47a632316362b4df76bf389e7c74acdee5ba4e1 | [
"MIT"
] | null | null | null | teams/admin.py | alex-phillips/snipt | e47a632316362b4df76bf389e7c74acdee5ba4e1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from teams.models import Team
admin.site.register(Team, TeamAdmin)
| 22.7 | 59 | 0.718062 | from django.contrib import admin
from teams.models import Team
class TeamAdmin(admin.ModelAdmin):
    """Django admin configuration for Team: list columns, newest first."""
    list_display = ('name', 'owner', 'created', 'modified')
    ordering = ('-created',)
admin.site.register(Team, TeamAdmin)
| 0 | 102 | 23 |
277c4d993e1f5e5760f1cdc1bfe7c05ee5ed8cb5 | 421 | py | Python | examples/ipc/server.py | Oxel40/python-helpers | eb81073f7abe575b86c9c96f7b669060f7ab564b | [
"MIT"
] | null | null | null | examples/ipc/server.py | Oxel40/python-helpers | eb81073f7abe575b86c9c96f7b669060f7ab564b | [
"MIT"
] | null | null | null | examples/ipc/server.py | Oxel40/python-helpers | eb81073f7abe575b86c9c96f7b669060f7ab564b | [
"MIT"
] | null | null | null | import helpers.ipc as ipc
import numpy as np
from enum import Enum
address = ('localhost', 6000)
router = ipc.Router(address, authkey=b'test')
@router.expose
@router.expose
@router.expose
router.serve()
| 13.15625 | 45 | 0.646081 | import helpers.ipc as ipc
import numpy as np
from enum import Enum
class en(Enum):
    """Small example enum used to exercise IPC argument marshalling."""
    A = 1
    B = 2
    C = 3
address = ('localhost', 6000)
router = ipc.Router(address, authkey=b'test')
@router.expose
def test(one, two, three):
return (three, two, one)
@router.expose
def arrmax(arr: np.ndarray):
    """Return the largest element of the given NumPy array."""
    return np.max(arr)
@router.expose
def enBTest(enu):
    # Build a boolean mask over the members of ``en``, marking which member
    # equals ``enu.B``. NOTE(review): ``enu`` appears intended to be the
    # ``en`` class itself (so ``enu.B`` is ``en.B``); if a member is passed,
    # member-attribute access is deprecated/removed in newer Pythons — confirm.
    return [x == enu.B for x in en]
router.serve()
| 94 | 24 | 89 |
4c501b1d014d183845eabbe6c2f8385cfd2ebd9d | 1,593 | py | Python | Problem_6/problem_6.py | vaxherra/algorithmic-problems | 8966a220bc5ee4c82f01ff81f95a464ad43fc660 | [
"MIT"
] | null | null | null | Problem_6/problem_6.py | vaxherra/algorithmic-problems | 8966a220bc5ee4c82f01ff81f95a464ad43fc660 | [
"MIT"
] | null | null | null | Problem_6/problem_6.py | vaxherra/algorithmic-problems | 8966a220bc5ee4c82f01ff81f95a464ad43fc660 | [
"MIT"
] | null | null | null | import random
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.

    Non-numeric entries (e.g. ``None``) are skipped. If the list is empty
    or contains no numeric values, ``(None, None)`` is returned.

    Args:
       ints(list): list of integers containing one or more integers
    """
    min_v, max_v = None, None
    for num in ints:
        # in case null/empty values (not integers or floats), omit this element
        if not (type(num) == int or type(num) == float):
            continue
        # Seed from the first numeric value; the original seeded from
        # ints[0] unconditionally, raising TypeError when ints[0] was None.
        if min_v is None or num < min_v:
            min_v = num
        if max_v is None or num > max_v:
            max_v = num
    return min_v, max_v
# Example Test Case of Ten Integers
l = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(l)
print("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Pass
# My test cases
# CASE #1 (edge case): Empty list, does not have min/max : returns a tuple of (None,None)
print(get_min_max([]) == (None, None))
# True
# CASE #2: List containing Null values,
alist = [1, 2, 3, None, 4, None, 5]
print(get_min_max(alist) == (1, 5))
# True
# CASE #3 List of all None values
aList = [None for i in range(10)]
print(get_min_max(aList) == (None, None))
# True
# CASE #4 (edge case): big input of randomly shuffled 20 million integers
a = [i for i in range(int(-1e6), int(1e6)+1, 1)]
random.shuffle(a)
print(get_min_max(a) == (-int(1e6), int(1e6)))
# True
# CASE #5 (edge case): input of identical numbers: min==max
a = [22 for i in range(100)]
print(get_min_max(a) == (22, 22))
# True
# CASE #6: a simple test case
a = [5, 4, 2, 1, -10, 12, 15, 235]
print(get_min_max(a) == (-10, 235))
# True
| 23.776119 | 89 | 0.608286 | import random
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.

    Non-numeric entries (e.g. ``None``) are skipped. If the list is empty
    or contains no numeric values, ``(None, None)`` is returned.

    Args:
       ints(list): list of integers containing one or more integers
    """
    min_v, max_v = None, None
    for num in ints:
        # in case null/empty values (not integers or floats), omit this element
        if not (type(num) == int or type(num) == float):
            continue
        # Seed from the first numeric value; the original seeded from
        # ints[0] unconditionally, raising TypeError when ints[0] was None.
        if min_v is None or num < min_v:
            min_v = num
        if max_v is None or num > max_v:
            max_v = num
    return min_v, max_v
# Example Test Case of Ten Integers
l = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(l)
print("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Pass
# My test cases
# CASE #1 (edge case): Empty list, does not have min/max : returns a tuple of (None,None)
print(get_min_max([]) == (None, None))
# True
# CASE #2: List containing Null values,
alist = [1, 2, 3, None, 4, None, 5]
print(get_min_max(alist) == (1, 5))
# True
# CASE #3 List of all None values
aList = [None for i in range(10)]
print(get_min_max(aList) == (None, None))
# True
# CASE #4 (edge case): big input of randomly shuffled 20 million integers
a = [i for i in range(int(-1e6), int(1e6)+1, 1)]
random.shuffle(a)
print(get_min_max(a) == (-int(1e6), int(1e6)))
# True
# CASE #5 (edge case): input of identical numbers: min==max
a = [22 for i in range(100)]
print(get_min_max(a) == (22, 22))
# True
# CASE #6: a simple test case
a = [5, 4, 2, 1, -10, 12, 15, 235]
print(get_min_max(a) == (-10, 235))
# True
| 0 | 0 | 0 |
de755d841b0851b65aa5223ebc2104ea54d12c09 | 677 | py | Python | leetcode/3.Longest-Substring-Without-Repeating-Characters/Longest Substring Without Repeating Characters.py | chengjinlee/leetcode | 6bd8dc71ea406536fc1b53aa10f1e967d002ee7a | [
"MIT"
] | null | null | null | leetcode/3.Longest-Substring-Without-Repeating-Characters/Longest Substring Without Repeating Characters.py | chengjinlee/leetcode | 6bd8dc71ea406536fc1b53aa10f1e967d002ee7a | [
"MIT"
] | null | null | null | leetcode/3.Longest-Substring-Without-Repeating-Characters/Longest Substring Without Repeating Characters.py | chengjinlee/leetcode | 6bd8dc71ea406536fc1b53aa10f1e967d002ee7a | [
"MIT"
] | null | null | null | #Codeing-Utf8
| 32.238095 | 64 | 0.435746 | #Codeing-Utf8
class Solution(object):
    """Length of the longest substring without repeating characters."""

    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int
        """
        start = 0        # left edge of the current duplicate-free window
        maxlen = 0       # best window length seen so far (fix: was `maxlen =` with no value)
        last_seen = {}   # char -> index of last occurrence inside the window, -1 if absent
        for ch in s:     # mark every character as "not currently in the window"
            last_seen[ch] = -1
        for i in range(len(s)):
            if last_seen[s[i]] != -1:
                # s[i] is already in the window: slide the left edge past its
                # previous occurrence, clearing the markers of dropped chars.
                while start <= last_seen[s[i]]:
                    last_seen[s[start]] = -1
                    start += 1
            if i - start + 1 > maxlen:
                maxlen = i - start + 1
            last_seen[s[i]] = i
        return maxlen
| 0 | 789 | 22 |
880c025a29ed4c5d5713e1b1820e2645e5eda3b0 | 1,127 | py | Python | api/v2/views/image_version.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/views/image_version.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/views/image_version.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import filters
import django_filters
from core.models import ApplicationVersion as ImageVersion
from api.v2.views.base import AuthOptionalViewSet
from api.v2.serializers.details import ImageVersionSerializer
class ImageVersionViewSet(AuthOptionalViewSet):
"""
API endpoint that allows instance actions to be viewed or edited.
"""
queryset = ImageVersion.objects.all()
serializer_class = ImageVersionSerializer
search_fields = ('application__id', 'application__created_by__username')
ordering_fields = ('start_date',)
ordering = ('start_date',)
filter_class = ImageVersionFilter
filter_backends = (filters.OrderingFilter, filters.DjangoFilterBackend)
| 33.147059 | 79 | 0.766637 | from rest_framework import filters
import django_filters
from core.models import ApplicationVersion as ImageVersion
from api.v2.views.base import AuthOptionalViewSet
from api.v2.serializers.details import ImageVersionSerializer
class ImageVersionFilter(django_filters.FilterSet):
    """Query-parameter filters for image versions: ``image_id`` and
    ``created_by``, mapped onto the related application's id and creator."""
    image_id = django_filters.CharFilter('application__id')
    created_by = django_filters.CharFilter('application__created_by__username')
    class Meta:
        model = ImageVersion
        fields = ['image_id', 'created_by']
class ImageVersionViewSet(AuthOptionalViewSet):
    """
    API endpoint that allows image versions to be viewed or edited.
    """
    queryset = ImageVersion.objects.all()
    serializer_class = ImageVersionSerializer
    search_fields = ('application__id', 'application__created_by__username')
    ordering_fields = ('start_date',)
    ordering = ('start_date',)
    filter_class = ImageVersionFilter
    filter_backends = (filters.OrderingFilter, filters.DjangoFilterBackend)
    def get_queryset(self):
        # Narrow the default queryset per requesting user via
        # ImageVersion.current_machines (visibility semantics defined there).
        request_user = self.request.user
        return ImageVersion.current_machines(request_user)
| 102 | 260 | 50 |
0589bed8eb7ebb2ae174ce5e263299dc945d7b37 | 1,908 | py | Python | src/wrapper/joystick.py | KrbDevelopment/LCD-Matrix | f5064ace1bb42b7e8cacbf91434bf0064a6812a6 | [
"MIT"
] | null | null | null | src/wrapper/joystick.py | KrbDevelopment/LCD-Matrix | f5064ace1bb42b7e8cacbf91434bf0064a6812a6 | [
"MIT"
] | null | null | null | src/wrapper/joystick.py | KrbDevelopment/LCD-Matrix | f5064ace1bb42b7e8cacbf91434bf0064a6812a6 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
class Keymap:
"""BCM keymap for joystick buttons"""
UP = 6
DOWN = 19
LEFT = 5
RIGHT = 26
PRESS = 13
| 30.285714 | 85 | 0.643606 | import RPi.GPIO as GPIO
class Keymap:
    """BCM keymap for joystick buttons"""
    # BCM GPIO pin numbers for the four directions and the centre press.
    UP = 6
    DOWN = 19
    LEFT = 5
    RIGHT = 26
    PRESS = 13
class Joystick:
    """Five-button joystick on RPi GPIO pins with pull-ups enabled.

    NOTE(review): ``_setup_events()`` is called from ``__init__`` while all
    ``event_*`` callbacks are still ``None``, so no edge detection is ever
    registered at construction time, and callbacks assigned afterwards are
    never wired up — confirm whether a re-registration hook is intended.
    """
    def __init__(self, up, down, left, right, click):
        # BCM pin numbers for each button.
        self.up = up
        self.down = down
        self.left = left
        self.right = right
        self.click = click
        # Optional per-button callbacks (see NOTE above about registration).
        self.event_up = None
        self.event_down = None
        self.event_left = None
        self.event_right = None
        self.event_click = None
        self._setup_keys()
        self._setup_events()
    def _setup_keys(self):
        # Configure every button pin as an input with the internal pull-up.
        GPIO.setup(self.up, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.down, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.left, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.right, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.click, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    def _setup_events(self):
        # Register rising-edge callbacks for any handlers that have been set.
        if self.event_up is not None:
            GPIO.add_event_detect(self.up, GPIO.RISING, callback=self.event_up)
        if self.event_down is not None:
            GPIO.add_event_detect(self.down, GPIO.RISING, callback=self.event_down)
        if self.event_left is not None:
            GPIO.add_event_detect(self.left, GPIO.RISING, callback=self.event_left)
        if self.event_right is not None:
            GPIO.add_event_detect(self.right, GPIO.RISING, callback=self.event_right)
        if self.event_click is not None:
            GPIO.add_event_detect(self.click, GPIO.RISING, callback=self.event_click)
    # The is_pressed_* helpers return the raw GPIO level. NOTE(review): with
    # pull-ups enabled the line reads high when idle, so "pressed" polarity
    # depends on the wiring — confirm against the hardware.
    def is_pressed_up(self):
        return GPIO.input(self.up)
    def is_pressed_down(self):
        return GPIO.input(self.down)
    def is_pressed_left(self):
        return GPIO.input(self.left)
    def is_pressed_right(self):
        return GPIO.input(self.right)
    def is_pressed_click(self):
        return GPIO.input(self.click)
| 1,525 | -6 | 238 |
c06b560fe7039c3f0eff10ffd490984fd0affdec | 2,129 | py | Python | app/lms/views/get_user.py | SaurabhPanja/redcarpet-lms | 6f3104a8b94fb27f0af57bc88f38c4929cfd03e8 | [
"MIT"
] | null | null | null | app/lms/views/get_user.py | SaurabhPanja/redcarpet-lms | 6f3104a8b94fb27f0af57bc88f38c4929cfd03e8 | [
"MIT"
] | null | null | null | app/lms/views/get_user.py | SaurabhPanja/redcarpet-lms | 6f3104a8b94fb27f0af57bc88f38c4929cfd03e8 | [
"MIT"
] | null | null | null | from django.http import JsonResponse
from lms.models import User
from rest_framework import status
import jwt
from app.settings import SECRET_KEY
from datetime import datetime, timedelta
import os
import sys
import json
import re
from django.core.exceptions import ValidationError
from pprint import pprint
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse
from lms.utils import *
@authorize_user | 30.414286 | 101 | 0.553781 | from django.http import JsonResponse
from lms.models import User
from rest_framework import status
import jwt
from app.settings import SECRET_KEY
from datetime import datetime, timedelta
import os
import sys
import json
import re
from django.core.exceptions import ValidationError
from pprint import pprint
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse
from lms.utils import *
@authorize_user
def get_user(request, id):
    """GET /users/<id>: return the requested user as JSON, enforcing roles.

    Access rules (derived from the bearer token in the Authorization header):
      * admin    -> may view any user
      * agent    -> may view customers and their own record
      * customer -> may view only their own record
    Returns 400 when the id matches no user, 403 when access is denied,
    and 500 on unexpected errors.
    """
    if request.method == "GET":
        error = False
        try:
            # Identify the caller from the JWT in the Authorization header.
            encoded = request.META.get('HTTP_AUTHORIZATION', '')
            email, role = decode_token(encoded)
            queried_user = User.objects.filter(pk=id)
            if queried_user.exists():
                queried_user = queried_user.first()
            else:
                return JsonResponse(
                    {
                        'message' : 'No user found. Bad Request',
                    }, status=status.HTTP_400_BAD_REQUEST)
            # dprint(queried_user.email)
            if role == 'admin':
                return JsonResponse({
                    'user' : json_user_obj(queried_user)
                }, status=status.HTTP_200_OK)
            if role == 'agent' and ( queried_user.role == 'customer' or queried_user.email == email):
                return JsonResponse({
                    'user' : json_user_obj(queried_user)
                }, status=status.HTTP_200_OK)
            if role == 'customer' and queried_user.email == email:
                return JsonResponse({
                    'user' : json_user_obj(queried_user)
                }, status=status.HTTP_200_OK)
            return JsonResponse(
                {
                    'message' : 'Forbidden request'
                }, status=status.HTTP_403_FORBIDDEN
            )
        except Exception as e:
            dprint(e)
            error = True
            excpetion_log()
        # NOTE(review): every path inside the try block returns, so this point
        # is only reached after an exception; the bad_request() branch is dead.
        # Also, non-GET requests fall through and implicitly return None.
        if error:
            return internal_server_error()
        else:
            return bad_request()
cc11208d8f68e92d2bf0b7a5d45a04af655caaf3 | 6,719 | py | Python | nrf5_mesh/tools/dfu/bootloader_verify.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 15 | 2019-02-25T20:25:29.000Z | 2021-02-27T17:57:38.000Z | nrf5_mesh/tools/dfu/bootloader_verify.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 3 | 2020-02-21T22:35:38.000Z | 2020-10-05T02:25:30.000Z | nrf5_mesh/tools/dfu/bootloader_verify.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 5 | 2019-06-29T21:03:57.000Z | 2021-06-15T06:16:20.000Z | # Copyright (c) 2010 - 2020, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import binascii
import subprocess
import shlex
import sys
import serial
import time
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Please provide the serial number of your device")
print_usage()
exit(1)
if len(sys.argv) < 3:
print("Please provide the COM port of your device")
print_usage()
exit(1)
try:
int(sys.argv[1])
except:
print("Invalid serial number " + sys.argv[1])
print_usage()
exit(1)
bootloader_addr = read_uicr(sys.argv[1])
read_device_page(sys.argv[1])
reset_device(sys.argv[1], sys.argv[2])
echo(sys.argv[2])
print("\nBootloader verification OK.")
| 37.327778 | 117 | 0.656943 | # Copyright (c) 2010 - 2020, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import binascii
import subprocess
import shlex
import sys
import serial
import time
def read_serial_event(s):
    """Block until a length-prefixed event arrives on serial port *s* and
    return its payload bytes (a single length byte, then that many bytes)."""
    evt = ""
    while True:
        length = s.read()
        # NOTE(review): if the port has a timeout, s.read() can return b'' and
        # ord() will raise TypeError — assumes a blocking port; confirm.
        if ord(length) > 0:
            evt = s.read(ord(length))
            return evt
def print_usage():
    """Print a blank line followed by the command-line usage string."""
    usage = "Usage:\tbootloader_verify.py <Segger serial-number> <COM-port>"
    print("")
    print(usage)
def nrfjprog(args):
    """Run ``nrfjprog`` with the given argument string and return its stdout.

    Exits the script with status 2 on any non-zero nrfjprog return code.
    """
    process = subprocess.Popen(shlex.split("nrfjprog " + args), stdout=subprocess.PIPE)
    out, err = process.communicate()
    # NOTE(review): Popen never returns None (a missing binary raises OSError/
    # FileNotFoundError instead), so the None checks below are vestigial and
    # the "Couldn't find nrfjprog" branch keys only on returncode == 2 — confirm.
    if process != None and process.returncode == 2:
        print("Couldn't find nrfjprog, exiting.")
        exit(2)
    if process == None or process.returncode != 0:
        print("Error calling nrfjprog with arguments " + args + ".")
        print(out)
        exit(2)
    return out
def read_uicr(serial_number):
    """Read the bootloader address from UICR (0x10001014) and sanity-check
    the bootloader's stack pointer at that address; exit(1) on failure.

    Returns the bootloader address as an uppercase hex string.
    """
    sys.stdout.write("Reading UICR..\t\t\t")
    read = nrfjprog("-s " + serial_number + " --memrd 0x10001014 --n 4 --w 32").strip()
    bootloader_addr = str(read).split()[1]
    # 0xFFFFFFFF means the UICR word was never programmed.
    if bootloader_addr == "FFFFFFFF":
        print("ERROR: UICR NOT SET.")
        print("Checkpoints:")
        print("\tHave you flashed the bootloader with nrfjprog?")
        print("\tDid you flash the Softdevice BEFORE the bootloader?")
        exit(1)
    read = nrfjprog("-s " + serial_number + " --memrd 0x" + bootloader_addr + " --n 4 --w 32").strip()
    bootloader_vector_pointer = str(read).split()[1]
    # NOTE(review): this is a lexicographic comparison of uppercase hex text,
    # standing in for "pointer below RAM start (0x20000000)"; it relies on
    # nrfjprog always printing fixed-width uppercase hex — confirm.
    if bootloader_vector_pointer < "20000000":
        print("ERROR: Bootloader vector pointer invalid.")
        print("Checkpoints:")
        print("\tHave you flashed the bootloader with nrfjprog?")
        print("\tDid you flash the Softdevice BEFORE the bootloader?")
        print("\tDid you erase the device before programming all the hex-files?")
        exit(1)
    if bootloader_vector_pointer == "FFFFFFFF":
        print("ERROR: Bootloader not present.")
        print("Checkpoints:")
        print("\tHave you flashed the bootloader with nrfjprog?")
        print("\tDid you flash the Softdevice BEFORE the bootloader?")
        print("\tDid you erase the device before programming all the hex-files?")
        exit(1)
    print("OK.")
    return bootloader_addr
def read_device_page(serial_number):
    """Locate the device page in the last flash page (size/count from FICR)
    and verify its header word; exit(1) if absent or invalid.

    Returns the device page header as a hex string (expected "08080104").
    """
    # need to know the flash size to get the device page. Can read this from the FICR:
    ficr = str(nrfjprog("-s " + serial_number + " --memrd 0x10000010 --n 8 --w 32").strip()).split()[1:]
    code_page_size = int(ficr[0], 16)
    code_page_count = int(ficr[1], 16)
    # The device page occupies the very last page of code flash.
    device_page_location = code_page_size * (code_page_count - 1)
    sys.stdout.write("Reading Device page..\t\t")
    device_page = nrfjprog("-s " + serial_number + " --memrd " + hex(device_page_location) + " --n 4 --w 32").strip()
    device_page_header = str(device_page).split()[1]
    # 0xFFFFFFFF = erased flash, i.e. no device page was ever written.
    if device_page_header == "FFFFFFFF":
        print("ERROR: DEVICE PAGE NOT PRESENT.")
        print("Checkpoints:")
        print("\tHave you flashed the device page?")
        exit(1)
    if device_page_header != "08080104":
        print("ERROR: DEVICE PAGE INVALID.")
        print("Checkpoints:")
        print("\tDid you erase the device before programming all the hex-files?")
        exit(1)
    print("OK.")
    return device_page_header
def reset_device(serial_number, port):
    """Reset the target via nrfjprog and check the start-up event emitted on
    the serial *port*; exit(1) on an unrecognized start sequence."""
    sys.stdout.write("Resetting device..\t\t")
    try:
        s = serial.Serial(port, 115200, rtscts = True)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # narrowing to serial.SerialException would be safer.
    except:
        print("ERROR: Could not open COM port " + port)
        exit(1)
    nrfjprog("-s " + serial_number + " --reset")
    time.sleep(0.2)
    response = read_serial_event(s)
    # 0x81 0x02 0x00: started in the application; 0x81 0x01 0x00: bootloader.
    if b"\x81\x02\x00" == response[:3]:
        print("OK (In application)")
    elif not b"\x81\x01\x00" in response:
        # NOTE(review): str + binascii.hexlify(response) concatenates str and
        # bytes, which raises TypeError on Python 3 — this error path needs
        # a .decode() (or f-string) to actually print; confirm target Python.
        print("ERROR: Invalid start sequence from bootloader: " + binascii.hexlify(response))
        print("Checkpoints:")
        print("\tHave you flashed the bootloader with nrfjprog?")
        print("\tDoes your bootloader have serial communication enabled?")
        s.close()
        exit(1)
    else:
        print("OK.")
    time.sleep(0.1)
    s.close()
def echo(port):
    """Send an echo packet over the serial *port* and require the echoed
    response prefix; exit(1) if the port cannot be opened or the reply is
    wrong."""
    sys.stdout.write("Checking serial connection..\t")
    try:
        s = serial.Serial(port, 115200, rtscts = True)
    except:
        print("ERROR: Could not open COM port " + port)
        exit(1)
    # Echo request: length 0x03, opcode 0x02, payload 0xAA 0xBB.
    s.write(b"\x03\x02\xaa\xbb")
    time.sleep(0.1)
    # Expected reply mirrors the request with the opcode's response bit set (0x82).
    if not s.read(4).startswith(b"\x03\x82\xaa\xbb"):
        print("ERROR: Invalid response!")
        print("Checkpoints:")
        s.close()
        exit(1)
    s.close()
    print("OK.")
if __name__ == "__main__":
    # Validate argv: argv[1] = Segger serial number (numeric), argv[2] = COM port.
    if len(sys.argv) < 2:
        print("Please provide the serial number of your device")
        print_usage()
        exit(1)
    if len(sys.argv) < 3:
        print("Please provide the COM port of your device")
        print_usage()
        exit(1)
    try:
        int(sys.argv[1])
    except:
        print("Invalid serial number " + sys.argv[1])
        print_usage()
        exit(1)
    # Each step exits with a diagnostic on failure, so reaching the end
    # means every check passed.
    bootloader_addr = read_uicr(sys.argv[1])
    read_device_page(sys.argv[1])
    reset_device(sys.argv[1], sys.argv[2])
    echo(sys.argv[2])
    print("\nBootloader verification OK.")
| 4,286 | 0 | 161 |
f73c725d0cdd3666e8953a2d622f89e473ce65e4 | 1,166 | py | Python | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 18 | 2017-09-01T12:26:12.000Z | 2022-02-23T02:31:29.000Z | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 19 | 2017-03-12T20:40:36.000Z | 2022-03-31T22:50:47.000Z | ms_deisotope/spectrum_graph.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 14 | 2016-05-06T02:25:30.000Z | 2022-03-31T14:40:06.000Z | from ms_deisotope._c.spectrum_graph import (
PathFinder,
MassWrapper,
PeakGroupNode,
PeakNode,
NodeBase,
Path,
SpectrumGraph)
amino_acids = [
MassWrapper('G', 57.02146372057),
MassWrapper('A', 71.03711378471),
MassWrapper('S', 87.03202840427),
MassWrapper('P', 97.05276384884999),
MassWrapper('V', 99.06841391299),
MassWrapper('T', 101.04767846841),
MassWrapper('C', 103.00918478471),
MassWrapper('J', 113.08406397713),
MassWrapper('N', 114.04292744114),
MassWrapper('D', 115.02694302383),
MassWrapper('Q', 128.05857750528),
MassWrapper('K', 128.094963014),
MassWrapper('E', 129.04259308797),
MassWrapper('M', 131.04048491299),
MassWrapper('H', 137.05891185845),
MassWrapper('F', 147.06841391299),
MassWrapper('R', 156.1011110236),
MassWrapper('Y', 163.06332853255),
MassWrapper('W', 186.07931294986),
]
| 30.684211 | 74 | 0.680103 | from ms_deisotope._c.spectrum_graph import (
PathFinder,
MassWrapper,
PeakGroupNode,
PeakNode,
NodeBase,
Path,
SpectrumGraph)
amino_acids = [
MassWrapper('G', 57.02146372057),
MassWrapper('A', 71.03711378471),
MassWrapper('S', 87.03202840427),
MassWrapper('P', 97.05276384884999),
MassWrapper('V', 99.06841391299),
MassWrapper('T', 101.04767846841),
MassWrapper('C', 103.00918478471),
MassWrapper('J', 113.08406397713),
MassWrapper('N', 114.04292744114),
MassWrapper('D', 115.02694302383),
MassWrapper('Q', 128.05857750528),
MassWrapper('K', 128.094963014),
MassWrapper('E', 129.04259308797),
MassWrapper('M', 131.04048491299),
MassWrapper('H', 137.05891185845),
MassWrapper('F', 147.06841391299),
MassWrapper('R', 156.1011110236),
MassWrapper('Y', 163.06332853255),
MassWrapper('W', 186.07931294986),
]
def find_paths(peaks, components=None, error_tolerance=1e-5, merge=False):
    """Enumerate mass-ladder paths through *peaks*.

    When *components* is omitted, the module-level ``amino_acids`` masses are
    used.  The actual search is delegated to :class:`PathFinder`; *merge*
    and *error_tolerance* are forwarded unchanged.
    """
    selected = amino_acids if components is None else components
    finder = PathFinder(selected, error_tolerance)
    return finder.paths(peaks, merge=merge)
| 234 | 0 | 23 |
64b796283abd0ea351e03bd6097b94b4b6fdc97a | 3,708 | py | Python | benchmarking_platform_p3/configuration_file_I.py | mc-robinson/benchmarking_platform_p23 | 200e6883cab30ab3dd2ddba0ecc48d1c570e8e2c | [
"Unlicense"
] | 5 | 2020-03-02T18:34:14.000Z | 2021-08-28T07:49:16.000Z | benchmarking_platform_p3/configuration_file_I.py | mc-robinson/benchmarking_platform_p23 | 200e6883cab30ab3dd2ddba0ecc48d1c570e8e2c | [
"Unlicense"
] | null | null | null | benchmarking_platform_p3/configuration_file_I.py | mc-robinson/benchmarking_platform_p23 | 200e6883cab30ab3dd2ddba0ecc48d1c570e8e2c | [
"Unlicense"
] | 1 | 2020-08-11T03:02:29.000Z | 2020-08-11T03:02:29.000Z | #
# $Id$
#
# configuration file for benchmarking platform
#
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# hardcoded global variables
# --- experiment parameters -------------------------------------------------
list_num_query_mols = [5, 10, 20]
num_reps = 50      # number of repetitions
percent_dcy = 0.2  # fraction of decoys used for training
p_value = 0.05     # confidence level for statistical analysis

# --- data-set identifiers --------------------------------------------------
# (uncomment a single-element subset for quick debugging runs)
muv_ids = [
    466, 548, 600, 644, 652, 689, 692, 712, 713,
    733, 737, 810, 832, 846, 852, 858, 859,
]
# muv_ids = [859]

dud_ids = [
    "ace", "ache", "ar", "cdk2", "cox2", "dhfr", "egfr",
    "er_agonist", "fgfr1", "fxa", "gpb", "gr", "hivrt",
    "inha", "na", "p38", "parp", "pdgfrb", "sahh", "src",
    "vegfr2",
]
# dud_ids = ["vegfr2"]

chembl_ids = [
    11359, 28, 11536, 8, 10434, 12670, 20014, 234, 12261, 12209,
    25, 36, 43, 219, 130, 105, 11336, 20174, 126, 11225,
    12252, 11682, 134, 116, 11265, 10475, 12679, 10579, 11575, 18061,
    237, 276, 11534, 10198, 10498, 12911, 12968, 100579, 100126, 10378,
    10417, 10752, 10773, 11631, 10927, 11085, 11442, 11279, 11488, 12840,
]
# chembl_ids = [12840]

# --- per-collection file-naming conventions --------------------------------
# For each collection: how active/decoy SD files are named and which SD
# property carries the molecule identifier.
set_data = {
    "MUV": {
        "fullname": "MUV",
        "ids": muv_ids,
        "prefix": "aid",
        "suffix": "_actives.sdf",
        "dcy_prefix": "aid",
        "dcy_suffix": "_decoys.sdf",
        "propName": "PUBCHEM_COMPOUND_CID",
        "dcy_propName": "PUBCHEM_COMPOUND_CID",
    },
    "DUD": {
        "fullname": "DUD",
        "ids": dud_ids,
        "prefix": "",
        "suffix": "_clustered_3D_MM.sdf",
        "dcy_prefix": "DUD_",
        "dcy_suffix": "_decoys_ID_pass_MWPass_I_MM.sdf",
        "propName": "id",
        "dcy_propName": "Mol_Title",
    },
    "ChEMBL": {
        "fullname": "ChEMBL-sereina/diverse_100",
        "ids": chembl_ids,
        "prefix": "Target_no_",
        "suffix": ".sdf",
        "dcy_name": "decoys_10000_zinc.sdf",
        "propName": "Name",
        "dcy_propName": "_Name",
    },
}
| 21.433526 | 76 | 0.639428 | #
# $Id$
#
# configuration file for benchmarking platform
#
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# hardcoded global variables
list_num_query_mols = [5, 10, 20]
num_reps = 50 # number of repetitions
percent_dcy = 0.2 # percentage of decoys used for training
p_value = 0.05 # confidence level for statistical analysis
# collection of data sets
muv_ids = [
466,
548,
600,
644,
652,
689,
692,
712,
713,
733,
737,
810,
832,
846,
852,
858,
859,
]
# muv_ids = [859]
dud_ids = [
"ace",
"ache",
"ar",
"cdk2",
"cox2",
"dhfr",
"egfr",
"er_agonist",
"fgfr1",
"fxa",
"gpb",
"gr",
"hivrt",
"inha",
"na",
"p38",
"parp",
"pdgfrb",
"sahh",
"src",
"vegfr2",
]
# dud_ids = ["vegfr2"]
chembl_ids = [
11359,
28,
11536,
8,
10434,
12670,
20014,
234,
12261,
12209,
25,
36,
43,
219,
130,
105,
11336,
20174,
126,
11225,
12252,
11682,
134,
116,
11265,
10475,
12679,
10579,
11575,
18061,
237,
276,
11534,
10198,
10498,
12911,
12968,
100579,
100126,
10378,
10417,
10752,
10773,
11631,
10927,
11085,
11442,
11279,
11488,
12840,
]
# chembl_ids = [12840]
set_data = {}
set_data["MUV"] = dict(
fullname="MUV",
ids=muv_ids,
prefix="aid",
suffix="_actives.sdf",
dcy_prefix="aid",
dcy_suffix="_decoys.sdf",
propName="PUBCHEM_COMPOUND_CID",
dcy_propName="PUBCHEM_COMPOUND_CID",
)
set_data["DUD"] = dict(
fullname="DUD",
ids=dud_ids,
prefix="",
suffix="_clustered_3D_MM.sdf",
dcy_prefix="DUD_",
dcy_suffix="_decoys_ID_pass_MWPass_I_MM.sdf",
propName="id",
dcy_propName="Mol_Title",
)
set_data["ChEMBL"] = dict(
fullname="ChEMBL-sereina/diverse_100",
ids=chembl_ids,
prefix="Target_no_",
suffix=".sdf",
dcy_name="decoys_10000_zinc.sdf",
propName="Name",
dcy_propName="_Name",
)
| 0 | 0 | 0 |
be2752c1c105c9e85d125d843959e412ce28c2f2 | 238 | py | Python | test.py | jmhuer/DJITelloAutonomy2 | d276f7727d3a14fadb54c04d0771839bbe3ae14c | [
"MIT"
] | null | null | null | test.py | jmhuer/DJITelloAutonomy2 | d276f7727d3a14fadb54c04d0771839bbe3ae14c | [
"MIT"
] | null | null | null | test.py | jmhuer/DJITelloAutonomy2 | d276f7727d3a14fadb54c04d0771839bbe3ae14c | [
"MIT"
] | null | null | null | import cv2
# Live webcam preview: show frames until 'q' is pressed.
camera = cv2.VideoCapture(0)
camera.set(3, 1280)  # property 3 = CAP_PROP_FRAME_WIDTH
while True:
    ret, image = camera.read()
    if not ret:
        # Grab failed (camera unplugged / busy): stop instead of passing
        # None to imshow, which would raise.
        break
    cv2.imshow('Webcam', image)
    # waitKey(1): poll for 1 ms so the stream keeps refreshing.
    # The original waitKey(0) blocked indefinitely on every frame,
    # freezing the "live" view until a key was pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()
| 14 | 41 | 0.634454 | import cv2
camera = cv2.VideoCapture(0)
camera.set(3, 1280)
while True:
ret, image = camera.read()
cv2.imshow('Webcam', image)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
d073f6b78b438a73b30cc80d16ab50c08aaa3b4f | 959 | py | Python | ml/statistics.py | vibour/emotion-theme-recognition | 9d67d88d3b672006379114d474a1aaef6538b9be | [
"MIT"
] | 7 | 2021-11-23T15:41:21.000Z | 2022-03-16T08:10:26.000Z | ml/statistics.py | vibour/emotion-theme-recognition | 9d67d88d3b672006379114d474a1aaef6538b9be | [
"MIT"
] | null | null | null | ml/statistics.py | vibour/emotion-theme-recognition | 9d67d88d3b672006379114d474a1aaef6538b9be | [
"MIT"
] | null | null | null | """Calculate mean and std of data"""
import math
from typing import Dict
import torch
from tqdm import tqdm
import ml.utils
from ml.experiment import Experiment
def calculate_stats(exp: Experiment) -> Dict[str, float]:
    """Calculate the mean and standard deviation of the training data.

    Streams every batch of ``exp.dls["train"]`` through the experiment's
    preprocessor and accumulates running first and second moments, so no
    data is materialised in memory.

    Returns a dict with keys ``"mean"`` and ``"std"``.
    """
    avg = ml.utils.RunningAverage()
    sq_avg = ml.utils.RunningAverage()
    proc = exp.preprocessor
    if proc is None:
        # No preprocessor configured: fall back to the identity transform so
        # the statistics describe the raw batches.  (This fallback was lost
        # in a previous edit, which left the loop nested under the `if` and
        # calling None(data).)
        def proc(arg):
            return arg
    with tqdm(total=len(exp.dls["train"]), desc="calculating stats",
              ncols=100) as tqbar:
        for data, _ in exp.dls["train"]:
            with torch.no_grad():
                data = data.to(exp.device)
                output = proc(data)
                avg.update(output.mean().item())
                sq_avg.update((output**2).mean().item())
                tqbar.set_postfix(mean=f"{avg():05.3f}")
                tqbar.update()
    mean = avg()
    # Var(X) = E[X^2] - E[X]^2
    return {"mean": mean, "std": math.sqrt(sq_avg() - mean**2)}
| 23.975 | 68 | 0.586027 | """Calculate mean and std of data"""
import math
from typing import Dict
import torch
from tqdm import tqdm
import ml.utils
from ml.experiment import Experiment
def calculate_stats(exp: Experiment) -> Dict[str, float]:
"""Calculate mean and std of data"""
avg = ml.utils.RunningAverage()
sq_avg = ml.utils.RunningAverage()
proc = exp.preprocessor
if proc is None:
def proc(arg):
return arg
with tqdm(total=len(exp.dls["train"]), desc="calculating stats",
ncols=100) as tqbar:
for data, _ in exp.dls["train"]:
with torch.no_grad():
data = data.to(exp.device)
output = proc(data)
avg.update(output.mean().item())
sq_avg.update((output**2).mean().item())
tqbar.set_postfix(mean=f"{avg():05.3f}")
tqbar.update()
mean = avg()
return {"mean": mean, "std": math.sqrt(sq_avg() - mean**2)}
| 16 | 0 | 31 |
7841965a569d4d2b02cd4d588131dbf92bae4896 | 822 | py | Python | day04/partone.py | ephraim/adventofcode | 52bc215b29c120d7c6f74413acb9091503d6f6ec | [
"MIT"
] | null | null | null | day04/partone.py | ephraim/adventofcode | 52bc215b29c120d7c6f74413acb9091503d6f6ec | [
"MIT"
] | null | null | null | day04/partone.py | ephraim/adventofcode | 52bc215b29c120d7c6f74413acb9091503d6f6ec | [
"MIT"
] | null | null | null | import os
import sys
import re


def validate(pp):
    """Return True when passport dict *pp* contains every required field.

    'cid' is deliberately optional (Advent of Code day 4, part one);
    extra keys are ignored.  Restores the helper that the script calls
    but which was missing, causing a NameError at runtime.
    """
    required = ("byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid")
    return all(key in pp for key in required)


if __name__ == "__main__":
    data = None
    wdir = os.path.dirname(sys.argv[0])
    with open(os.path.join(wdir, "input.txt")) as f:
        data = f.readlines()
    passports = 0
    pp = {}
    for d in data:
        d = d.strip("\n")
        if not d:
            # Blank line terminates a passport record.
            if validate(pp):
                passports += 1
            pp = {}
            continue
        for i in d.split(" "):
            tmp = i.split(":")
            pp[tmp[0]] = tmp[1]
    # The final record is not followed by a blank line.
    if validate(pp):
        passports += 1
    print("valid passports: %d" % passports)
| 21.631579 | 68 | 0.454988 | import os
import sys
import re
if __name__ == "__main__":
data = None
wdir = os.path.dirname(sys.argv[0])
with open(os.path.join(wdir, "input.txt")) as f:
data = f.readlines()
def validate(pp):
needed = [ "byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid" ]
valid = True
for k in needed:
if k not in pp.keys():
valid = False
break
return valid
passports = 0
pp = {}
for d in data:
d = d.strip("\n")
if not d:
if validate(pp):
passports += 1
pp = {}
continue
for i in d.split(" "):
tmp = i.split(":")
pp[tmp[0]] = tmp[1]
if validate(pp):
passports += 1
print("valid passports: %d"%passports)
| 220 | 0 | 27 |
ba6c51062c3cac058f91e598b07f579f1f6b5c4e | 18,279 | py | Python | app/utils.py | calcutec/flask-burtonblog | 23b02f81f7772f2f50a382bb885b4fb008e4f815 | [
"BSD-3-Clause"
] | null | null | null | app/utils.py | calcutec/flask-burtonblog | 23b02f81f7772f2f50a382bb885b4fb008e4f815 | [
"BSD-3-Clause"
] | null | null | null | app/utils.py | calcutec/flask-burtonblog | 23b02f81f7772f2f50a382bb885b4fb008e4f815 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from collections import OrderedDict
from app import app, db
from flask.ext.login import current_user
from config import ALLOWED_EXTENSIONS
from rauth import OAuth2Service
import json
import urllib2
from flask import request, redirect, url_for, render_template, g, jsonify, abort, flash
from flask.ext.login import login_user
from models import User, Post, ExifStats
from forms import CommentForm
from .emails import follower_notification
from .form_processor import UploadFormProcessor, LoginFormProcessor, PhotosFormProcessor,\
SignupFormProcessor, UpdateFormProcessor, CommentFormProcessor
| 41.637813 | 122 | 0.577986 | import datetime
from collections import OrderedDict
from app import app, db
from flask.ext.login import current_user
from config import ALLOWED_EXTENSIONS
from rauth import OAuth2Service
import json
import urllib2
from flask import request, redirect, url_for, render_template, g, jsonify, abort, flash
from flask.ext.login import login_user
from models import User, Post, ExifStats
from forms import CommentForm
from .emails import follower_notification
from .form_processor import UploadFormProcessor, LoginFormProcessor, PhotosFormProcessor,\
SignupFormProcessor, UpdateFormProcessor, CommentFormProcessor
class BasePage(object):
    """Base view-model for a site page.

    Gathers everything the templates need into ``self.assets`` (title,
    derived "entity", rendered forms, post collections, counts) from the
    current Flask request endpoint and the requested *category*, then
    renders either JSON (XHR) or the base HTML page.
    """

    def __init__(self, title=None, nickname=None, category="latest", post_id=None, form=None, person=None):
        self.posts = None
        self.form = form
        self.nickname = nickname
        self.post_id = post_id
        self.assets = dict()
        self.assets['category'] = category
        if title:
            self.assets['title'] = title
        else:
            # Default the page title to the Flask endpoint name.
            self.assets['title'] = request.endpoint
        # Resolve the subject user either by nickname lookup or directly.
        if self.nickname is not None:
            self.assets['person'] = User.query.filter_by(nickname=self.nickname).first()
        elif person is not None:
            self.assets['person'] = person
        self.get_entity()
        # Form-centric categories render a form; follow/unfollow/votes do
        # nothing here; everything else loads a post collection.
        if self.assets['category'] in ["login", "upload", "update", "signup", "comment", "updatephoto"]:
            self.get_rendered_form()
            if self.assets['category'] == "comment":
                self.get_posts()
        elif self.assets['category'] in ["follow", "unfollow", "votes"]:
            pass
        else:
            self.get_posts()

    def get_entity(self):
        """Derive assets['entity'] from category, endpoint and viewer.

        May leave it None when no rule matches.
        """
        entity = None
        if self.assets['category'] == "login":
            entity = "login"
        elif self.assets['category'] == "upload":
            entity = "upload"
        elif self.assets['category'] == "comment":
            entity = "photo"
        elif request.endpoint == 'home':
            entity = 'home'
        elif request.endpoint == 'photos':
            if self.post_id:
                entity = "photo"
            else:
                entity = "photos"
        elif request.endpoint == 'members' and 'person' in self.assets and self.assets['person'] == g.user:
            entity = "author"
        elif request.endpoint == 'members' and 'person' in self.assets and self.assets['person'] != g.user:
            entity = "member"
        elif request.endpoint == 'members':
            entity = "members"
        self.assets['entity'] = entity

    def get_rendered_form(self):
        """Render the HTML form for the current category into the assets.

        Raises KeyError if the category has no registered form processor.
        """
        processor_dict = {
            "upload": UploadFormProcessor,
            "signup": SignupFormProcessor,
            "photo": PhotosFormProcessor,
            "updatephoto": PhotosFormProcessor,
            "update": UpdateFormProcessor,
            "login": LoginFormProcessor,
            "comment": CommentFormProcessor
        }
        self.assets['body_form'] = processor_dict[self.assets['category']](page=self).rendered_form

    def get_posts(self):
        """Load the Post/User collection for the current entity.

        Also fills assets['category_counts'] with per-category post counts
        for the same filter, then narrows self.posts by the requested
        category ("latest" keeps the 10 newest).
        """
        posts_dict = {
            "home": {'obj': "post", 'filter': {'writing_type': 'op-ed'}},
            "photo": {'obj': "post", 'filter': {'id': self.post_id}},
            "photos": {'obj': "post", 'filter': {'writing_type': 'entry'}},
            "author": {'obj': "post", 'filter': {'author': current_user}},
            "members": {'obj': "user"},
        }
        if 'person' in self.assets:
            posts_dict['member'] = {'obj': "post", 'filter': {'author': self.assets['person']}}
        posts_dict = posts_dict[self.assets['entity']]
        if posts_dict['obj'] == "user":
            # NOTE(review): .all() yields a list here, while the Post branch
            # keeps a query; the trailing filter_by() branch below only works
            # for the query case.
            posts = User.query.order_by(User.last_seen.desc()).all()
        else:
            posts = Post.query.filter_by(**posts_dict['filter']).order_by(Post.timestamp.desc())
        # Get count of photos in each category owned by the above entities (author, member, photos, home)
        category_counts = dict()
        for value in db.session.query(Post.category).filter_by(**posts_dict['filter']).distinct():
            category_counts[value[0]] = int(db.session.query(Post).filter_by(**posts_dict['filter'])
                                            .filter(Post.category == value[0]).count())
        self.assets['category_counts'] = category_counts
        if self.assets['category'] in ["all", "vote", "follow", "unfollow", "comment"] or self.assets['category'] is None:
            self.posts = posts
        elif self.assets['category'] == "latest":
            self.posts = posts[0:10]
        else:
            self.posts = posts.filter_by(category=self.assets['category'])

    def get_asset(self, template=None, context=None):
        """Render *context* as JSON for XHR, else as the given template.

        With no explicit *template*, uses "<title>.html".
        """
        if request.is_xhr:
            asset = jsonify(context)
        else:
            if template is None:
                asset = render_template(self.assets['title'] + ".html", **context)
            else:
                asset = render_template(template, **context)
        return asset

    def render(self):
        """Produce the final response: a JSON payload for XHR requests,
        otherwise the rendered base.html page."""
        if request.is_xhr:
            response = dict()
            response['success'] = True
            response['authenticated'] = g.user.is_authenticated()
            if 'category' in self.assets:
                response['category'] = self.assets['category']
            if g.user.is_authenticated():
                response['usernickname'] = g.user.nickname
                response['userid'] = g.user.id
            # Exactly one payload key is chosen, in priority order.
            if 'collection' in self.assets:
                response['collection'] = self.assets['collection']
            elif 'category' in self.assets and self.assets['category'] in ["follow", "unfollow"]:
                response['user'] = self.assets['person'].json_view()
            elif 'category' in self.assets and self.assets['category'] == 'vote':
                response['photo'] = self.posts[0].json_view()
            elif 'category' in self.assets and self.assets['category'] == 'comment':
                response['comment'] = self.assets['body_form']
            elif 'body_form' in self.assets:
                response['uploadForm'] = self.assets['body_form']
            return json.dumps(response)
        else:
            context = {'assets': self.assets}
            page = render_template("base.html", **context)
            return page

    def __str__(self):
        # NOTE(review): assumes self.posts is a query; raises if it is a
        # list or None.
        return "%s has %s posts" % (self.assets['title'], self.posts.count())
class PhotoPage(BasePage):
    """Page assets for the home page, the photo archive and single photos."""

    def __init__(self, *args, **kwargs):
        super(PhotoPage, self).__init__(*args, **kwargs)
        # Pure-form categories need no page assets beyond the form itself.
        if self.assets['category'] not in ["login", "upload", "update", "signup"]:
            self.get_page_assets()

    def get_page_assets(self):
        """Fill assets['main_entry'] / assets['archives'] per entity.

        Handles "vote" as a side effect for single photos, and renders a
        comment form plus EXIF table on the photo detail page.
        """
        if self.assets['entity'] == "home":
            if request.is_xhr:
                pass
            else:
                home_context = {'hello': "hello world"}
                self.assets['main_entry'] = self.get_asset(template="home_page.html", context=home_context)
        elif self.assets['entity'] == "photo":
            if request.is_xhr:
                if self.assets['category'] == "vote":
                    self.vote()
            else:
                if self.assets['category'] == "vote":
                    self.vote()
                main_photo_context = {'post': self.posts[0]}
                self.assets['photo_id'] = self.posts[0].id
                self.assets['main_entry'] = self.get_asset(template="photo_detail.html", context=main_photo_context)
                self.assets['category'] = 'comment'
                form = CommentForm()
                exifdata = ExifStats.query.filter_by(post_id=self.posts[0].id).first()
                if exifdata:
                    # Build a sorted column -> value mapping from the ORM row.
                    exif_dict = dict((col, getattr(exifdata, col)) for col in exifdata.__table__.columns.keys())
                    exif_dict = OrderedDict(sorted(exif_dict.items()))
                else:
                    # Placeholder EXIF data shown when none is stored.
                    exif_dict = OrderedDict([
                        ('DateTime', datetime.datetime(2013, 11, 30, 10, 27, 8)),
                        ('ExposureProgram', u'Aperture priority'), ('FNumber', u'2.6'),
                        ('FocalLength', u'3.7'), ('FocalLengthIn35mmFilm', None),
                        ('LensModel', None), ('Make', u'SAMSUNG'),
                        ('Model', u'SGH-T999'), ('Orientation', u'top-left'),
                        ('PhotographicSensitivity', None),
                        ('Sharpness', None),
                        ('ShutterSpeedValue', None),
                        ('id', 731),
                        ('post_id', 120)])
                story_context = {'post': self.posts[0], 'form': form, 'exifFields': exif_dict}
                self.assets['archives'] = render_template("story_detail.html", **story_context)
        elif self.assets['entity'] == "photos":
            if request.is_xhr:
                self.assets['collection'] = [i.json_view() for i in self.posts]
            else:
                # self.posts may be a sliced list ("latest") or a query.
                if type(self.posts) == list and len(self.posts) > 0:
                    main_photo_context = {'post': self.posts[0], 'assets': self.assets}
                    archive_photos_context = {'posts': self.posts[1:], 'assets': self.assets}
                    self.assets['main_entry'] = self.get_asset(template="main_entry.html", context=main_photo_context)
                    self.assets['archives'] = self.get_asset(template="archives.html", context=archive_photos_context)
                elif self.posts and self.posts.count() > 0:
                    main_photo_context = {'post': self.posts[0], 'assets': self.assets}
                    archive_photos_context = {'posts': self.posts[1:], 'assets': self.assets}
                    self.assets['main_entry'] = self.get_asset(template="main_entry.html", context=main_photo_context)
                    self.assets['archives'] = self.get_asset(template="archives.html", context=archive_photos_context)

    def vote(self):
        """Record the current user's vote on self.post_id (404 if unset)."""
        post_id = self.post_id
        user_id = g.user.id
        if not post_id:
            abort(404)
        post = Post.query.get_or_404(int(post_id))
        post.vote(user_id=user_id)

    def __str__(self):
        return "This is the %s page" % self.assets['title']
class MembersPage(BasePage):
    """Page assets for member profiles, the member list and follow actions."""

    def __init__(self, *args, **kwargs):
        super(MembersPage, self).__init__(*args, **kwargs)
        # Pure-form categories need no page assets beyond the form itself.
        if self.assets['category'] not in ["login", "upload", "update", "signup"]:
            self.get_page_assets()

    def get_page_assets(self):
        """Fill profile or member-list assets; apply follow/unfollow.

        For XHR follow/unfollow the updated counters are returned; for
        full-page requests the profile and archive templates are rendered
        first, then the follow action is applied.
        """
        if self.assets['entity'] == "author" or self.assets['entity'] == "member":
            if request.is_xhr:
                if self.assets['category'] == "follow":
                    self.follow()
                    self.assets['followers'] = self.assets['person'].followed.count()
                    self.assets['followed'] = self.assets['person'].followers.count()
                if self.assets['category'] == "unfollow":
                    self.unfollow()
                    self.assets['followers'] = self.assets['person'].followed.count()
                    self.assets['followed'] = self.assets['person'].followers.count()
            else:
                user_context = {'post': self.assets['person']}
                self.assets['main_entry'] = self.get_asset(template='person.html', context=user_context)
                archive_photos_context = {'posts': self.posts, 'assets': self.assets}
                self.assets['archives'] = self.get_asset(template="archives.html", context=archive_photos_context)
                if self.assets['category'] == "follow":
                    self.follow()
                if self.assets['category'] == "unfollow":
                    self.unfollow()
        elif self.assets['entity'] == "members":
            if request.is_xhr:
                self.assets['collection'] = [i.json_view() for i in self.posts]
            else:
                members_context = {'posts': self.posts, 'assets': self.assets}
                self.assets['archives'] = self.get_asset(template="members.html", context=members_context)

    def follow(self):
        """Make g.user follow the page's person; flash status and notify.

        NOTE(review): the redirect() return values are discarded by
        get_page_assets -- only the flash messages take effect there.
        """
        user = self.assets['person']
        if user is None:
            flash('User %s not found.' % self.nickname)
            return redirect(url_for('home'))
        if user == g.user:
            flash('You can\'t follow yourself!')
            return redirect(redirect_url())
        u = g.user.follow(user)
        if u is None:
            flash('Cannot follow %s.' % self.nickname)
            return redirect(redirect_url())
        db.session.add(u)
        db.session.commit()
        flash('You are now following %s.' % self.nickname)
        follower_notification(user, g.user)

    def unfollow(self):
        """Make g.user unfollow the page's person; flash status."""
        user = self.assets['person']
        if user is None:
            flash('User %s not found.' % self.nickname)
            return redirect(redirect_url())
        if user == g.user:
            flash('You can\'t unfollow yourself!')
            return redirect(redirect_url())
        u = g.user.unfollow(user)
        if u is None:
            flash('Cannot unfollow %s.' % self.nickname)
            return redirect(redirect_url())
        db.session.add(u)
        db.session.commit()
        flash('You have stopped following %s.' % self.nickname)

    def __str__(self):
        return "This is the %s page" % self.assets['title']
class SignupPage(BasePage):
    """Page object backing the signup view."""

    def __init__(self, *args, **kwargs):
        super(SignupPage, self).__init__(*args, **kwargs)
        self.get_page_assets()

    def get_page_assets(self):
        # The signup page has no extra assets beyond the rendered form.
        pass

    def save_user(self, form):
        """Create a User from the signup form, persist it and log it in."""
        newuser = User(form.firstname.data, form.email.data, firstname=form.firstname.data, lastname=form.lastname.data,
                       password=form.password.data, photo="profile.jpg")
        db.session.add(newuser)
        db.session.commit()
        login_user(newuser)
        return newuser

    def __str__(self):
        return "This is the %s page" % self.assets['title']
class LoginPage(BasePage):
    """Page object backing the login view."""

    def __init__(self, *args, **kwargs):
        super(LoginPage, self).__init__(*args, **kwargs)

    def login_returning_user(self, form):
        """Look up the account by the submitted e-mail and start a session.

        NOTE(review): no existence check -- login_user(None) is called when
        the e-mail is unknown; confirm the caller validates first.
        """
        returninguser = User.query.filter_by(email=form.email.data).first()
        login_user(returninguser)
        return returninguser

    def __str__(self):
        return "This is the %s page" % self.assets['title']
def allowed_file(filename):
    """Return True when *filename* carries an extension listed in
    ALLOWED_EXTENSIONS (text after the last dot, compared verbatim)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
class OAuthSignIn(object):
    """Base class for OAuth providers; subclasses self-register by name."""

    # Lazily built registry mapping provider name -> provider instance.
    providers = None

    def __init__(self, provider_name):
        self.provider_name = provider_name
        credentials = app.config['OAUTH_CREDENTIALS'][provider_name]
        self.consumer_id = credentials['id']
        self.consumer_secret = credentials['secret']

    def authorize(self):
        # Subclasses redirect the browser to the provider's consent screen.
        pass

    def callback(self):
        # Subclasses exchange the auth code and return (nickname, email).
        pass

    def get_callback_url(self):
        """URL the provider redirects back to after authorization."""
        # next_url = request.args.get('next') or "/people"
        # return url_for('login', provider=self.provider_name, next=next_url, _external=True) Redirect to original page
        return url_for('login', provider=self.provider_name, _external=True)

    @classmethod
    def get_provider(cls, provider_name):
        """Return the provider instance, instantiating all subclasses once."""
        if cls.providers is None:
            cls.providers = {}
            for provider_class in cls.__subclasses__():
                provider = provider_class()
                cls.providers[provider.provider_name] = provider
        return cls.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
    """Facebook OAuth2 provider (rauth-based)."""

    def __init__(self):
        super(FacebookSignIn, self).__init__('facebook')
        self.service = OAuth2Service(
            name='facebook',
            client_id=self.consumer_id,
            client_secret=self.consumer_secret,
            authorize_url='https://graph.facebook.com/oauth/authorize',
            access_token_url='https://graph.facebook.com/oauth/access_token',
            base_url='https://graph.facebook.com/'
        )

    def authorize(self):
        """Redirect the browser to Facebook's consent screen."""
        return redirect(self.service.get_authorize_url(
            scope='email',
            response_type='code',
            redirect_uri=self.get_callback_url())
        )

    def callback(self):
        """Exchange the auth code for a session; return (nickname, email).

        Returns (None, None) when the callback carries no 'code' (e.g. the
        user denied access).
        """
        if 'code' not in request.args:
            # Fix: previously returned a 3-tuple here while the success path
            # returns two values, so callers unpacking two values raised.
            return None, None
        oauth_session = self.service.get_auth_session(
            data={'code': request.args['code'],
                  'grant_type': 'authorization_code',
                  'redirect_uri': self.get_callback_url()}
        )
        me = oauth_session.get('me').json()
        # Derive a site nickname from the e-mail local part, then sanitize
        # and de-duplicate it.
        nickname = me.get('email').split('@')[0]
        nickname = User.make_valid_nickname(nickname)
        nickname = User.make_unique_nickname(nickname)
        return nickname, me.get('email')
class GoogleSignIn(OAuthSignIn):
    """Google OAuth2/OpenID-Connect provider (rauth-based)."""

    def __init__(self):
        super(GoogleSignIn, self).__init__('google')
        # Discover Google's current endpoints instead of hard-coding them.
        googleinfo = urllib2.urlopen('https://accounts.google.com/.well-known/openid-configuration')
        google_params = json.load(googleinfo)
        self.service = OAuth2Service(
            name='google',
            client_id=self.consumer_id,
            client_secret=self.consumer_secret,
            authorize_url=google_params.get('authorization_endpoint'),
            base_url=google_params.get('userinfo_endpoint'),
            access_token_url=google_params.get('token_endpoint')
        )

    def authorize(self):
        """Redirect the browser to Google's consent screen."""
        return redirect(self.service.get_authorize_url(
            scope='email',
            response_type='code',
            redirect_uri=self.get_callback_url())
        )

    def callback(self):
        """Exchange the auth code for a session; return (nickname, email).

        Returns (None, None) when the callback carries no 'code'.
        """
        if 'code' not in request.args:
            # Fix: previously returned a 3-tuple here while the success path
            # returns two values, so callers unpacking two values raised.
            return None, None
        oauth_session = self.service.get_auth_session(
            data={'code': request.args['code'],
                  'grant_type': 'authorization_code',
                  'redirect_uri': self.get_callback_url()},
            decoder=json.loads
        )
        me = oauth_session.get('').json()
        # Sanitize and de-duplicate the Google display name as the nickname.
        nickname = me['name']
        nickname = User.make_valid_nickname(nickname)
        nickname = User.make_unique_nickname(nickname)
        return nickname, me['email']
def redirect_url(default='home'):
    """Pick the post-action destination: explicit ?next= parameter first,
    then the HTTP referrer, finally the *default* endpoint."""
    for candidate in (request.args.get('next'), request.referrer):
        if candidate:
            return candidate
    return url_for(default)
| 16,424 | 227 | 1,006 |
e2fa303c1a689d78ea83cd9d4061355ca64bc22b | 981 | py | Python | easyjsonparser/null.py | xatavian/easyjsonparser | 688e698ca2e19075f84f76e6cb05a4bec5ae304c | [
"MIT"
] | 3 | 2019-06-13T17:42:25.000Z | 2021-09-04T17:56:13.000Z | easyjsonparser/null.py | xatavian/easyjsonparser | 688e698ca2e19075f84f76e6cb05a4bec5ae304c | [
"MIT"
] | null | null | null | easyjsonparser/null.py | xatavian/easyjsonparser | 688e698ca2e19075f84f76e6cb05a4bec5ae304c | [
"MIT"
] | null | null | null | from .value import _Value, _ValueInstance, _raise_bad_value_error
from .helper import Empty
| 31.645161 | 99 | 0.638124 | from .value import _Value, _ValueInstance, _raise_bad_value_error
from .helper import Empty
class Null(_Value):
def compute_instance_type(self):
result_type = type("NullInstance",
(_NullInstance, ),
self._default_value_instance_params())
return result_type
def check_params(self):
if self.default is not None:
_raise_bad_value_error(self.default, "Default value of a Null value can only be None.")
super().check_params()
class _NullInstance(_ValueInstance):
def compute_to_json(self):
return "null"
def check_and_sanitize_input(self, value):
if value is None:
return value
elif type(self) is type(value):
return value.value
elif value is not Empty:
_raise_bad_value_error(value, self.__property_name__, "None expected")
else:
return super().check_and_sanitize_input(value)
| 722 | 13 | 152 |
6fc139c628fff968c9bbedb45a1d87648ceaa785 | 1,226 | py | Python | 90.py | StormyaP/Voith_Hackathon | 3e7fdca617bec41172892b5960fa394c9a7643dc | [
"MIT"
] | null | null | null | 90.py | StormyaP/Voith_Hackathon | 3e7fdca617bec41172892b5960fa394c9a7643dc | [
"MIT"
] | null | null | null | 90.py | StormyaP/Voith_Hackathon | 3e7fdca617bec41172892b5960fa394c9a7643dc | [
"MIT"
] | null | null | null | import csv
import numpy as np
if __name__ == '__main__':
main()
print("End") | 35.028571 | 88 | 0.588907 | import csv
import numpy as np
def main():
filename = 'data/test_bench_validate.csv'
with open(filename, newline='') as csvfile:
testbench_list = list(csv.reader(csvfile, delimiter=',', quotechar='|'))
filename = 'data/assembly_validate.csv'
with open(filename, newline='') as csvfile:
assembly_list = list(csv.reader(csvfile, delimiter=',', quotechar='|'))
filename = 'data/field_failure_validate.csv'
with open(filename, newline='') as csvfile:
failure_list = list(csv.reader(csvfile, delimiter=',', quotechar='|'))
for row in testbench_list:
for r1 in assembly_list:
if row[3] == r1[1]:
row.append(r1[2:])
for r2 in failure_list:
if r2[1] == row[3]:
with open("data/90_validate_neg.csv", 'a', newline='') as outputcsvfile:
writer1 = csv.writer(outputcsvfile, delimiter=',')
writer1.writerow(row)
with open("data/90_validate_pos.csv", 'a', newline='') as outputcsvfile:
writer1 = csv.writer(outputcsvfile, delimiter=',')
writer1.writerow(row)
if __name__ == '__main__':
main()
print("End") | 1,116 | 0 | 23 |