# NOTE: the two lines below are a dataset-extraction artifact (a hosting
# dataset's column header, not part of west); kept as comments so the
# module remains parseable:
# content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k
# |---|---|---|---|
# Copyright (c) 2018, 2019, 2020 Nordic Semiconductor ASA
# Copyright 2018, 2019 Foundries.io Ltd
#
# SPDX-License-Identifier: Apache-2.0
'''
Parser and abstract data types for west manifests.
'''
import configparser
import enum
import errno
import logging
import os
from pathlib import PurePosixPath, Path
import re
import shlex
import subprocess
import sys
from typing import Any, Callable, Dict, Iterable, List, NoReturn, \
NamedTuple, Optional, Set, Tuple, TYPE_CHECKING, Union
from packaging.version import parse as parse_version
import pykwalify.core
import yaml
from west import util
from west.util import PathType
import west.configuration as cfg
#
# Public constants
#
#: Index in a Manifest.projects attribute where the `ManifestProject`
#: instance for the workspace is stored.
MANIFEST_PROJECT_INDEX = 0
#: A git revision which points to the most recent `Project` update.
MANIFEST_REV_BRANCH = 'manifest-rev'
#: A fully qualified reference to `MANIFEST_REV_BRANCH`.
QUAL_MANIFEST_REV_BRANCH = 'refs/heads/' + MANIFEST_REV_BRANCH
#: Git ref space used by west for internal purposes.
QUAL_REFS_WEST = 'refs/west/'
#: The latest manifest schema version supported by this west program.
#:
#: This value changes when a new version of west includes new manifest
#: file features not supported by earlier versions of west.
SCHEMA_VERSION = '0.10'
# MAINTAINERS:
#
# If you want to update the schema version, you need to make sure that
# it has the exact same value as west.version.__version__ when the
# next release is cut.
#
# Internal helpers
#
# Type aliases
# The value of a west-commands as passed around during manifest
# resolution. It can become a list due to resolving imports, even
# though it's just a str in each individual file right now.
WestCommandsType = Union[str, List[str]]
# Type for the importer callback passed to the manifest constructor.
# (ImportedContentType is just an alias for what it gives back.)
ImportedContentType = Optional[Union[str, List[str]]]
ImporterType = Callable[['Project', str], ImportedContentType]
# Type for an import map filter function, which takes a Project and
# returns a bool. The various allowlists and blocklists are used to
# create these filter functions. A None value is treated as a function
# which always returns True.
ImapFilterFnType = Optional[Callable[['Project'], bool]]
# A list of group names to enable and disable, like ['+foo', '-bar'].
GroupFilterType = List[str]
# A list of group names belonging to a project, like ['foo', 'bar']
GroupsType = List[str]
# The parsed contents of a manifest YAML file as returned by _load(),
# after sanitychecking with validate().
ManifestDataType = Union[str, Dict]
# Logging
# Module-level logger for this file; handlers are configured by
# whatever application embeds west (see uses in Project.git() etc.).
_logger = logging.getLogger(__name__)
# Type for the submodule value passed through the manifest file.
class Submodule(NamedTuple):
    '''Represents a Git submodule within a project.'''
    # Submodule path as passed through from the manifest data;
    # presumably relative to the project's directory — TODO confirm
    # against the update code that consumes it.
    path: str
    # Optional submodule name; None when the manifest gives no name.
    name: Optional[str] = None
# Submodules may be a list of values or a bool.
SubmodulesType = Union[List[Submodule], bool]
# Manifest locating, parsing, loading, etc.
# Default fetch revision for projects that don't specify one
# (see Project.__init__).
_DEFAULT_REV = 'master'
_WEST_YML = 'west.yml'
_SCHEMA_PATH = os.path.join(os.path.dirname(__file__), "manifest-schema.yml")
_SCHEMA_VER = parse_version(SCHEMA_VERSION)
_EARLIEST_VER_STR = '0.6.99' # we introduced the version feature after 0.6
_VALID_SCHEMA_VERS = [_EARLIEST_VER_STR, '0.7', '0.8', '0.9', SCHEMA_VERSION]
# Manifest import handling
# Matches what is_group() rejects: a leading '+' or '-', or any
# whitespace, comma, or colon anywhere in a group name.
_RESERVED_GROUP_RE = re.compile(r'(^[+-]|[\s,:])')
# Matches path separators; presumably used to reject '/' and '\'
# in project names elsewhere in this module — not visible in this chunk.
_INVALID_PROJECT_NAME_RE = re.compile(r'([/\\])')
#
# Public functions
#
def manifest_path() -> str:
    '''Absolute path of the manifest file in the current workspace.
    Exceptions raised:
        - `west.util.WestNotFound` if called from outside of a west
          workspace
        - `MalformedConfig` if the configuration file has no
          ``manifest.path`` key
        - ``FileNotFoundError`` if no manifest file exists as determined by
          ``manifest.path`` and ``manifest.file``
    '''
    (mpath, mname) = _mpath()
    ret = os.path.join(util.west_topdir(), mpath, mname)
    if not os.path.isfile(ret):
        # Raise FileNotFoundError directly: it is the documented OSError
        # subclass for ENOENT, and naming it is clearer than relying on
        # OSError's errno-based subclass mapping to produce it.
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), ret)
    return ret
def validate(data: Any) -> None:
    '''Validate manifest data
    Raises an exception if the manifest data is not valid for loading
    by this version of west. (Actually attempting to load the data may
    still fail if the it contains imports which cannot be resolved.)
    :param data: YAML manifest data as a string or object
    '''
    # Normalize strings to parsed YAML, rejecting anything that is not
    # (or does not parse to) a dictionary.
    if isinstance(data, str):
        original_str = data
        data = _load(data)
        if not isinstance(data, dict):
            raise MalformedManifest(f'{original_str} is not a YAML dictionary')
    elif not isinstance(data, dict):
        raise TypeError(f'{data} has type {type(data)}, '
                        'expected valid manifest data')
    if 'manifest' not in data:
        raise MalformedManifest('manifest data contains no "manifest" key')
    data = data['manifest']
    # Make sure this version of west can load this manifest data.
    # This has to happen before the schema check -- later schemas
    # may incompatibly extend this one.
    if 'version' in data:
        version = data['version']
        # As a convenience for the user, a bare YAML number like
        # "version: 0.8" is converted to a string; remember that we
        # did so to improve the error message below.
        casted_to_str = not isinstance(version, str)
        min_version_str = str(version) if casted_to_str else version
        min_version = parse_version(min_version_str)
        if min_version > _SCHEMA_VER:
            raise ManifestVersionError(min_version_str)
        if min_version_str not in _VALID_SCHEMA_VERS:
            msg = (f'invalid version {min_version_str}; must be one of: ' +
                   ', '.join(_VALID_SCHEMA_VERS))
            if casted_to_str:
                msg += ('. Do you need to quote the value '
                        '(e.g. "0.10" instead of 0.10)?')
            raise MalformedManifest(msg)
    # Finally, check the manifest against the schema file.
    try:
        pykwalify.core.Core(source_data=data,
                            schema_files=[_SCHEMA_PATH]).validate()
    except pykwalify.errors.SchemaError as se:
        raise MalformedManifest(se.msg) from se
# A 'raw' element in a project 'groups:' or manifest 'group-filter:' list,
# as it is parsed from YAML, before conversion to string.
RawGroupType = Union[str, int, float]

def is_group(raw_group: RawGroupType) -> bool:
    '''Is a 'raw' project group value 'raw_group' valid?
    Valid groups are strings that don't contain whitespace, commas
    (","), or colons (":"), and do not start with "-" or "+".
    As a special case, groups may also be nonnegative numbers, to
    avoid forcing users to quote these values in YAML files.
    :param raw_group: the group value to check
    '''
    # Why these characters are reserved:
    #
    # - leading "-"/"+" would be ambiguous with the "disable group" /
    #   "enable group" syntax used in group filters
    # - "," is the separator character in manifest.group-filter and
    #   'west update --group-filter'
    # - whitespace is banned mostly to guarantee that printing
    #   comma-separated lists of groups won't cause 'word' breaks
    #   in 'west list' pipelines to cut(1) or similar
    # - ":" reserves namespace for potential future extensions like
    #   "--group-filter=path-prefix:foo"
    if isinstance(raw_group, (float, int)):
        # Numeric YAML scalars are valid groups as long as they are
        # nonnegative.
        return raw_group >= 0
    # Same pattern as _RESERVED_GROUP_RE; re caches the compiled form.
    return bool(raw_group and not re.search(r'(^[+-]|[\s,:])', raw_group))
#
# Exception types
#
# Raised by validate() and during Manifest construction when the
# manifest data itself is bad (see e.g. the version checks above).
class MalformedManifest(Exception):
    '''Manifest parsing failed due to invalid data.
    '''
# Raised when the workspace configuration (e.g. manifest.path) is
# unusable; see manifest_path().
class MalformedConfig(Exception):
    '''The west configuration was malformed in a way that made a
    manifest operation fail.
    '''
class ManifestImportFailed(Exception):
    '''An operation required to resolve a manifest failed.
    Attributes:
    - ``project``: the Project instance with the missing manifest data
    - ``filename``: the missing file, as a str
    '''
    def __init__(self, project: Optional['Project'] = None,
                 filename: Optional[str] = None):
        '''
        :param project: the Project whose manifest data is missing
        :param filename: the missing file
        '''
        super().__init__(project, filename)
        # Stash the context advertised in the class docstring; without
        # this __init__, the documented attributes were never set.
        self.project = project
        self.filename = filename
class ManifestVersionError(Exception):
    '''The manifest required a version of west more recent than the
    current version.
    Attributes:
    - ``version``: the minimum manifest schema version demanded
    - ``file``: the file that demanded it, or None if unknown
    '''
    def __init__(self, version: str, file: Any = None):
        '''
        :param version: the offending ``manifest: version:`` value
        :param file: optional path to the manifest file
        '''
        super().__init__(version, file)
        # Without these attributes, re-raising with extra context
        # (ManifestVersionError(mv.version, file=...)) fails: plain
        # Exception neither stores .version nor accepts 'file'.
        self.version = version
        self.file = file
#
# The main Manifest class and its public helper types, like Project
# and ImportFlag.
#
class ImportFlag(enum.IntFlag):
    '''Bit flags for handling imports when resolving a manifest.
    Note that any "path-prefix:" values set in an "import:" still take
    effect for the project itself even when IGNORE or IGNORE_PROJECTS are
    given. For example, in this manifest::
        manifest:
          projects:
          - name: foo
            import:
              path-prefix: bar
    Project 'foo' has path 'bar/foo' regardless of whether IGNORE or
    IGNORE_PROJECTS is given. This ensures the Project has the same path
    attribute as it normally would if imported projects weren't being
    ignored.
    '''
    # Values are distinct bits so flags can be OR'd together (IntFlag).
    #: The default value, 0, reads the file system to resolve
    #: "self: import:", and runs git to resolve a "projects:" import.
    DEFAULT = 0
    #: Ignore projects added via "import:" in "self:" and "projects:"
    IGNORE = 1
    #: Always invoke importer callback for "projects:" imports
    FORCE_PROJECTS = 2
    #: Ignore projects added via "import:" : in "projects:" only;
    #: including any projects added via "import:" : in "self:"
    IGNORE_PROJECTS = 4
class Project:
    '''Represents a project defined in a west manifest.
    Attributes:
    - ``name``: project's unique name
    - ``url``: project fetch URL
    - ``revision``: revision to fetch from ``url`` when the
      project is updated
    - ``path``: relative path to the project within the workspace
      (i.e. from ``topdir`` if that is set)
    - ``abspath``: absolute path to the project in the native path name
      format (or ``None`` if ``topdir`` is ``None``)
    - ``posixpath``: like ``abspath``, but with slashes (``/``) as
      path separators
    - ``clone_depth``: clone depth to fetch when first cloning the
      project, or ``None`` (the revision should not be a SHA
      if this is used)
    - ``west_commands``: list of YAML files where extension commands in
      the project are declared
    - ``topdir``: the top level directory of the west workspace
      the project is part of, or ``None``
    - ``remote_name``: the name of the remote which should be set up
      when the project is being cloned (default: 'origin')
    - ``groups``: the project's groups (as a list) as given in the manifest.
      If the manifest data contains no groups for the project, this is
      an empty list.
    - ``submodules``: the project's submodules configuration; either
      a list of Submodule objects, or a boolean.
    - ``userdata``: the parsed 'userdata' field in the manifest, or None
    '''
def __init__(self, name: str, url: str,
revision: Optional[str] = None,
path: Optional[PathType] = None,
submodules: SubmodulesType = False,
clone_depth: Optional[int] = None,
west_commands: Optional[WestCommandsType] = None,
topdir: Optional[PathType] = None,
remote_name: Optional[str] = None,
groups: Optional[GroupsType] = None,
userdata: Optional[Any] = None):
'''Project constructor.
If *topdir* is ``None``, then absolute path attributes
(``abspath`` and ``posixpath``) will also be ``None``.
:param name: project's ``name:`` attribute in the manifest
:param url: fetch URL
:param revision: fetch revision
:param path: path (relative to topdir), or None for *name*
:param submodules: submodules to pull within the project
:param clone_depth: depth to use for initial clone
:param west_commands: path to a west commands specification YAML
file in the project, relative to its base directory,
or list of these
:param topdir: the west workspace's top level directory
:param remote_name: the name of the remote which should be
set up if the project is being cloned (default: 'origin')
:param groups: a list of groups found in the manifest data for
the project, after conversion to str and validation.
'''
self.name = name
self.url = url
self.submodules = submodules
self.revision = revision or _DEFAULT_REV
self.clone_depth = clone_depth
self.path = os.fspath(path or name)
self.west_commands = _west_commands_list(west_commands)
self.topdir = os.fspath(topdir) if topdir else None
self.remote_name = remote_name or 'origin'
self.groups: GroupsType = groups or []
self.userdata: Any = userdata
@property
@path.setter
@property
@property
@property
def as_dict(self) -> Dict:
'''Return a representation of this object as a dict, as it
would be parsed from an equivalent YAML manifest.
'''
ret: Dict = {}
ret['name'] = self.name
ret['url'] = self.url
ret['revision'] = self.revision
if self.path != self.name:
ret['path'] = self.path
if self.clone_depth:
ret['clone-depth'] = self.clone_depth
if self.west_commands:
ret['west-commands'] = \
_west_commands_maybe_delist(self.west_commands)
if self.groups:
ret['groups'] = self.groups
if self.userdata:
ret['userdata'] = self.userdata
return ret
    #
    # Git helpers
    #
    def git(self, cmd: Union[str, List[str]],
            extra_args: Iterable[str] = (),
            capture_stdout: bool = False,
            capture_stderr: bool = False,
            check: bool = True,
            cwd: Optional[PathType] = None) -> subprocess.CompletedProcess:
        '''Run a git command in the project repository.
        :param cmd: git command as a string (or list of strings)
        :param extra_args: sequence of additional arguments to pass to
            the git command (useful mostly if *cmd* is a string).
        :param capture_stdout: if True, git's standard output is
            captured in the ``CompletedProcess`` instead of being
            printed.
        :param capture_stderr: Like *capture_stdout*, but for standard
            error. Use with caution: this may prevent error messages
            from being shown to the user.
        :param check: if given, ``subprocess.CalledProcessError`` is
            raised if git finishes with a non-zero return code
        :param cwd: directory to run git in (default: ``self.abspath``)
        '''
        # Accept both a pre-split argument list and a single string
        # like 'rev-parse HEAD' (split with shell-like quoting rules).
        if isinstance(cmd, str):
            cmd_list = shlex.split(cmd)
        else:
            cmd_list = list(cmd)
        extra_args = list(extra_args)
        if cwd is None:
            if self.abspath is not None:
                cwd = self.abspath
            else:
                raise ValueError('no abspath; cwd must be given')
        elif sys.version_info < (3, 6, 1) and not isinstance(cwd, str):
            # Popen didn't accept a PathLike cwd on Windows until
            # python v3.7; this was backported onto cpython v3.6.1,
            # though. West currently supports "python 3.6", though, so
            # in the unlikely event someone is running 3.6.0 on
            # Windows, do the right thing.
            cwd = os.fspath(cwd)
        args = ['git'] + cmd_list + extra_args
        cmd_str = util.quote_sh_list(args)
        _logger.debug(f"running '{cmd_str}' in {cwd}")
        # Un-captured streams are inherited from the west process, so
        # git output goes straight to the user's terminal.
        popen = subprocess.Popen(
            args, cwd=cwd,
            stdout=subprocess.PIPE if capture_stdout else None,
            stderr=subprocess.PIPE if capture_stderr else None)
        stdout, stderr = popen.communicate()
        # We use logger style % formatting here to avoid the
        # potentially expensive overhead of formatting long
        # stdout/stderr strings if the current log level isn't DEBUG,
        # which is the usual case.
        _logger.debug('"%s" exit code: %d stdout: %r stderr: %r',
                      cmd_str, popen.returncode, stdout, stderr)
        if check and popen.returncode:
            raise subprocess.CalledProcessError(popen.returncode, cmd_list,
                                                output=stdout, stderr=stderr)
        else:
            # Mirror subprocess.run()'s return type so callers can use
            # .returncode/.stdout/.stderr uniformly.
            return subprocess.CompletedProcess(popen.args, popen.returncode,
                                               stdout, stderr)
def sha(self, rev: str, cwd: Optional[PathType] = None) -> str:
'''Get the SHA for a project revision.
:param rev: git revision (HEAD, v2.0.0, etc.) as a string
:param cwd: directory to run command in (default:
self.abspath)
'''
# Though we capture stderr, it will be available as the stderr
# attribute in the CalledProcessError raised by git() in
# Python 3.5 and above if this call fails.
cp = self.git(f'rev-parse {rev}^{{commit}}', capture_stdout=True,
cwd=cwd, capture_stderr=True)
# Assumption: SHAs are hex values and thus safe to decode in ASCII.
# It'll be fun when we find out that was wrong and how...
return cp.stdout.decode('ascii').strip()
def is_ancestor_of(self, rev1: str, rev2: str,
cwd: Optional[PathType] = None) -> bool:
'''Check if 'rev1' is an ancestor of 'rev2' in this project.
Returns True if rev1 is an ancestor commit of rev2 in the
given project; rev1 and rev2 can be anything that resolves to
a commit. (If rev1 and rev2 refer to the same commit, the
return value is True, i.e. a commit is considered an ancestor
of itself.) Returns False otherwise.
:param rev1: commit that could be the ancestor of *rev2*
:param rev2: commit that could be a descendant or *rev1*
:param cwd: directory to run command in (default:
``self.abspath``)
'''
rc = self.git(f'merge-base --is-ancestor {rev1} {rev2}',
check=False, cwd=cwd).returncode
if rc == 0:
return True
elif rc == 1:
return False
else:
raise RuntimeError(f'unexpected git merge-base result {rc}')
def is_up_to_date_with(self, rev: str,
cwd: Optional[PathType] = None) -> bool:
'''Check if the project is up to date with *rev*, returning
``True`` if so.
This is equivalent to ``is_ancestor_of(rev, 'HEAD',
cwd=cwd)``.
:param rev: base revision to check if project is up to date
with.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
return self.is_ancestor_of(rev, 'HEAD', cwd=cwd)
def is_up_to_date(self, cwd: Optional[PathType] = None) -> bool:
'''Check if the project HEAD is up to date with the manifest.
This is equivalent to ``is_up_to_date_with(self.revision,
cwd=cwd)``.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
return self.is_up_to_date_with(self.revision, cwd=cwd)
def is_cloned(self, cwd: Optional[PathType] = None) -> bool:
'''Returns ``True`` if ``self.abspath`` looks like a git
repository's top-level directory, and ``False`` otherwise.
:param cwd: directory to run command in (default:
``self.abspath``)
'''
if not self.abspath or not os.path.isdir(self.abspath):
return False
# --is-inside-work-tree doesn't require that the directory is
# the top-level directory of a Git repository. Use --show-cdup
# instead, which prints an empty string (i.e., just a newline,
# which we strip) for the top-level directory.
_logger.debug(f'{self.name}: checking if cloned')
res = self.git('rev-parse --show-cdup', check=False, cwd=cwd,
capture_stderr=True, capture_stdout=True)
return not (res.returncode or res.stdout.strip())
def read_at(self, path: PathType, rev: Optional[str] = None,
cwd: Optional[PathType] = None) -> bytes:
'''Read file contents in the project at a specific revision.
:param path: relative path to file in this project
:param rev: revision to read *path* from (default: ``self.revision``)
:param cwd: directory to run command in (default: ``self.abspath``)
'''
if rev is None:
rev = self.revision
cp = self.git(['show', f'{rev}:{os.fspath(path)}'],
capture_stdout=True, capture_stderr=True, cwd=cwd)
return cp.stdout
def listdir_at(self, path: PathType, rev: Optional[str] = None,
cwd: Optional[PathType] = None,
encoding: Optional[str] = None) -> List[str]:
'''List of directory contents in the project at a specific revision.
The return value is the directory contents as a list of files and
subdirectories.
:param path: relative path to file in this project
:param rev: revision to read *path* from (default: ``self.revision``)
:param cwd: directory to run command in (default: ``self.abspath``)
:param encoding: directory contents encoding (default: 'utf-8')
'''
if rev is None:
rev = self.revision
if encoding is None:
encoding = 'utf-8'
# git-ls-tree -z means we get NUL-separated output with no quoting
# of the file names. Using 'git-show' or 'git-cat-file -p'
# wouldn't work for files with special characters in their names.
out = self.git(['ls-tree', '-z', f'{rev}:{os.fspath(path)}'], cwd=cwd,
capture_stdout=True, capture_stderr=True).stdout
# A tab character separates the SHA from the file name in each
# NUL-separated entry.
return [f.decode(encoding).split('\t', 1)[1]
for f in out.split(b'\x00') if f]
# FIXME: this whole class should just go away. See #327.
class ManifestProject(Project):
    '''Represents the manifest repository as a `Project`.
    Meaningful attributes:
    - ``name``: the string ``"manifest"``
    - ``topdir``: the top level directory of the west workspace
      the manifest project controls, or ``None``
    - ``path``: relative path to the manifest repository within the
      workspace, or ``None`` (i.e. from ``topdir`` if that is set)
    - ``abspath``: absolute path to the manifest repository in the
      native path name format (or ``None`` if ``topdir`` is)
    - ``posixpath``: like ``abspath``, but with slashes (``/``) as
      path separators
    - ``west_commands``: the ``west-commands:`` key in the manifest's
      ``self:`` map. This may be a list of such if the self
      section imports multiple additional files with west commands.
    Other readable attributes included for Project compatibility:
    - ``url``: the empty string; the west manifest is not
      version-controlled by west itself, even though 'west init'
      can fetch a manifest repository from a Git remote
    - ``revision``: ``"HEAD"``
    - ``clone_depth``: ``None``, because there's no URL
    - ``groups``: the empty list
    '''
    def __init__(self, path: Optional[PathType] = None,
                 west_commands: Optional[WestCommandsType] = None,
                 topdir: Optional[PathType] = None):
        '''
        :param path: Relative path to the manifest repository in the
            west workspace, if known.
        :param west_commands: path to a west commands specification YAML
            file in the project, relative to its base directory,
            or list of these
        :param topdir: Root of the west workspace the manifest
            project is inside. If not given, all absolute path
            attributes (abspath and posixpath) will be None.
        '''
        self.name: str = 'manifest'
        # Pretending that this is a Project, even though it's not (#327)
        self.url: str = ''
        self.submodules = False
        self.revision: str = 'HEAD'
        self.clone_depth: Optional[int] = None
        self.groups: GroupsType = []
        # The following type: ignore is necessary since every Project
        # actually has a non-None _path attribute, so the parent class
        # defines its type as 'str', where here we need it to be
        # an Optional[str].
        self._path = os.fspath(path) if path else None  # type: ignore
        # Path related attributes
        self.topdir: Optional[str] = os.fspath(topdir) if topdir else None
        self._abspath: Optional[str] = None
        self._posixpath: Optional[str] = None
        # Extension commands.
        self.west_commands = _west_commands_list(west_commands)
    # NOTE(review): a stray @property decorator previously made as_dict
    # a read-only attribute on this class while Project.as_dict is a
    # plain method; it has been removed so both classes expose the same
    # callable interface.
    def as_dict(self) -> Dict:
        '''Return a representation of this object as a dict, as it would be
        parsed from an equivalent YAML manifest.'''
        ret: Dict = {}
        if self.path:
            ret['path'] = self.path
        if self.west_commands:
            ret['west-commands'] = \
                _west_commands_maybe_delist(self.west_commands)
        return ret
class Manifest:
    '''The parsed contents of a west manifest file.
    Instances are usually created via the `from_file` or `from_data`
    factory methods rather than by calling the constructor directly.
    '''
    @staticmethod
    def from_file(source_file: Optional[PathType] = None,
                  **kwargs) -> 'Manifest':
        '''Manifest object factory given a source YAML file.
        The default behavior is to find the current west workspace's
        manifest file and resolve it.
        Results depend on the keyword arguments given in *kwargs*:
        - If both *source_file* and *topdir* are given, the
          returned Manifest object is based on the data in
          *source_file*, rooted at *topdir*. The configuration
          variable ``manifest.path`` is ignored in this case, though
          ``manifest.group-filter`` will still be read if it exists.
          This allows parsing a manifest file "as if" its project
          hierarchy were rooted at another location in the system.
        - If neither *source_file* nor *topdir* is given, the file
          system is searched for *topdir*. That workspace's
          ``manifest.path`` configuration option is used to find
          *source_file*, ``topdir/<manifest.path>/<manifest.file>``.
        - If only *source_file* is given, *topdir* is found
          starting there. The directory containing *source_file*
          doesn't have to be ``manifest.path`` in this case.
        - If only *topdir* is given, that workspace's
          ``manifest.path`` is used to find *source_file*.
        Exceptions raised:
        - `west.util.WestNotFound` if no *topdir* can be found
        - `MalformedManifest` if *source_file* contains invalid
          data
        - `ManifestVersionError` if this version of west is too
          old to parse the manifest.
        - `MalformedConfig` if ``manifest.path`` is needed and
          can't be read
        - ``ValueError`` if *topdir* is given but is not a west
          workspace root
        :param source_file: source file to load
        :param kwargs: Manifest.__init__ keyword arguments
        '''
        # All four cases below funnel into Manifest(**kwargs); the job
        # of this factory is just to fill in 'source_file', 'topdir',
        # and (where known) 'manifest_path' before delegating.
        topdir = kwargs.get('topdir')
        if topdir is None:
            if source_file is None:
                # neither source_file nor topdir: search the filesystem
                # for the workspace and use its manifest.path.
                topdir = util.west_topdir()
                (mpath, mname) = _mpath(topdir=topdir)
                kwargs.update({
                    'topdir': topdir,
                    'source_file': os.path.join(topdir, mpath, mname),
                    'manifest_path': mpath
                })
            else:
                # Just source_file: find topdir starting there.
                # We need source_file in kwargs as that's what gets used below.
                kwargs.update({
                    'source_file': source_file,
                    'topdir':
                    util.west_topdir(start=os.path.dirname(source_file))
                })
        elif source_file is None:
            # Just topdir.
            # Verify topdir is a real west workspace root.
            msg = f'topdir {topdir} is not a west workspace root'
            try:
                real_topdir = util.west_topdir(start=topdir, fall_back=False)
            except util.WestNotFound:
                raise ValueError(msg)
            # Path comparison normalizes trailing slashes and native
            # separators before checking they are the same directory.
            if Path(topdir) != Path(real_topdir):
                raise ValueError(f'{msg}; but {real_topdir} is')
            # Read manifest.path from topdir/.west/config, and use it
            # to locate source_file.
            (mpath, mname) = _mpath(topdir=topdir)
            source_file = os.path.join(topdir, mpath, mname)
            kwargs.update({
                'source_file': source_file,
                'manifest_path': mpath,
            })
        else:
            # Both source_file and topdir.
            kwargs['source_file'] = source_file
        return Manifest(**kwargs)
@staticmethod
def from_data(source_data: ManifestDataType, **kwargs) -> 'Manifest':
'''Manifest object factory given parsed YAML data.
This factory does not read any configuration files.
Letting the return value be ``m``. Results then depend on
keyword arguments in *kwargs*:
- Unless *topdir* is given, all absolute paths in ``m``,
like ``m.projects[1].abspath``, are ``None``.
- Relative paths, like ``m.projects[1].path``, are taken
from *source_data*.
- If ``source_data['manifest']['self']['path']`` is not
set, then ``m.projects[MANIFEST_PROJECT_INDEX].abspath``
will be set to *manifest_path* if given.
Returns the same exceptions as the Manifest constructor.
:param source_data: parsed YAML data as a Python object, or a
string with unparsed YAML data
:param kwargs: Manifest.__init__ keyword arguments
'''
kwargs.update({'source_data': source_data})
return Manifest(**kwargs)
    def __init__(self, source_file: Optional[PathType] = None,
                 source_data: Optional[ManifestDataType] = None,
                 manifest_path: Optional[PathType] = None,
                 topdir: Optional[PathType] = None,
                 importer: Optional[ImporterType] = None,
                 import_flags: ImportFlag = ImportFlag.DEFAULT,
                 **kwargs: Dict[str, Any]):
        '''
        Using `from_file` or `from_data` is usually easier than direct
        instantiation.
        Instance attributes:
        - ``projects``: sequence of `Project`
        - ``topdir``: west workspace top level directory, or
          None
        - ``path``: path to the manifest file itself, or None
        - ``has_imports``: bool, True if the manifest contains
          an "import:" attribute in "self:" or "projects:"; False
          otherwise
        - ``group_filter``: a group filter value equivalent to
          the resolved manifest's "group-filter:", along with any
          values from imported manifests. This value may be simpler
          than the actual input data.
        Exactly one of *source_file* and *source_data* must be given.
        If *source_file* is given:
        - If *topdir* is too, ``projects`` is rooted there.
        - Otherwise, *topdir* is found starting at *source_file*.
        If *source_data* is given:
        - If *topdir* is too, ``projects`` is rooted there.
        - Otherwise, there is no root: ``projects[i].abspath`` and
          other absolute path attributes are ``None``.
        - If ``source_data['manifest']['self']['path']`` is unset,
          *manifest_path* is used as a fallback.
        The *importer* kwarg, if given, is a callable. It is called
        when *source_file* requires importing manifest data that
        aren't found locally. It will be called as:
        ``importer(project, file)``
        where ``project`` is a `Project` and ``file`` is the missing
        file. The file's contents at refs/heads/manifest-rev should
        usually be returned, potentially after fetching the project's
        revision from its remote URL and updating that ref.
        The return value should be a string containing manifest data,
        or a list of strings if ``file`` is a directory containing
        YAML files. A return value of None will cause the import to be
        ignored.
        Exceptions raised:
        - `MalformedManifest`: if the manifest data is invalid
        - `ManifestImportFailed`: if the manifest could not be
          resolved due to import errors
        - `ManifestVersionError`: if this version of west is too
          old to parse the manifest
        - `WestNotFound`: if *topdir* was needed and not found
        - ``ValueError``: for other invalid arguments
        :param source_file: YAML file containing manifest data
        :param source_data: parsed YAML data as a Python object, or a
            string containing unparsed YAML data
        :param manifest_path: fallback `ManifestProject` ``path``
            attribute
        :param topdir: used as the west workspace top level
            directory
        :param importer: callback to resolve missing manifest import
            data
        :param import_flags: bit mask, controls import resolution
        '''
        if source_file and source_data:
            raise ValueError('both source_file and source_data were given')
        if not _flags_ok(import_flags):
            raise ValueError(f'bad import_flags {import_flags:x}')
        self.path: Optional[str] = None
        '''Path to the file containing the manifest, or None if
        created from data rather than the file system.
        '''
        # When loading from a file, slurp its text here; from this
        # point on only source_data is used.
        if source_file:
            source_file = Path(source_file)
            source_data = source_file.read_text()
            self.path = os.path.abspath(source_file)
        if not source_data:
            self._malformed('manifest contains no data')
        if isinstance(source_data, str):
            source_data = _load(source_data)
        # Validate the manifest. Wrap a couple of the exceptions with
        # extra context about the problematic file in case of errors,
        # to help debugging.
        try:
            validate(source_data)
        except ManifestVersionError as mv:
            raise ManifestVersionError(mv.version, file=source_file) from mv
        except MalformedManifest as mm:
            self._malformed(mm.args[0], parent=mm)
        except TypeError as te:
            self._malformed(te.args[0], parent=te)
        # The above validate() and exception handling block's job is
        # to ensure this, but pacify the type checker in a way that
        # crashes if something goes wrong with that.
        assert isinstance(source_data, dict)
        self._projects: List[Project] = []
        '''Sequence of `Project` objects representing manifest
        projects.
        Index 0 (`MANIFEST_PROJECT_INDEX`) contains a
        `ManifestProject` representing the manifest repository. The
        rest of the sequence contains projects in manifest file order
        (or resolution order if the manifest contains imports).
        '''
        self.topdir: Optional[str] = None
        '''The west workspace's top level directory, or None.'''
        if topdir:
            self.topdir = os.fspath(topdir)
        self.has_imports: bool = False
        # This will be overwritten in _load() as needed.
        self.group_filter: GroupFilterType = []
        # Private state which backs self.group_filter. This also
        # gets overwritten as needed.
        self._disabled_groups: Set[str] = set()
        # Stash the importer and flags in instance attributes. These
        # don't change as we recurse, so they don't belong in _import_ctx.
        self._importer: ImporterType = importer or _default_importer
        self._import_flags = import_flags
        # 'import-context' is an internal kwarg used when this
        # constructor recurses to resolve imports; external callers
        # normally never pass it.
        ctx: Optional[_import_ctx] = \
            kwargs.get('import-context')  # type: ignore
        if ctx is None:
            # Top-level call: start with an empty import context.
            ctx = _import_ctx(projects={},
                              group_filter=[],
                              imap_filter=None,
                              path_prefix=Path('.'))
        else:
            assert isinstance(ctx, _import_ctx)
        if manifest_path:
            mpath: Optional[Path] = Path(manifest_path)
        else:
            mpath = None
        # Resolve the manifest (including imports) into self._projects.
        self._load(source_data['manifest'], mpath, ctx)
def get_projects(self,
# any str name is also a PathType
project_ids: Iterable[PathType],
allow_paths: bool = True,
only_cloned: bool = False) -> List[Project]:
'''Get a list of `Project` objects in the manifest from
*project_ids*.
If *project_ids* is empty, a copy of ``self.projects``
attribute is returned as a list. Otherwise, the returned list
has projects in the same order as *project_ids*.
``ValueError`` is raised if:
- *project_ids* contains unknown project IDs
- (with *only_cloned*) an uncloned project was found
The ``ValueError`` *args* attribute is a 2-tuple with a list
of unknown *project_ids* at index 0, and a list of uncloned
`Project` objects at index 1.
:param project_ids: a sequence of projects, identified by name
or (absolute or relative) path. Names are matched first; path
checking can be disabled with *allow_paths*.
:param allow_paths: if false, *project_ids* is assumed to contain
names only, not paths
:param only_cloned: raise an exception for uncloned projects
'''
projects = list(self.projects)
unknown: List[PathType] = [] # project_ids with no Projects
uncloned: List[Project] = [] # if only_cloned, the uncloned Projects
ret: List[Project] = [] # result list of resolved Projects
# If no project_ids are specified, use all projects.
if not project_ids:
if only_cloned:
uncloned = [p for p in projects if not p.is_cloned()]
if uncloned:
raise ValueError(unknown, uncloned)
return projects
# Otherwise, resolve each of the project_ids to a project,
# returning the result or raising ValueError.
for pid in project_ids:
project: Optional[Project] = None
if isinstance(pid, str):
project = self._projects_by_name.get(pid)
if project is None and allow_paths:
project = self._projects_by_rpath.get(Path(pid).resolve())
if project is None:
unknown.append(pid)
continue
ret.append(project)
if only_cloned and not project.is_cloned():
uncloned.append(project)
if unknown or (only_cloned and uncloned):
raise ValueError(unknown, uncloned)
return ret
def as_dict(self) -> Dict:
'''Returns a dict representing self, fully resolved.
The value is "resolved" in that the result is as if all
projects had been defined in a single manifest without any
import attributes.
'''
return self._as_dict_helper()
def as_frozen_dict(self) -> Dict:
'''Returns a dict representing self, but frozen.
The value is "frozen" in that all project revisions are the
full SHAs pointed to by `QUAL_MANIFEST_REV_BRANCH` references.
Raises ``RuntimeError`` if a project SHA can't be resolved.
'''
return self._as_dict_helper(pdict=pdict)
def as_yaml(self, **kwargs) -> str:
'''Returns a YAML representation for self, fully resolved.
The value is "resolved" in that the result is as if all
projects had been defined in a single manifest without any
import attributes.
:param kwargs: passed to yaml.safe_dump()
'''
return yaml.safe_dump(self.as_dict(), **kwargs)
def as_frozen_yaml(self, **kwargs) -> str:
'''Returns a YAML representation for self, but frozen.
The value is "frozen" in that all project revisions are the
full SHAs pointed to by `QUAL_MANIFEST_REV_BRANCH` references.
Raises ``RuntimeError`` if a project SHA can't be resolved.
:param kwargs: passed to yaml.safe_dump()
'''
return yaml.safe_dump(self.as_frozen_dict(), **kwargs)
@property
def is_active(self, project: Project,
extra_filter: Optional[Iterable[str]] = None) -> bool:
'''Is a project active?
Projects with empty 'project.groups' lists are always active.
Otherwise, if any group in 'project.groups' is enabled by this
manifest's 'group-filter:' list (and the
'manifest.group-filter' local configuration option, if we have
a workspace), returns True.
Otherwise, i.e. if all of the project's groups are disabled,
this returns False.
"Inactive" projects should generally be considered absent from
the workspace for purposes like updating it, listing projects,
etc.
:param project: project to check
:param extra_filter: an optional additional group filter
'''
if not project.groups:
# Projects without any groups are always active, so just
# exit early. Note that this happens to treat the
# ManifestProject as though it's always active. This is
# important for keeping it in the 'west list' output for
# now.
return True
# Load manifest.group-filter from the configuration file if we
# haven't already. Only do this once so we don't hit the file
# system for every project when looping over the manifest.
cfg_gf = self._config_group_filter
# Figure out what the disabled groups are. Skip reallocation
# if possible.
if cfg_gf or extra_filter is not None:
disabled_groups = set(self._disabled_groups)
if cfg_gf:
_update_disabled_groups(disabled_groups, cfg_gf)
if extra_filter is not None:
extra_filter = self._validated_group_filter(None,
list(extra_filter))
_update_disabled_groups(disabled_groups, extra_filter)
else:
disabled_groups = self._disabled_groups
return any(group not in disabled_groups for group in project.groups)
@property
| [
2,
15069,
357,
66,
8,
2864,
11,
13130,
11,
12131,
35834,
311,
5314,
40990,
49599,
198,
2,
15069,
2864,
11,
13130,
4062,
1678,
13,
952,
12052,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
... | 2.440328 | 17,948 |
import csv
| [
11748,
269,
21370,
628
] | 3 | 4 |
from cleo import Command
import logging
from dcmrtstruct2nii.facade.dcmrtstruct2nii import list_rt_structs
class ListStructs(Command):
"""
List structures in RT Struct
list
{--r|rtstruct= : Path to DICOM RT Struct file}
"""
| [
6738,
1190,
78,
1330,
9455,
198,
11748,
18931,
198,
198,
6738,
288,
11215,
17034,
7249,
17,
77,
4178,
13,
38942,
671,
13,
67,
11215,
17034,
7249,
17,
77,
4178,
1330,
1351,
62,
17034,
62,
7249,
82,
628,
198,
4871,
7343,
44909,
82,
7,... | 2.663158 | 95 |
# Generated by Django 3.0.7 on 2020-07-15 03:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
2998,
12,
1314,
7643,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
# coding: utf-8
# # Evaluation of the new oversampler on the standard database foldings
#
# In this notebook we give an example evaluating a new oversampler on the standard 104 imbalanced datasets. The evaluation is highly similar to that illustrated in the notebook ```002_evaluation_multiple_datasets``` with the difference that in this case some predefined dataset foldings are used to make the results comparable to those reported in the ranking page of the documentation. The database foldings need to be downloaded from the github repository and placed in the 'smote_foldings' directory.
# In[1]:
import os, pickle, itertools
# import classifiers
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote_variants import MLPClassifierWrapper
# import SMOTE variants
import smote_variants as sv
# itertools to derive imbalanced databases
import imbalanced_databases as imbd
# In[2]:
# setting global parameters

# Directory caching the predefined database foldings.
folding_path = os.path.join(os.path.expanduser('~'), 'smote_foldings')
if not os.path.exists(folding_path):
    os.makedirs(folding_path)

max_sampler_parameter_combinations = 35
n_jobs = 5

# In[3]:

# instantiate classifiers

# Support Vector Classifiers with 6 parameter combinations
# (2 C values x 3 penalty/loss/dual configurations).
sv_classifiers = [
    CalibratedClassifierCV(LinearSVC(C=c, penalty=pen, loss=lss, dual=dl))
    for c, (pen, lss, dl) in itertools.product(
        [1.0, 10.0],
        [('l1', 'squared_hinge', False),
         ('l2', 'hinge', True),
         ('l2', 'squared_hinge', False)])]

# Multilayer Perceptron Classifiers with 6 parameter combinations
mlp_classifiers = [
    MLPClassifierWrapper(activation=act, hidden_layer_fraction=frac)
    for act, frac in itertools.product(['relu', 'logistic'], [1.0, 0.5, 0.1])]

# Nearest Neighbor Classifiers with 18 parameter combinations
nn_classifiers = [
    KNeighborsClassifier(n_neighbors=k, weights=w, p=p)
    for k, w, p in itertools.product([3, 5, 7],
                                     ['uniform', 'distance'],
                                     [1, 2, 3])]

# Decision Tree Classifiers with 6 parameter combinations
dt_classifiers = [
    DecisionTreeClassifier(criterion=crit, max_depth=depth)
    for crit, depth in itertools.product(['gini', 'entropy'], [None, 3, 5])]

classifiers = sv_classifiers + mlp_classifiers + nn_classifiers + dt_classifiers

# In[4]:

# querying datasets for the evaluation
datasets = imbd.get_data_loaders('study')

# In[ ]:

# executing the evaluation
results = sv.evaluate_oversamplers(datasets,
                                   samplers=sv.get_all_oversamplers(),
                                   classifiers=classifiers,
                                   cache_path=folding_path,
                                   n_jobs=n_jobs,
                                   remove_sampling_cache=True,
                                   max_samp_par_comb=max_sampler_parameter_combinations)

# In[ ]:

# The evaluation results are available in the results dataframe for further analysis.
print(results)
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
34959,
286,
262,
649,
10753,
321,
20053,
319,
262,
3210,
6831,
5591,
654,
198,
2,
220,
198,
2,
554,
428,
20922,
356,
1577,
281,
1672,
22232,
257,
649,
10753,
321,
20053,
319,... | 2.609649 | 1,368 |
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Sanity check: report which TensorFlow version this environment provides.
print(tf.__version__)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
198,
4798,
7,
27110,
13,
834,
9641,
834,
8,
198
] | 3.133333 | 30 |
#
# Question: given a range a,b, find the total number of perfect square numbers in the given range.
#
import cmath
def find_perfect_square_count(a, b, verbose=False):
    '''Count "perfect square steps" between a and b.

    The strategy is not to iterate the integers of the range, but to map the
    endpoints into square-root space and count integer values between the two
    roots. For negative endpoints the square root lives on the imaginary
    axis (via cmath), and integer steps are counted there instead; when both
    endpoints are negative, min(a, b) is used so the shared imaginary axis is
    not walked twice.

    NOTE(review): the non-negative branch doubles the count and subtracts 2,
    per the original design (its own self-check expects 18 for (0, 100)).
    That behavior is preserved for backward compatibility.

    Fixes applied vs. the original: Python-2 ``print`` statement, undefined
    names (``math``, ``np``, ``ceil``, ``NaN``), over-broad bare ``except``,
    and an unreachable ``None`` guard (float() already rejects None).

    :param a: one endpoint of the range (int or float)
    :param b: the other endpoint of the range (int or float)
    :param verbose: if True, print a note when the complex path is taken
    :return: integer count, or NaN if the inputs are not numeric
    '''
    import math

    NaN = float('nan')

    # Reject anything that cannot be interpreted as a number.
    try:
        float(a)
        float(b)
    except (TypeError, ValueError):
        return NaN

    # An empty range contains nothing to count.
    if b == a:
        return 0

    # Do we need to handle complex (imaginary-axis) square roots?
    if a < 0 or b < 0:
        if verbose:
            print('complex')
        # Case: a on the imaginary axis, b on the real axis.
        if a < 0 and b >= 0:
            return (math.ceil(cmath.sqrt(complex(a, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(b, 0)).real) - 2)
        # Case: a on the real axis, b on the imaginary axis.
        if a >= 0 and b < 0:
            return (math.ceil(cmath.sqrt(complex(b, 0)).imag)
                    + math.ceil(cmath.sqrt(complex(a, 0)).real) - 2)
        # Both negative: both roots lie on the imaginary axis, so use
        # min(a, b) to avoid double-counting the same integers.
        return math.ceil(cmath.sqrt(complex(min(a, b), 0)).imag) - 1

    if a >= b:
        count = 2 * (math.ceil(math.sqrt(abs(a))) - math.ceil(math.sqrt(abs(b))))
        return count - 2 if count > 0 else 0
    if b >= a:
        count = 2 * (math.ceil(math.sqrt(abs(b))) - math.ceil(math.sqrt(abs(a))))
        return count - 2 if count > 0 else 0

    # Unordered operands (e.g. a NaN input) fall through to here.
    return NaN
# some preflight checks (math.isnan replaces the previously-undefined isnan)
import math

assert(find_perfect_square_count(0, 100) == 18)
assert(math.isnan(find_perfect_square_count('ff', 1.2)))
assert(math.isnan(find_perfect_square_count('ff', None)))

# lets fully test
import random

number_of_tests = 5
# random.randint() requires integer bounds; the float 1e4 would raise.
value = 10000
for (a, b) in zip([random.randint(-value, value) for x in range(number_of_tests)],
                  [random.randint(-value, value) for x in range(number_of_tests)]):
    print('%d\t between \t[%d, %d]' % (find_perfect_square_count(a, b), a, b))
| [
2,
198,
2,
18233,
25,
1813,
257,
2837,
257,
11,
65,
11,
1064,
262,
2472,
1271,
286,
2818,
6616,
3146,
287,
262,
1813,
2837,
13,
198,
2,
198,
198,
11748,
269,
11018,
198,
198,
4299,
1064,
62,
25833,
62,
23415,
62,
9127,
7,
64,
11... | 2.3722 | 1,518 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.experimental.mala import (
SingleSiteMetropolisAdapatedLangevinAlgorithm,
)
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
| [
2,
15069,
357,
66,
8,
30277,
19193,
82,
11,
3457,
13,
290,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
... | 3.427273 | 110 |
# Dashboard blueprint
from flask.blueprints import Blueprint
from config import api_version
# All dashboard routes are mounted under /api/<api_version>/dashboard and
# render templates from templates/dashboard.
dash = Blueprint(
    "dashboard",
    __name__,
    url_prefix=f"/api/{api_version}/dashboard",
    template_folder="templates/dashboard",
)
# Imported after `dash` exists so the views module can attach its routes to
# the blueprint; presumably this late import avoids a circular import —
# TODO confirm against the views module.
from . import views
| [
2,
16189,
3526,
30881,
198,
6738,
42903,
13,
17585,
17190,
1330,
39932,
198,
6738,
4566,
1330,
40391,
62,
9641,
198,
198,
42460,
796,
39932,
7,
198,
220,
220,
220,
366,
42460,
3526,
1600,
198,
220,
220,
220,
11593,
3672,
834,
11,
198,... | 2.976744 | 86 |
# Bug fix: the original imported setuptools' private, unused
# `_install_setup_requires`; private names are not a stable API and the
# import breaks on setuptools versions that don't define it.
from setuptools import setup

setup(
    name="twitoff_app",
    packages=['twitoff_app'],
    include_package_data=True,
    install_requires=[
        'flask',
    ],
)
6738,
900,
37623,
10141,
1330,
4808,
17350,
62,
40406,
62,
47911,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
4246,
270,
2364,
62,
1324,
1600,
220,
198,
220,
220,
220,
10392,
28,
17816,
4246,
270,
2364,
62,
1324,
6... | 2.241758 | 91 |
from subprocess import call
import sys

# Usage: python script.py <projects_file> <svn_base_path> <oldest_rev>
projects_file = sys.argv[1]
svn_path = sys.argv[2]
oldest_rev = sys.argv[3]

with open(projects_file) as f:
    repo_paths = f.readlines()

# Strip only a trailing newline; the original `line[:-1]` chopped the last
# character of a final line that lacked a newline terminator. Build the
# list once instead of twice, and use py3 print() (the original used the
# Python 2 print statement, a SyntaxError under Python 3).
paths = [svn_path + line.rstrip('\n') for line in repo_paths]
print(paths)

for path in paths:
    call(['svn', 'log', path, '-v', '--stop-on-copy', '-r', 'HEAD:' + oldest_rev])
6738,
850,
14681,
1330,
869,
198,
11748,
25064,
198,
198,
42068,
62,
7753,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
21370,
77,
62,
6978,
796,
25064,
13,
853,
85,
58,
17,
60,
220,
198,
727,
395,
62,
18218,
796,
25064,
13,
853,
8... | 2.359756 | 164 |
# Generated by Django 3.1.5 on 2021-02-12 20:45
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2999,
12,
1065,
1160,
25,
2231,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Generated by Django 2.2.6 on 2019-10-26 14:09
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
940,
12,
2075,
1478,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import asyncio
import os
import io
import disnake
import logging
from disnake.ext import commands
from disnake.ext.audiorec import NativeVoiceClient
logging.basicConfig(level=logging.INFO)

bot = commands.Bot(command_prefix=commands.when_mentioned_or("+"),
                   description='Relatively simple recording bot example')


# Bug fix: a decorator must be followed by a function/class definition; the
# bare '@bot.event' directly above 'bot.add_cog(...)' was a SyntaxError.
# Give the event decorator a listener to decorate.
@bot.event
async def on_ready():
    # Simple readiness log once the gateway connection is up.
    print(f'Logged in as {bot.user}')


# NOTE(review): Recorder is expected to be the recording cog defined
# elsewhere in this file (it uses NativeVoiceClient) — confirm.
bot.add_cog(Recorder(bot))
bot.run(os.environ['TOKEN'])
| [
11748,
30351,
952,
201,
198,
11748,
28686,
201,
198,
11748,
33245,
201,
198,
201,
198,
11748,
595,
77,
539,
201,
198,
11748,
18931,
201,
198,
201,
198,
6738,
595,
77,
539,
13,
2302,
1330,
9729,
201,
198,
6738,
595,
77,
539,
13,
2302... | 2.588957 | 163 |
#!/usr/bin/env python3
"""
The following functions are a miscellaneous collection of utility functions
that can be useful in making code shorter and easier to read.
"""
def shortened_hash(s, n):
    """ Abbreviate a hash string to roughly n characters.

    Keeps the first and last few characters of *s*, joined by "...".
    Strings already no longer than *n* are returned unchanged.

    :param s: the full string to shorten
    :param n: the desired length of the string returned
    :return: An n-character string with the first and last bits of s
    """
    keep = int((n - 3) / 2)
    return s if len(s) <= n else s[:keep] + "..." + s[-keep:]
def hash_file(filename, block_size=32768):
    """ Compute the sha256 hex digest of a file's contents.

    :param filename: the full path of the file to hash
    :param block_size: bytes to read per chunk, keeping memory bounded
        for large files
    :return: the 64-character hex sha256 digest, or None if the file
        cannot be read
    """
    import hashlib
    digest = hashlib.sha256()
    try:
        with open(filename, 'rb') as fp:
            # Feed the digest chunk by chunk until read() returns b''.
            for chunk in iter(lambda: fp.read(block_size), b''):
                digest.update(chunk)
    except IOError:
        return None
    return digest.hexdigest()
def tree(base_dir, padding=' ', print_files=True, is_last=False, is_first=False):
    """ Return a list of strings that can be combined to form ASCII-art-style
    directory listing

    :param base_dir: the path to explore
    :param padding: a string to prepend to each line
    :param print_files: True to print directories and files, False for just directories
    :param is_last: only used recursively
    :param is_first: only used recursively
    :return: list of display lines, one per visited entry
    """
    import os
    out_lines = []
    # Emit the current entry: the recursion root appears bare, nested entries
    # get a box-drawing connector ('└─' for the last sibling, '├─' otherwise)
    # spliced onto the parent's padding.
    if is_first:
        out_lines.append(base_dir)
    else:
        if is_last:
            out_lines.append(padding[:-2] + '└─ ' + os.path.basename(os.path.abspath(base_dir)))
        else:
            out_lines.append(padding[:-2] + '├─ ' + os.path.basename(os.path.abspath(base_dir)))
    # Collect children; when print_files is False, keep directories only.
    if print_files:
        files = os.listdir(base_dir)
    else:
        files = [x for x in os.listdir(base_dir) if os.path.isdir(base_dir + os.sep + x)]
    if not is_first:
        padding = padding + ' '
    # Case-insensitive ordering so 'B' sorts next to 'b'.
    files = sorted(files, key=lambda s: s.lower())
    count = 0
    last = len(files) - 1
    for i, file in enumerate(files):
        count += 1
        path = base_dir + os.sep + file
        is_last = i == last
        if os.path.isdir(path):
            # NOTE(review): 'count == len(files)' duplicates 'is_last', and
            # both arms of the inner 'if is_first' are identical; kept as-is.
            if count == len(files):
                if is_first:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
                else:
                    out_lines = out_lines + tree(path, padding + ' ', print_files, is_last, False)
            else:
                # A '│' continues the parent's guide line past this subtree.
                out_lines = out_lines + tree(path, padding + '│ ', print_files, is_last, False)
        else:
            if is_last:
                out_lines.append(padding + '└─ ' + file)
            else:
                out_lines.append(padding + '├─ ' + file)
    return out_lines
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
464,
1708,
5499,
389,
257,
2984,
25673,
4947,
286,
10361,
5499,
198,
5562,
460,
307,
4465,
287,
1642,
2438,
12238,
290,
4577,
284,
1100,
13,
198,
37811,
628,
198,
... | 2.287946 | 1,344 |
#!/usr/bin/python3

# Demonstrates a triple-quoted multi-line string literal: the embedded \t and
# \n escape sequences are honored, and source line breaks are preserved.
para_str = """ Python is a scripting language which was created by
Guido van Rossum in 1991, \t which is used in various sectors such as \n Game Development, GIS Programming, Software Development, web development,
Data Analytics and Machine learning, System Scripting etc.
"""
print (para_str)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
1845,
64,
62,
2536,
796,
37227,
11361,
318,
257,
36883,
3303,
543,
373,
2727,
416,
198,
8205,
17305,
5719,
9847,
388,
287,
10249,
11,
3467,
83,
543,
318,
973,
287,
2972,
16020,
884... | 3.807229 | 83 |
# Copyright 2020 Atthaboon S.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import i18n
from robot.libraries.BuiltIn import BuiltIn
__version__ = '0.1.4'
class I18nLibrary:
"""
I18nLibrary translator library for support in robotframework
"""
def load_path_append(self, append_path):
"""
Auto load language from specific path
:param append_path:
:return:
"""
i18n.load_path.append(append_path)
# Load lang files to memory
for lang in i18n.config.get('pre_load_langs'):
self._load_directory(append_path, lang)
subfolders = self._get_list_of_sub_folders(append_path)
for folder_path in subfolders:
self._load_directory(folder_path, lang)
| [
2,
15069,
12131,
1629,
400,
397,
2049,
311,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.784783 | 460 |
import os
import sys
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
import _vmprof
from vmprof import cli
from vmprof.reader import (MARKER_NATIVE_SYMBOLS, FdWrapper,
LogReaderState, LogReaderDumpNative)
from vmprof.stats import Stats
from vmprof.profiler import Profiler, read_profile
PY3 = sys.version_info[0] >= 3
IS_PYPY = '__pypy__' in sys.builtin_module_names
# it's not a good idea to use a "round" default sampling period, else we risk
# to oversample periodic tasks which happens to run at e.g. 100Hz or 1000Hz:
# http://www.solarisinternals.com/wiki/index.php/DTrace_Topics_Hints_Tips#profile-1001.2C_profile-997.3F
#
# To avoid the problem, we use a period which is "almost" but not exactly
# 1000Hz
DEFAULT_PERIOD = 0.00099
if IS_PYPY:
else:
# CPYTHON
def sample_stack_now(skip=0):
""" Helper utility mostly for tests, this is considered
private API.
It will return a list of stack frames the python program currently
walked.
"""
stackframes = _vmprof.sample_stack_now(skip)
assert isinstance(stackframes, list)
return stackframes
def resolve_addr(addr):
""" Private API, returns the symbol name of the given address.
Only considers linking symbols found by dladdr.
"""
return _vmprof.resolve_addr(addr)
def insert_real_time_thread(thread_id=0):
""" Inserts a thread into the list of threads to be sampled in real time mode.
When enabling real time mode, the caller thread is inserted automatically.
Returns the number of registered threads, or -1 if we can't insert thread.
Inserts the current thread if thread_id is not provided.
"""
return _vmprof.insert_real_time_thread(thread_id)
def remove_real_time_thread(thread_id=0):
""" Removes a thread from the list of threads to be sampled in real time mode.
When disabling in real time mode, *all* threads are removed automatically.
Returns the number of registered threads, or -1 if we can't remove thread.
Removes the current thread if thread_id is not provided.
"""
return _vmprof.remove_real_time_thread(thread_id)
def is_enabled():
""" Indicates if vmprof has already been enabled for this process.
Returns True or False. None is returned if the state is unknown.
"""
if hasattr(_vmprof, 'is_enabled'):
return _vmprof.is_enabled()
raise NotImplementedError("is_enabled is not implemented on this platform")
def get_profile_path():
""" Returns the absolute path for the file that is currently open.
None is returned if the backend implementation does not implement that function,
or profiling is not enabled.
"""
if hasattr(_vmprof, 'get_profile_path'):
return _vmprof.get_profile_path()
raise NotImplementedError("get_profile_path not implemented on this platform")
| [
11748,
28686,
198,
11748,
25064,
198,
28311,
25,
198,
220,
220,
220,
422,
4423,
346,
1330,
543,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
736,
3742,
13,
1477,
22602,
62,
4758,
1330,
543,
198,
198,
11748,
4808,
14761,
5577... | 2.883721 | 1,032 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lte/protos/ha_orc8r.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lte/protos/ha_orc8r.proto',
package='magma.lte',
syntax='proto3',
serialized_options=b'Z\031magma/lte/cloud/go/protos',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19lte/protos/ha_orc8r.proto\x12\tmagma.lte\"\x1e\n\x1cGetEnodebOffloadStateRequest\"\xd9\x02\n\x1dGetEnodebOffloadStateResponse\x12`\n\x15\x65nodeb_offload_states\x18\x01 \x03(\x0b\x32\x41.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry\x1aw\n\x18\x45nodebOffloadStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12J\n\x05value\x18\x02 \x01(\x0e\x32;.magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState:\x02\x38\x01\"]\n\x12\x45nodebOffloadState\x12\t\n\x05NO_OP\x10\x00\x12\x15\n\x11PRIMARY_CONNECTED\x10\x01\x12%\n!PRIMARY_CONNECTED_AND_SERVING_UES\x10\x02\x32r\n\x02Ha\x12l\n\x15GetEnodebOffloadState\x12\'.magma.lte.GetEnodebOffloadStateRequest\x1a(.magma.lte.GetEnodebOffloadStateResponse\"\x00\x42\x1bZ\x19magma/lte/cloud/go/protosb\x06proto3'
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE = _descriptor.EnumDescriptor(
name='EnodebOffloadState',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NO_OP', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIMARY_CONNECTED_AND_SERVING_UES', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=325,
serialized_end=418,
)
_sym_db.RegisterEnumDescriptor(_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE)
_GETENODEBOFFLOADSTATEREQUEST = _descriptor.Descriptor(
name='GetEnodebOffloadStateRequest',
full_name='magma.lte.GetEnodebOffloadStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=70,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY = _descriptor.Descriptor(
name='EnodebOffloadStatesEntry',
full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.key', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry.value', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=323,
)
_GETENODEBOFFLOADSTATERESPONSE = _descriptor.Descriptor(
name='GetEnodebOffloadStateResponse',
full_name='magma.lte.GetEnodebOffloadStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='enodeb_offload_states', full_name='magma.lte.GetEnodebOffloadStateResponse.enodeb_offload_states', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY, ],
enum_types=[
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=418,
)
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.fields_by_name['value'].enum_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY.containing_type = _GETENODEBOFFLOADSTATERESPONSE
_GETENODEBOFFLOADSTATERESPONSE.fields_by_name['enodeb_offload_states'].message_type = _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATE.containing_type = _GETENODEBOFFLOADSTATERESPONSE
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateRequest'] = _GETENODEBOFFLOADSTATEREQUEST
DESCRIPTOR.message_types_by_name['GetEnodebOffloadStateResponse'] = _GETENODEBOFFLOADSTATERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetEnodebOffloadStateRequest = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateRequest', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATEREQUEST,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateRequest)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateRequest)
GetEnodebOffloadStateResponse = _reflection.GeneratedProtocolMessageType('GetEnodebOffloadStateResponse', (_message.Message,), {
'EnodebOffloadStatesEntry' : _reflection.GeneratedProtocolMessageType('EnodebOffloadStatesEntry', (_message.Message,), {
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
})
,
'DESCRIPTOR' : _GETENODEBOFFLOADSTATERESPONSE,
'__module__' : 'lte.protos.ha_orc8r_pb2'
# @@protoc_insertion_point(class_scope:magma.lte.GetEnodebOffloadStateResponse)
})
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse)
_sym_db.RegisterMessage(GetEnodebOffloadStateResponse.EnodebOffloadStatesEntry)
DESCRIPTOR._options = None
_GETENODEBOFFLOADSTATERESPONSE_ENODEBOFFLOADSTATESENTRY._options = None
_HA = _descriptor.ServiceDescriptor(
name='Ha',
full_name='magma.lte.Ha',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=420,
serialized_end=534,
methods=[
_descriptor.MethodDescriptor(
name='GetEnodebOffloadState',
full_name='magma.lte.Ha.GetEnodebOffloadState',
index=0,
containing_service=None,
input_type=_GETENODEBOFFLOADSTATEREQUEST,
output_type=_GETENODEBOFFLOADSTATERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HA)
DESCRIPTOR.services_by_name['Ha'] = _HA
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
300,
660,
14,
11235,
418,
14,
3099,
62,
24449,
23,
81,
13,
1676,
1462,
... | 2.422393 | 3,376 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for pre-training representations."""
import os.path as osp
from absl import app
from absl import flags
from absl import logging
from base_configs import validate_config
from ml_collections import config_flags
import torch
from torchkit import CheckpointManager
from torchkit import experiment
from torchkit import Logger
from torchkit.utils.py_utils import Stopwatch
from utils import setup_experiment
from xirl import common
# pylint: disable=logging-fstring-interpolation
FLAGS = flags.FLAGS
flags.DEFINE_string("experiment_name", None, "Experiment name.")
flags.DEFINE_boolean("resume", False, "Whether to resume training.")
flags.DEFINE_string("device", "cuda:0", "The compute device.")
flags.DEFINE_boolean("raw_imagenet", False, "")
config_flags.DEFINE_config_file(
"config",
"base_configs/pretrain.py",
"File path to the training hyperparameter configuration.",
)
@experiment.pdb_fallback
if __name__ == "__main__":
flags.mark_flag_as_required("experiment_name")
app.run(main)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33160,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.420502 | 478 |
from pkg.creator._creator import Creator
from pkg.creator.app_plan._app_plan import FunctionPlanCreator
from pkg.creator.az_resources._az_resources import AzureResourceCreator
from pkg.creator.function_app._function_app import FunctionAppCreator
from pkg.creator.local_function._local_function import LocalFunctionAppCreator
from pkg.creator.storageacnt._storage_account import StorageAccountCreator
from pkg.creator.type._creator_type import CreatorType
from pkg.factory._absfactory import KafkaExtTestAbsFactory
from pkg.enums.language._language import Language
factory_instance = CreatorFactory() | [
6738,
279,
10025,
13,
45382,
13557,
45382,
1330,
21038,
198,
6738,
279,
10025,
13,
45382,
13,
1324,
62,
11578,
13557,
1324,
62,
11578,
1330,
15553,
20854,
16719,
273,
198,
6738,
279,
10025,
13,
45382,
13,
1031,
62,
37540,
13557,
1031,
6... | 3.75625 | 160 |
"""Template tags to make e-mail styling less painful"""
import cssutils
from django import template
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe
register = template.Library()
_styles = None
@register.simple_tag()
| [
37811,
30800,
15940,
284,
787,
304,
12,
4529,
35517,
1342,
12132,
37811,
198,
11748,
269,
824,
26791,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755... | 3.670103 | 97 |
# -*- coding: utf-8 -*-
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, listdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import pandas as pd
import timeit
import cv2
from tqdm import tqdm
import sys
sys.setrecursionlimit(10000)
from multiprocessing import Pool
from shapely.geometry.linestring import LineString
# from skimage.morphology import skeletonize_3d, square, erosion, dilation, medial_axis
# from skimage.measure import label, regionprops, approximate_polygon
from math import hypot, sin, cos, asin, acos, radians
from sklearn.neighbors import KDTree
from shapely.wkt import dumps, loads
import scipy
import utm
#pip install utm
import gdal
gdal.UseExceptions()
import osr
import ogr
#conda install gdal
import ntpath
from shapely.geometry import mapping, Point, LineString
# import matplotlib.pyplot as plt
# import seaborn as sns
pred_folders = ['/wdata/test_pred', '/wdata/test_pred_960']
speed_bins = np.array([15, 18.75, 20, 25, 30, 35, 45, 55, 65])
# test_folders = ['/data/SN5_roads/test_public/AOI_7_Moscow', '/data/SN5_roads/test_public/AOI_8_Mumbai', '/data/SN5_roads/test_public/AOI_9_San_Juan']
test_folders = []
for i in range(1, len(sys.argv) - 1):
test_folders.append(sys.argv[i])
df = pd.read_csv(path.join('/wdata', 'solution_length.csv'), header=None)
df.columns = ['ImageId', 'WKT_Pix']
# example GDAL error handler function
gdal.PushErrorHandler(gdal_error_handler)
# from https://github.com/CosmiQ/cresi
def get_linestring_midpoints(geom):
    '''Midpoints of every segment of a line, plus each segment's length.

    Cartesian coordinates are assumed. Midpoints are rounded to the
    nearest integer pixel with np.rint.

    :param geom: object exposing a ``coords`` sequence of (x, y) pairs
        (e.g. a shapely LineString)
    :return: (x_mids, y_mids, seg_lengths) as int, int and float ndarrays
    '''
    pts = list(geom.coords)
    xm, ym, seg_lens = [], [], []
    # Walk consecutive coordinate pairs (segment endpoints).
    for (ax, ay), (bx, by) in zip(pts, pts[1:]):
        xm.append(np.rint(0.5 * (ax + bx)))
        ym.append(np.rint(0.5 * (ay + by)))
        seg_lens.append(scipy.spatial.distance.euclidean((ax, ay), (bx, by)))
    return (np.array(xm).astype(int),
            np.array(ym).astype(int),
            np.array(seg_lens))
def pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='', targetSR=''):
    '''from spacenet geotools
    Convert a pixel (xPix, yPix) position in inputRaster to a coordinate in
    targetSR (default EPSG:4326, i.e. lon/lat).
    :param xPix: pixel column coordinate
    :param yPix: pixel row coordinate
    :param inputRaster: path to the raster; opened with gdal when the
        geotransform or source spatial reference is not supplied
    :param sourceSR: optional osr.SpatialReference of the raster
    :param geomTransform: optional gdal geotransform (6-element sequence)
    :param targetSR: optional osr.SpatialReference to reproject into
    :return: (x, y) tuple in the target spatial reference
    '''
    # No explicit target: default to WGS84 and skip reprojection below
    # (the affine geotransform alone is assumed to yield lon/lat then).
    if targetSR =='':
        performReprojection=False
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
    else:
        performReprojection=True
    # Geotransform not supplied: read it from the raster itself.
    if geomTransform=='':
        srcRaster = gdal.Open(inputRaster)
        geomTransform = srcRaster.GetGeoTransform()
        # NOTE(review): source_sr is assigned here but never used afterwards;
        # the reprojection branch below derives sourceSR independently.
        source_sr = osr.SpatialReference()
        source_sr.ImportFromWkt(srcRaster.GetProjectionRef())
    geom = ogr.Geometry(ogr.wkbPoint)
    # Affine transform: pixel indices -> georeferenced coordinates.
    xOrigin = geomTransform[0]
    yOrigin = geomTransform[3]
    pixelWidth = geomTransform[1]
    pixelHeight = geomTransform[5]
    xCoord = (xPix * pixelWidth) + xOrigin
    yCoord = (yPix * pixelHeight) + yOrigin
    geom.AddPoint(xCoord, yCoord)
    # Only reproject when the caller supplied a non-default target SR.
    if performReprojection:
        if sourceSR=='':
            srcRaster = gdal.Open(inputRaster)
            sourceSR = osr.SpatialReference()
            sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())
        coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)
        geom.Transform(coord_trans)
    return (geom.GetX(), geom.GetY())
def convert_pix_lstring_to_geo(wkt_lstring, im_file,
                               utm_zone=None, utm_letter=None, verbose=False):
    '''Convert linestring in pixel coords to geo coords
    If zone or letter changes in the middle of line, it's all screwed up, so
    force zone and letter based on first point
    (latitude, longitude, force_zone_number=None, force_zone_letter=None)
    Or just force utm zone and letter explicitly
    :param wkt_lstring: shapely LineString in pixel coordinates
    :param im_file: path to the georeferenced image the pixels refer to
    :param utm_zone: optional UTM zone number to force for every point
    :param utm_letter: optional UTM zone letter to force for every point
    :param verbose: print per-point conversion details when True
    :return: (lstring_latlon, lstring_utm, utm_zone, utm_letter)
        '''
    shape = wkt_lstring  #shapely.wkt.loads(lstring)
    x_pixs, y_pixs = shape.coords.xy
    coords_latlon = []
    coords_utm = []
    for i,(x,y) in enumerate(zip(x_pixs, y_pixs)):
        # Pixel -> lon/lat through the raster's geotransform (EPSG:4326).
        targetSR = osr.SpatialReference()
        targetSR.ImportFromEPSG(4326)
        lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
        if utm_zone and utm_letter:
            [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
                force_zone_number=utm_zone, force_zone_letter=utm_letter)
        else:
            # First point fixes utm_zone/utm_letter; subsequent iterations
            # then take the forced branch above, keeping the zone constant.
            [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
        if verbose:
            print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
                  [lat, lon, utm_east, utm_north, utm_zone, utm_letter])
        coords_utm.append([utm_east, utm_north])
        coords_latlon.append([lon, lat])
    lstring_latlon = LineString([Point(z) for z in coords_latlon])
    lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
meters_to_miles = 0.000621371
###########
if __name__ == '__main__':
    t0 = timeit.default_timer()
    # Last CLI argument is the output file name (written under /wdata).
    out_file = sys.argv[-1]
    # out_file = '/wdata/solution.csv'
    # Collect every GeoTIFF from each test folder's 'PS-MS' sub-directory.
    all_files = []
    for d in test_folders:
        for f in listdir(path.join(d, 'PS-MS')):
            if '.tif' in f:
                all_files.append(path.join(d, 'PS-MS', f))
    # for fn in tqdm(all_files):
    #     process_file(fn)
    # NOTE(review): process_file is not defined in this chunk; presumably it
    # returns a list of result rows per image — confirm in the full file.
    with Pool() as pool:
        results = pool.map(process_file, all_files)
    # Flatten the per-image row lists into a single submission table.
    res_rows = []
    for i in range(len(results)):
        res_rows.extend(results[i])
    sub = pd.DataFrame(res_rows, columns=['ImageId', 'WKT_Pix', 'length_m', 'travel_time_s'])
    sub.to_csv(path.join('/wdata', out_file), index=False, header=False)
    elapsed = timeit.default_timer() - t0
    print('Submission file created! Time: {:.3f} min'.format(elapsed / 60))
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
418,
13,
268,
2268,
14692,
33907,
43,
62,
41359,
62,
4221,
15675,
50,
8973,
796,
366,
16,
1,
220,
198,
418,
13,
268,
2268,
14692,
41359,
6369,
4805,... | 2.192681 | 2,678 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
import json
import random
import re
from ..compat import (
compat_parse_qs,
compat_str,
)
from ..utils import (
js_to_json,
strip_jsonp,
urlencode_postdata,
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
302,
198,
198,
6738,
11485,
... | 2.609524 | 105 |
import os
import sublime
from .. import settings
| [
11748,
28686,
198,
11748,
41674,
198,
198,
6738,
11485,
1330,
6460,
628,
628,
628,
628,
628
] | 3.6875 | 16 |
#!/usr/bin/env pytho
'''
This script parses cisco.txt file and searches for 'crypto map CRYPTO' lines using CCP module.
Then it displays childrens indented multiple times for each element of crypto_map variable (list).
'''
import pprint
from ciscoconfparse import CiscoConfParse
fo = open('cisco.txt', 'r') # Opening text file as FileObject with open() function
parse = CiscoConfParse(fo) # Loading the file to CCF module as argument for parsing later
crypto_map = parse.find_all_children(r'^crypto map CRYPTO')
# In the line above using find_all_children method with regex (parsing)
print 'Show the content of crypto_map variable: \n',crypto_map
print
print 'Show me parent-child relationships for crypto map CRYPTO lines in cisco.txt file: '
# Iterate over elements of crypto_map list and display in a nice human readable format
for line in crypto_map:
# pprint.pprint(line) # Replaced with the below as suggested by Kirk (clean output)
print(line.strip("\n"))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
279,
5272,
78,
198,
7061,
6,
198,
1212,
4226,
13544,
274,
269,
4861,
13,
14116,
2393,
290,
15455,
329,
705,
29609,
78,
3975,
8740,
48232,
10468,
6,
3951,
1262,
37963,
8265,
13,
198,
6423,
340,
1... | 3.320946 | 296 |
from pyesg.configuration import validation_configuration as valid_config
from pyesg.validation.run import validate_simulations
from pyesg.configuration.pyesg_configuration import *
config = PyESGConfiguration()
config.number_of_projection_steps = 30
config.number_of_batches = 1
config.number_of_simulations = 100000
config.projection_frequency = 'annually'
config.random_seed = 128
config.start_date = '2018-01-01'
economy = Economy()
economy.id = "GBP"
asset_class = AssetClass()
asset_class.id = "GBP_Nominal"
asset_class.model_id = 'hull_white'
asset_class.add_parameter('alpha', 0.05)
asset_class.add_parameter('sigma', 0.2 * 0.1)
yc_points = {
0.5: 0.00679070105770901,
1: 0.00745916002218801,
1.5: 0.0079074852733388,
2: 0.00836441669643775,
2.5: 0.00884161282573678,
3: 0.00932762601832977,
3.5: 0.00981445589941161,
4: 0.0102969721178294,
4.5: 0.0107716710398867,
5: 0.0112363849191675,
5.5: 0.0116900851233338,
6: 0.0121325124408309,
6.5: 0.0125637162796559,
7: 0.0129837371605093,
7.5: 0.0133924143022063,
8: 0.0137892855650153,
8.5: 0.0141736214537358,
9: 0.0145445182679629,
9.5: 0.0149010412164557,
10: 0.0152422849420296,
10.5: 0.0155674503497323,
11: 0.0158758864638649,
11.5: 0.0161671188651251,
12: 0.0164409074632115,
12.5: 0.016697217851849,
13: 0.0169361824548138,
13.5: 0.0171580886888855,
14: 0.0173633870307634,
14.5: 0.0175526692648801,
15: 0.0177266234016501,
15.5: 0.0178859783210095,
16: 0.0180314895849257,
16.5: 0.0181639353683754,
17: 0.018284106311916,
17.5: 0.0183927617968095,
18: 0.018490607925128,
18.5: 0.0185782967490554,
19: 0.0186563922209754,
19.5: 0.0187253557221218,
20: 0.018785557677642,
20.5: 0.0188372886488034,
21: 0.0188807683798148,
21.5: 0.0189161404104334,
22: 0.0189434581524923,
22.5: 0.0189627104915117,
23: 0.0189738426838589,
23.5: 0.0189767792253448,
24: 0.0189714599105421,
24.5: 0.018957845218761,
25: 0.0189359147882514,
25.5: 0.0189056816921497,
26: 0.0188672208215708,
26.5: 0.0188206722776286,
27: 0.0187662444763932,
27.5: 0.0187042132382632,
28: 0.0186349225161717,
28.5: 0.0185587809820652,
29: 0.0184762565449625,
29.5: 0.0183878727980299,
30: 0.0182942021898953,
30.5: 0.0181958450182937,
31: 0.0180934206282059,
31.5: 0.0179875657839365,
32: 0.0178789330057234,
32.5: 0.0177681797287789,
33: 0.017655948801948,
33.5: 0.0175428655247506,
34: 0.0174295389236686,
34.5: 0.0173165628801392,
35: 0.0172045112957245,
35.5: 0.0170939187050309,
36: 0.0169852750575237,
36.5: 0.0168790286221742,
37: 0.0167755888037096,
37.5: 0.0166753287335625,
38: 0.0165785875430073,
38.5: 0.0164856688754966,
39: 0.0163968357127189,
39.5: 0.0163123113348747,
40: 0.0162322805072689,
}
for key, value in yc_points.items():
asset_class.add_parameter(f"yc_{key}", value)
asset_class.add_output('GBP_Nominal_Discount_Factor', 'discount_factor')
asset_class.add_output('GBP_Nominal_ZCB_5', 'zero_coupon_bond', term=5)
asset_class.add_output('GBP_Nominal_ZCB_10', 'zero_coupon_bond', term=10)
asset_class.add_output('GBP_Nominal_CMI_5', 'bond_index', term=5)
asset_class.add_output('GBP_Nominal_CMI_10', 'bond_index', term=10)
asset_class.random_drivers.append("GBP_Nominal")
equity_asset_class = AssetClass()
equity_asset_class.id = "GBP_Equities"
equity_asset_class.model_id = 'black_scholes'
equity_asset_class.add_parameter('sigma', 0.2)
equity_asset_class.add_output('GBP_Equities_TRI', 'total_return_index', 1)
equity_asset_class.random_drivers.append("GBP_Equities")
equity_asset_class.dependencies.append("GBP_Nominal")
economy.asset_classes.append(asset_class)
economy.asset_classes.append(equity_asset_class)
config.economies.append(economy)
config.output_file_directory = '/Users/rajivpatel/Desktop'
config.output_file_name = 'yomama'
validation_settings = valid_config.ValidationConfiguration(
output_file_directory='/users/rajivpatel/Desktop/',
output_file_name='test',
)
validation_settings.asset_classes.append(valid_config.AssetClass(
id="GBP_Nominal",
validation_analyses=[
valid_config.ValidationAnalysis(
id='average_discount_factor',
parameters=Parameters(confidence_level=0.95)
),
valid_config.ValidationAnalysis(
id='discounted_zero_coupon_bond',
parameters=Parameters(confidence_level=0.95, terms=[5, 10])
),
valid_config.ValidationAnalysis(
id='discounted_bond_index',
parameters=Parameters(confidence_level=0.95, terms=[5, 10])
)
]
))
validation_settings.asset_classes.append(valid_config.AssetClass(
id="GBP_Equities",
validation_analyses=[
valid_config.ValidationAnalysis(
id='discounted_total_return_index',
parameters=Parameters(confidence_level=0.95)
),
# valid_config.ValidationAnalysis(
# id='total_return_index_log_return_moments'
# )
]
))
validate_simulations(config, validation_settings)
| [
6738,
279,
8505,
70,
13,
11250,
3924,
1330,
21201,
62,
11250,
3924,
355,
4938,
62,
11250,
198,
6738,
279,
8505,
70,
13,
12102,
341,
13,
5143,
1330,
26571,
62,
14323,
5768,
198,
6738,
279,
8505,
70,
13,
11250,
3924,
13,
79,
8505,
70,... | 2.035491 | 2,564 |
from openrec.recommenders import BPR
from openrec.modules.extractions import LatentFactor, MultiLayerFC
import tensorflow as tf
| [
6738,
1280,
8344,
13,
47335,
7338,
1330,
347,
4805,
198,
6738,
1280,
8344,
13,
18170,
13,
2302,
37810,
1330,
5476,
298,
41384,
11,
15237,
49925,
4851,
198,
11748,
11192,
273,
11125,
355,
48700,
198
] | 3.764706 | 34 |
#!/usr/bin/env python
import os
import re
import sys
import glob
import subprocess
from ctypes import c_int
from multiprocessing import Process, Lock, Value, BoundedSemaphore, cpu_count
#---------------------------------------------------------------------
# Extract scenarios from the specified test
#---------------------------------------------------------------------
# Run the specified scenario and print the results
#---------------------------------------------------------------------
# Main program
# Compile some regular expressions
match_pids = re.compile("<\d+\.\d+\.\d+>")
match_refs = re.compile("#Ref<[\d\.]+>")
#match_file = re.compile("suites/.+/src/.*\.erl")
ignore_matches = [match_pids, match_refs]
# Get the directory of Concuerror's testsuite
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
concuerror = os.path.abspath(dirname + "/../concuerror")
results = os.path.abspath(dirname + "/results")
# Cleanup temp files
# TODO: make it os independent
os.system("find %s \( -name '*.beam' -o -name '*.dump' \) -exec rm {} \;"
% dirname)
os.system("rm -rf %s/*" % results)
# Compile scenarios.erl
os.system("erlc %s/scenarios.erl" % dirname)
# If we have arguments we should use them as tests,
# otherwise check them all
if len(sys.argv) > 1:
tests = sys.argv[1:]
tests = [os.path.abspath(item) for item in tests]
else:
tests = glob.glob(dirname + "/suites/*/src/*")
# How many threads we want (default, number of CPUs in the system)
threads = os.getenv("THREADS", "")
if threads == "":
try:
threads = str(cpu_count())
except:
threads = "4"
# Print header
print "Concuerror's Testsuite (%d threads)\n" % int(threads)
print "%-10s %-20s %-50s %s" % \
("Suite", "Test", "(Function, Preemption Bound, Reduction)", "Result")
print "---------------------------------------------" + \
"---------------------------------------------"
# Create share integers to count tests and
# a lock to protect printings
lock = Lock()
total_tests = Value(c_int, 0, lock=False)
total_failed = Value(c_int, 0, lock=False)
sema = BoundedSemaphore(int(threads))
# For every test do
procT = []
for test in tests:
p = Process(target=runTest, args=(test,))
p.start()
procT.append(p)
# Wait
for p in procT:
p.join()
# Print overview
print "\nOVERALL SUMMARY for test run"
print " %d total tests, which gave rise to" % len(tests)
print " %d test cases, of which" % total_tests.value
print " %d caused unexpected failures!" % total_failed.value
# Cleanup temp files
os.system("find %s -name '*.beam' -exec rm {} \;" % dirname)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
850,
14681,
198,
6738,
269,
19199,
1330,
269,
62,
600,
198,
6738,
18540,
305,
919,
278,
1330,
10854,... | 2.946067 | 890 |
import venv
import sys
import pathlib
import platform
from configparser import ConfigParser
CONFIG_PATH: pathlib.Path = pathlib.Path(__file__).parent.parent.parent / "setup.cfg"
PLATFORM = platform.system()
def load_cfg() -> ConfigParser:
    """Read the library's ``setup.cfg`` into a parser.

    :return: a `ConfigParser` populated from ``CONFIG_PATH``
    """
    parser = ConfigParser()
    parser.read(CONFIG_PATH)
    return parser
def create_venv(lib_name: str, py_version: str) -> pathlib.Path:
    """
    creates the new virtual environment under ``~/venvs``

    :param lib_name: name of library
    :param py_version: string representation of two-digit python version (ie 37)
    :return: path to venv
    :raises FileExistsError: if the venv directory already exists
    """
    venv_name = f"{lib_name}-go-{py_version}"
    venv_path = pathlib.Path(f"~/venvs/{venv_name}").expanduser()
    # mkdir(exist_ok=False) already raises FileExistsError on collision; the
    # original try/except that merely re-raised the same error was removed.
    venv_path.mkdir(parents=True, exist_ok=False)
    # Build the environment with pip available and system site-packages
    # visible, matching the original behavior.
    venv.create(env_dir=str(venv_path), with_pip=True, system_site_packages=True)
    return venv_path
def register_venv(activate_path: pathlib.Path, lib_name: str, py_version: str) -> str:
    """
    registers the new environment with a ``~/.bash_aliases`` entry alias for
    easy venv entry (the alias cd's into the repo and sources the venv)

    :param activate_path: path to virtual env activation script
    :param py_version: string representation of two-digit python version (ie 37)
    :param lib_name: name of library
    :return: bash alias to enter venv
    :raises RuntimeError: on platforms other than macOS and Linux
    """
    # Repository root: three levels above this file.
    lib_path: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.absolute()
    bash_alias = f"env_go-{lib_name}-{py_version}"
    command = f'alias {bash_alias}=\'cd "{lib_path}";source "{activate_path}"\''
    # NOTE(review): the Darwin and Linux branches are identical; possibly the
    # macOS branch was meant to target ~/.bash_profile — confirm intent.
    if PLATFORM == "Darwin":
        bash_rc_path = pathlib.Path("~/.bash_aliases").expanduser()
    elif PLATFORM == "Linux":
        bash_rc_path = pathlib.Path("~/.bash_aliases").expanduser()
    else:
        raise RuntimeError("operating system not supported for venv creation")
    if bash_rc_path.exists():
        bash_rc_text = bash_rc_path.read_text()
    else:
        bash_rc_text = ""
    # Idempotent: if the exact alias line is already present, do not rewrite.
    if command in bash_rc_text:
        return bash_alias
    bash_rc_text += (
        f"\n"
        f"\n# {lib_name} development virtual env entry for Python {py_version}"
        f"\n{command}"
    )
    with bash_rc_path.open(mode="w") as f:
        f.write(bash_rc_text)
    return bash_alias
def main() -> None:
    """Build the development virtual environment and emit its bash alias.

    Creates the venv, registers an entry alias in the user's shell config,
    and writes the alias name to stdout.
    """
    major, minor = sys.version_info[:2]
    py_version = f"{major}{minor}"
    lib_name = load_cfg().get("metadata", "name")
    venv_path = create_venv(lib_name, py_version)
    activate_path = venv_path / "bin" / "activate"
    alias = register_venv(activate_path, lib_name, py_version)
    sys.stdout.write(str(alias))
if __name__ == "__main__":
"""creates virtual environment and writes path to stdout"""
main()
| [
11748,
8710,
85,
198,
11748,
25064,
198,
11748,
3108,
8019,
198,
11748,
3859,
198,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
198,
10943,
16254,
62,
34219,
25,
3108,
8019,
13,
15235,
796,
3108,
8019,
13,
15235,
7,
834,
7753,
834,
... | 2.610659 | 1,107 |
import os
from io import StringIO
from pathlib import Path
from tests.common import HouseParameters, HouseStyle, RoofMaterial, test_data_dir
from ext_argparse import process_arguments, save_defaults, dump, add_comments_from_help, process_settings_file
| [
11748,
28686,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
5254,
13,
11321,
1330,
2097,
48944,
11,
2097,
21466,
11,
35271,
17518,
11,
1332,
62,
7890,
62,
15908,
198,
198,
6738,
1070,
62,
853,
... | 3.611111 | 72 |
#############################################################################
_LCG_X = None #
#
#############################################################################
_MIN_SPLIT_FACTOR = 40 #In percent
_MAX_SPLIT_FACTOR = 100 - _MIN_SPLIT_FACTOR #In percent
_MIN_ROOM_WIDTH = 3
_MIN_ROOM_HEIGHT = 3
_SPLITS = 12
_FLOOR_CODE = 'floor'
_WALL_CODE = 'wall'
_DOOR_CODE = 'door'
#############################################################################################################
#############################################################################################################
| [
29113,
29113,
7804,
4242,
2,
198,
62,
5639,
38,
62,
55,
796,
6045,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.608392 | 286 |
#!/usr/bin/env python
#coding=utf8
'''
Created on 2016/9/19
@author: cloudy
'''
from django.conf.urls import url
from . import views
urlpatterns = [
    # performance review home page
    url(r'^index/$',views.index,name='index'),
    # performance trend POST request url
    url(r'^echart/$',views.echart,name='echart'),
    # performance review settings
    url(r'^setting/$',views.setting,name='setting'),
    # performance review scoring page
    url(r'^list/$',views.list,name='list'),
    # performance review scoring page (new version)
    url(r'^newlist/$',views.newlist,name='newlist'),
    # performance review submission
    url(r'^post/$',views.action_post,name='action_post'),
    # performance review submission (completion check)
    url(r'^check/$', views.check_done, name='check_done'),
    # history records
    url(r'^history/$', views.history, name='history'),
    # performance review detail
    #url(r'^detail/$', views.detail, name='detail'),
    # performance review monthly detail
    url(r'^month_detail/$', views.month_detail, name='month_detail'),
    # result viewing
    url(r'^result/$',views.result,name='result'),
]
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
28,
40477,
23,
198,
7061,
6,
198,
41972,
319,
1584,
14,
24,
14,
1129,
198,
198,
31,
9800,
25,
40026,
198,
7061,
6,
198,
6738,
220,
42625,
14208,
13,
10414,
13,
6371,
... | 1.733463 | 514 |
import sys
import cv2
import numpy as np
import traceback
import darknet.python.darknet as dn
from os.path import splitext, basename
from glob import glob
from darknet.python.darknet import detect
from src.label import dknet_label_conversion
from src.utils import nms
if __name__ == '__main__':
    try:
        # Directory of license-plate crops to OCR; results are written here.
        input_dir = sys.argv[1].rstrip('/')
        output_dir = input_dir
        # License-plate string we are searching for.
        lp_target = sys.argv[2]
        # Minimum detection confidence for individual characters.
        ocr_threshold = .4
        # darknet expects byte strings for the network configuration paths.
        ocr_weights = bytes('data/ocr/ocr-net.weights', encoding='utf-8')
        ocr_netcfg = bytes('data/ocr/ocr-net.cfg', encoding='utf-8')
        ocr_dataset = bytes('data/ocr/ocr-net.data', encoding='utf-8')
        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)
        # All cropped plate images follow the "*lp.png" naming scheme.
        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))
        print('Performing OCR...')
        print('Target: %s' % lp_target)
        target_found = False
        for i,img_path in enumerate(imgs_paths):
            # Stop scanning once a previous iteration matched the target.
            if target_found == True:
                print('\tTarget found. Ending OCR...')
                break
            print('\tScanning %s' % img_path)
            bname = basename(splitext(img_path)[0])
            R,(width,height) = detect(ocr_net, ocr_meta, img_path ,thresh=ocr_threshold, nms=None)
            if len(R):
                # Convert raw darknet detections to labels, de-duplicate via
                # non-maximum suppression, and sort characters left-to-right.
                L = dknet_label_conversion(R,width,height)
                L = nms(L,.45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                lp_len = len(lp_str)
                # Only keep plate strings of plausible length (6-7 chars).
                if lp_len >= 6 and lp_len <= 7:
                    print('\t\tLP: %s' % lp_str)
                    if lp_str == lp_target:
                        target_found = True
                        # Erases the trailing substring "_lp" from `bname`.
                        # original format of bname: "out<frame_id>_<object_id><class_name>_lp"
                        # modified format of bname: "out<frame_id>_<object_id><class_name>"
                        bname_target = bname.rsplit('_', 1)[0]
                        with open('%s/target.txt' % (output_dir), 'w') as f:
                            f.write(bname_target + '\n')
                    with open('%s/%s_str.txt' % (output_dir,bname),'w') as f:
                        f.write(lp_str + '\n')
            else:
                print('No characters found')
        if target_found == False:
            print('\tTarget not found. Ending OCR...')
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; consider `except Exception:` if not intended.
        traceback.print_exc()
        sys.exit(1)
    sys.exit(0)
| [
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12854,
1891,
198,
198,
11748,
3223,
3262,
13,
29412,
13,
21953,
3262,
355,
288,
77,
198,
198,
6738,
28686,
13,
6978,
220,
220,
220,
220,
220,
220,
... | 1.822069 | 1,450 |
from typing import List
from pypadre.core.validation.validation import ValidateableFactory
from pypadre.pod.app.base_app import BaseChildApp
from pypadre.pod.repository.i_repository import ICodeRepository
from pypadre.pod.service.code_service import CodeService
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
279,
4464,
324,
260,
13,
7295,
13,
12102,
341,
13,
12102,
341,
1330,
3254,
20540,
540,
22810,
198,
6738,
279,
4464,
324,
260,
13,
33320,
13,
1324,
13,
8692,
62,
1324,
1330,
7308,
16424,
4677,
... | 3.180723 | 83 |
import base64
from direct.directnotify.DirectNotifyGlobal import directNotify
import json
import time
from Crypto.Cipher import AES
UNKNOWN = 700
USER = 100
COMMUNITY_MANAGER = 200
MODERATOR = 300
ARTIST = 400
PROGRAMMER = 500
ADMINISTRATOR = 600
SYSTEM_ADMINISTRATOR = 700
rpcmethod = RPCMethod
| [
11748,
2779,
2414,
198,
6738,
1277,
13,
12942,
1662,
1958,
13,
13470,
3673,
1958,
22289,
1330,
1277,
3673,
1958,
198,
11748,
33918,
198,
11748,
640,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
628,
198,
4944,
44706,
796,
13037,
1... | 2.970588 | 102 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# Roman Yurchak (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import numpy as np
from scipy.constants import N_A
def ionmix_validity(rho, temp, Zbar, Abar):
    """
    Check whether points lie inside the IONMIX EoS validity domain.

    The domain is Ion_density < 1e20 (T/Zbar)**3 cm**-3, which rearranges to a
    minimum temperature of (rho * N_A * Zbar**3 / (1e20 * Abar))**(1/3).

    Parameters
    ----------
    - rho: ndarray: density [g.cm**-3]
    - temp: ndarray: temperature [eV]
    - Zbar: ndarray: average ionization
    - Abar: ndarray or float: average atomic mass

    Returns
    -------
    - ndarray of bool: True where (rho, temp) is inside the validity domain
    """
    # Minimum valid temperature implied by the ion-density bound.
    temp_min = (rho*N_A*Zbar**3/(1e20*Abar))**(1./3)
    return temp >= temp_min
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
31171,
6998,
2321,
198,
2,
7993,
575,
2575,
461,
357,
43,
6239,
40,
8,
198,
2,
770,
3788,
318,
21825,
416,
262,
... | 2.495845 | 361 |
"""
trial is a way of creating a project
features:
- copies the source of the file you're using to run it
(to have a copy of the file)
(WARNING: there can be a race condition if the file changes before the trial
code is run - the earlier you import du, the less likely this is to
occur, due to the cache_inspect function)
- creates a directory where files for the run can be stored without worrying
about collisions
- that directory also has a temporary directory that is cleared out
- allows storing intermediate values, to later be retrieved
- seeds random number generator
- creates a summary file (in trial.yml) with all of the important stored
parameters
WARNING: when canceling a trial run, don't spam CTRL-C, since that can cancel
the clean up operations too
eg.
# in some file:
with run_trial("some_trial_name") as trial:
# printed stuff is stored to _trials_dir_/some_trial_name/1/log.txt
print "Hello world"
# storing values
trial.store_important("foo", 1)
trial.store_important("foo", 2)
trial.get_important("foo") # returns [1, 2]
"""
import os
import sys
import re
import logging
import shutil
import datetime
import inspect
import importlib
import contextlib
from collections import defaultdict
import six
import setproctitle
from . import utils, random_utils, io_utils, yaml_db
def cache_inspect():
    """
    NOTE: seems to cache source code, so that future calls to
    inspect.getsource return the older sources, rather than the
    source when getsource is called
    """
    # Walk every frame on the current call stack and force `inspect` to read
    # (and thereby cache) the source of each frame's module now.
    for record in inspect.getouterframes(inspect.currentframe()):
        frame = record[0]
        module = inspect.getmodule(frame)
        # getmodule() may return None (e.g. for exec'd code); skip those.
        if module is not None:
            inspect.getsource(module)
# Prime the inspect source cache at import time (see cache_inspect's note:
# the earlier this runs, the more likely cached sources match the files as
# they were when the run started).
try:
    cache_inspect()
except Exception as e:
    # catch exception for running for REPL
    # (inspect.getsource fails for the built-in __main__ of an interactive
    # session; any other failure is genuine and is re-raised)
    if str(e) != "<module '__main__'> is a built-in module":
        raise e
def trial_lock(trial_base_path):
    """
    only for making sure no race conditions when adding new iterations

    Returns a file-based lock (io_utils.file_lock) on the hidden ".lock"
    file inside ``trial_base_path``; use as a context manager.
    """
    return io_utils.file_lock(os.path.join(trial_base_path, ".lock"))
def create_trial_dir(trial_base_path, iteration_num, replace_strategy):
    """
    creates a new directory named 'iteration_num' under 'trial_base_dir',
    automatically selecting the next sequential iteration number if
    iteration_num is None.

    :param trial_base_path: directory holding all iterations of this trial
    :param iteration_num: positive int, or None to pick the next free number
    :param replace_strategy: behavior when the iteration dir already exists:
        "force" (overwrite), "ask" (prompt y/n), or None (fail)
    :raises ValueError: on an unknown strategy, or when the directory exists
        and replacing was declined / not allowed

    returns iteration_num because it needs to be chosen with the trial locked
    (if not specified)

    NOTE: the spurious @contextlib.contextmanager decorator was removed —
    the function contains no yield and its caller uses the plain return
    value as an int, which the decorator would have broken.
    """
    with trial_lock(trial_base_path):
        if iteration_num is None:
            iteration_num = get_next_iteration_num(trial_base_path)
        assert iteration_num > 0 and isinstance(iteration_num, int)
        trial_path = path_in_trial_dir(trial_base_path, iteration_num)
        # check if already existing, if so handle appropriately
        if os.path.exists(trial_path):
            if replace_strategy == "force":
                replace = True
            elif replace_strategy == "ask":
                in_str = None
                while in_str not in ["y", "n"]:
                    in_str = six.moves.input(
                        ("%s exists, would you like "
                         "to overwrite? (y/n) ") % trial_path)
                replace = in_str == "y"
            elif replace_strategy is None:
                # FIX: previously assigned `replace_strategy = False`, which
                # left `replace` unbound and raised NameError just below.
                replace = False
            else:
                raise ValueError("replace strategy %s not found"
                                 % replace_strategy)
            if replace:
                shutil.rmtree(trial_path)
            else:
                raise ValueError("Trial already exists: %s" % trial_path)
        os.mkdir(trial_path)
    return iteration_num
@contextlib.contextmanager
def temporarily_add_file_logger_to(filename, loggers):
    """
    adds a FileLogger to each of the given loggers, which causes the
    logger to log to that file as well; the handler is removed again on
    exit, even if the body raises

    eg.
    >>> with temporarily_add_file_logger_to("log.txt", [logger]):
    >>>     logger.info("foo")
    """
    # create log file
    io_utils.guarantee_exists(filename)
    # add log handler (one FileHandler shared by all given loggers)
    file_logger = logging.FileHandler(filename)
    try:
        for logger in loggers:
            # NOTE(review): assumes each logger already has >= 1 handler;
            # setFormatter runs once per logger, so the last logger's
            # formatter is the one the shared handler ends up with — confirm.
            file_logger.setFormatter(logger.handlers[0].formatter)
            logger.addHandler(file_logger)
        yield
    finally:
        # remove log handler
        for logger in loggers:
            logger.removeHandler(file_logger)
@contextlib.contextmanager
def _run_trial_internal(trial_name,
                        iteration_num=None,
                        description=None,
                        snippets=None,
                        trials_dir=utils.config["trial"]["trials_dir"],
                        loggers="default",
                        random_seed=42,
                        trial_runner_string=None,
                        replace_strategy=None):
    """
    Context manager that prepares a trial directory, redirects logging and
    stdout/stderr into the trial log, and yields a TrialState to run.

    FIX: the @contextlib.contextmanager decorator was applied twice; the
    outer application wrapped a function that already returns a context
    manager (not a generator), so entering the context raised at runtime.

    trial_name:
        name of the trial as a string (must match ^[A-Za-z0-9_\\-]+$)
    iteration_num:
        integer of which the iteration in the current trial
        (auto-selected under the trial lock when None)
    description:
        any json encodable object (eg. string or list)
    snippets:
        list of pairs of a snippet name (used to import the snippet) and
        a source file used in this trial; names must be unique and must not
        contain "." or be "trial_runner"
    trials_dir:
        base directory containing all trials.
        NOTE(review): this default is read from utils.config once at import
        time, not per call -- confirm that is intended.
    loggers:
        "default" (uses utils.DEFAULT_LOGGER / utils.SIMPLE_LOGGER) or a
        list of loggers to mirror into the trial's log file
    random_seed:
        seed applied for the duration of the trial
    trial_runner_string:
        if not None, source code saved as src/trial_runner.py
    replace_strategy:
        passed through to create_trial_dir ("force", "ask" or None)
    """
    # handling default values
    if description is None:
        description = []
    if snippets is None:
        snippets = []
    # expand the trials_dir
    trials_dir = os.path.realpath(trials_dir)
    if loggers == "default":
        loggers = [utils.DEFAULT_LOGGER,
                   utils.SIMPLE_LOGGER]
    # validation
    assert re.match(r'^[A-Za-z0-9_\-]+$', trial_name), trial_name
    for snippet_name, snippet_path in snippets:
        assert isinstance(snippet_name, str)
        assert "." not in snippet_name
        assert isinstance(snippet_path, str)
    snippet_names = set([snippet[0] for snippet in snippets])
    assert len(snippets) == len(snippet_names),\
        "Snippet names must be unique"
    assert "trial_runner" not in snippet_names
    # make trials dir if doesn't exist
    io_utils.guarantee_dir_exists(trials_dir)
    trial_base_path = os.path.join(trials_dir, trial_name)
    # make trial base dir if doesn't exist
    io_utils.guarantee_dir_exists(trial_base_path)
    # make yaml_db if doesn't exist
    io_utils.guarantee_exists(os.path.join(trial_base_path, "trial.yml"))
    start_date = datetime.datetime.now()
    # choose / create the iteration directory (under the trial lock)
    iteration_num = create_trial_dir(trial_base_path,
                                     iteration_num,
                                     replace_strategy)
    path_in_this_trial_dir = utils.partial(path_in_trial_dir,
                                           trial_base_path,
                                           iteration_num)
    # file paths
    src_path = path_in_this_trial_dir("src")
    tmp_path = path_in_this_trial_dir("tmp")
    files_path = path_in_this_trial_dir("files")
    params_path = path_in_this_trial_dir("params.yml")
    log_path = path_in_this_trial_dir("log.txt")
    # create directories (tmp is created later by temporary_directory)
    for dirname in [src_path,
                    files_path]:
        os.mkdir(dirname)
    # copy snippet source files into src/
    for snippet_name, snippet_path in snippets:
        new_snippet_path = path_in_this_trial_dir(
            "src", snippet_name + ".py")
        shutil.copy(snippet_path, new_snippet_path)
    # write down the string to create the trial
    if trial_runner_string is not None:
        trial_runner_path = path_in_this_trial_dir("src", "trial_runner.py")
        assert not os.path.exists(trial_runner_path)
        with open(trial_runner_path, 'w') as f:
            f.write(trial_runner_string)
    # writing description to trial db
    with trial_db_iteration_transaction(trial_base_path, iteration_num) as m:
        m["description"] = description
    # create trial state
    trial = TrialState(trial_name=trial_name,
                       iteration_num=iteration_num,
                       trials_dir=trials_dir)
    utils.simple_info("Running trial %s:%d on pid %d"
                      % (trial_name, iteration_num, os.getpid()))
    # FIX: capture the process title before entering the try block, so the
    # finally clause cannot hit a NameError if an early exception occurs.
    proc_title = setproctitle.getproctitle()
    try:
        setproctitle.setproctitle("%s:%d" % (trial_name, iteration_num))
        with random_utils.seed_random(random_seed):
            with io_utils.temporary_directory(tmp_path):
                with temporarily_add_file_logger_to(log_path, loggers):
                    # capture stdout and stderr as well as loggers,
                    # so that all printing gets logged
                    with io_utils.Tee(log_path,
                                      "a",
                                      "stderr",
                                      auto_flush=True):
                        with io_utils.Tee(log_path,
                                          "a",
                                          "stdout",
                                          auto_flush=True):
                            # execute trial
                            yield trial
    finally:
        # save params / state to persistent storage
        trial.dump()
        io_utils.yaml_dump(dict(
            trial_name=trial_name,
            iteration_num=iteration_num,
            snippets=snippets,
            description=description,
            random_seed=random_seed,
            start_date=str(start_date),
            end_date=str(datetime.datetime.now())
        ),
                           params_path)
        # restore the original process title
        setproctitle.setproctitle(proc_title)
def _get_source_of_caller(additional_frames=0):
    """
    Return the source code of the module that called the function which
    called this one (i.e. two frames up the stack), optionally skipping
    ``additional_frames`` extra frames.
    """
    # stack introspection via inspect; this is somewhat fragile and does
    # not work from a REPL
    frames = inspect.getouterframes(inspect.currentframe())
    # index 0 is this function, index 1 is the calling function; the
    # frame of interest is past both of those
    target_frame = frames[2 + additional_frames][0]
    return inspect.getsource(inspect.getmodule(target_frame))
def run_trial(*args, _run_trial_additional_frames=0, **kwargs):
    """
    Wrapper around _run_trial_internal that reads the source of the file it
    was called from and records that source with the trial.

    see docstring of _run_trial_internal for arguments
    """
    assert "trial_runner_string" not in kwargs
    caller_source = _get_source_of_caller(_run_trial_additional_frames)
    # sanity check that the caller's source was actually captured
    assert "run_trial(" in caller_source
    return _run_trial_internal(*args,
                               trial_runner_string=caller_source,
                               **kwargs)
def run_trial_function(trial_function, args=None, kwargs=None, **_kwargs):
    """
    Wrapper around _run_trial_internal that records the source code of the
    given function, then calls it with the running trial's TrialState.

    args:
        positional arguments to pass into trial_function
    kwargs:
        keyword arguments to pass into trial_function
    see docstring of _run_trial_internal for arguments
    """
    assert "trial_runner_string" not in _kwargs
    positional = () if args is None else args
    keyword = {} if kwargs is None else kwargs
    source_lines, _ = inspect.getsourcelines(trial_function)
    func_source = "".join(source_lines)
    with _run_trial_internal(trial_runner_string=func_source,
                             **_kwargs) as trial:
        return trial_function(trial, *positional, **keyword)
| [
37811,
198,
45994,
318,
257,
835,
286,
4441,
257,
1628,
198,
198,
40890,
25,
198,
12,
9088,
262,
2723,
286,
262,
2393,
345,
821,
1262,
284,
1057,
340,
198,
220,
357,
1462,
423,
257,
4866,
286,
262,
2393,
8,
198,
220,
357,
31502,
2... | 2.322706 | 4,893 |
import envexamples  # modifies path so the local raytracing package is importable
from raytracing import *

"""
The Lagrange invariant is a constant defining the collection efficiency of an optical system. The Lagrange
invariant is calculated using the principal and axial rays, whether the optical invariant is calculated with
another combination of rays. This code uses the optical invariant to characterize the ray transmission in a
4f system and shows that the optical invariant is greatly affected by the used optics. Indeed, changing the
diameter of the first lens affects the number of detected rays at the imaged plane.
"""

# 4f system with two 1-inch (25.4 mm) lenses, f1=10 and f2=20:
# report the collection efficiency and render the ray diagram.
path = ImagingPath()
path.design(fontScale=1.7)
path.append(System4f(f1=10, diameter1=25.4, f2=20, diameter2=25.4))
path.reportEfficiency()
path.display(interactive=False)

# Same 4f system with the first lens diameter halved (12.7 mm), to show the
# effect of the smaller aperture on the reported efficiency.
path2 = ImagingPath()
path2.design(fontScale=1.5)
path2.append(System4f(f1=10, diameter1=12.7, f2=20, diameter2=25.4))
path2.reportEfficiency()
path2.display(interactive=False)
11748,
551,
303,
87,
12629,
220,
1303,
953,
6945,
3108,
198,
6738,
26842,
2213,
4092,
1330,
1635,
198,
198,
37811,
198,
464,
21003,
9521,
25275,
415,
318,
257,
6937,
16215,
262,
4947,
9332,
286,
281,
18480,
1080,
13,
383,
21003,
9521,
... | 3.448529 | 272 |
from .methods import Bisection, Newton, Secant
from .function_examples import f_root
# Demo entry point: run one root-finding method on the example problem.
# NOTE(review): f_root is unpacked into both the constructor and solve();
# presumably it holds keyword arguments accepted by both -- confirm against
# the definitions in .methods / .function_examples.
if __name__ == '__main__':
    # change the method and function as desired
    v = Newton(**f_root).solve(**f_root)
    print(v)
6738,
764,
24396,
82,
1330,
38045,
3213,
11,
17321,
11,
1882,
415,
198,
6738,
764,
8818,
62,
1069,
12629,
1330,
277,
62,
15763,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
1487,
262,
... | 2.828947 | 76 |
from .driver import DriverMixin as DriverMixin
from .process import ProcessMixin as ProcessMixin
from .call_api import CallApiMixin as CallApiMixin
from .dependent import DependentMixin as DependentMixin
| [
6738,
764,
26230,
1330,
12434,
35608,
259,
355,
12434,
35608,
259,
198,
6738,
764,
14681,
1330,
10854,
35608,
259,
355,
10854,
35608,
259,
198,
6738,
764,
13345,
62,
15042,
1330,
4889,
32,
14415,
35608,
259,
355,
4889,
32,
14415,
35608,
... | 3.642857 | 56 |
import mock
import ddt
import six
from openstackinabox.tests.base import TestBase
from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.model import (
schema,
KeystoneModel
)
@ddt.ddt
@ddt.ddt
| [
11748,
15290,
198,
198,
11748,
288,
28664,
198,
11748,
2237,
198,
198,
6738,
1280,
25558,
259,
397,
1140,
13,
41989,
13,
8692,
1330,
6208,
14881,
198,
198,
6738,
1280,
25558,
259,
397,
1140,
13,
27530,
13,
2539,
6440,
1330,
13269,
198,
... | 2.827586 | 87 |
# -*- coding: utf-8 -*-
# Scrapy settings for GubaCrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'GubaCrawler'
SPIDER_MODULES = ['GubaCrawler.spiders']
NEWSPIDER_MODULE = 'GubaCrawler.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# NOTE(review): delay is effectively zero; request pacing is actually
# governed by the AutoThrottle settings below.
DOWNLOAD_DELAY = 0.0001
# DOWNLOAD_TIMEOUT = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 32
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
    # 'GubaDC.middlewares.GubadcSpiderMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# The built-in UserAgent and Cookies middlewares are disabled (None);
# scrapy_splash's cookie middleware and the custom GubaDownloaderMiddleware
# are installed instead.
DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
    'GubaCrawler.middlewares.GubaDownloaderMiddleware': 843,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# #    'scrapy.extensions.telnet.TelnetConsole': None,
# #    'scrapy.extensions.throttle.AutoThrottle': 561,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'GubaCrawler.pipelines.GubaPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 0.0001
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 20
# Enable showing throttling stats for every response received:
# NOTE(review): this is verbose; consider disabling for production crawls.
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 1
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Redirects are not followed; the Splash-aware dedup filter replaces the default.
REDIRECT_ENABLED = False
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
LOG_LEVEL = 'INFO'
# Local Splash (headless browser) endpoint used by scrapy_splash.
SPLASH_URL = "http://localhost:8050/"
# JOBDIR = '/home/alex/桌面/Python/Project/Spider_Project/GubaCrawler/GubaCrawler/job_info'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1446,
2416,
88,
6460,
329,
402,
22013,
34,
39464,
1628,
198,
2,
198,
2,
1114,
21654,
11,
428,
2393,
4909,
691,
6460,
3177,
1593,
393,
198,
2,
8811,
973,
13,
... | 2.733071 | 1,270 |
# Progress journal -- interactive note-taking script.
import pickle
from datetime import *
import os.path
# Helper functions.
# NOTE(review): Save() and Isfile() are called below but are not defined in
# this chunk; presumably they are defined elsewhere in this file -- confirm.
# Does the file already exist?
print("-- Progress --")
clock = datetime.now()
line = 0
file = "File/progress_{}.md".format(clock.day) # initialize the file name (one file per day-of-month)
texts = ''
while True:
    line += 1
    text = input("%d: "%line)
    if text == "q": # save, then quit
        Save(texts, file)
        exit()
    if text == "q!": # quit without saving (after confirmation)
        isfile = Isfile(file)
        if isfile == 1:
            exit()
        else:
            print("=> 저장을 하지 말까요? <=")
            dist = str(input("y/n: "))
            if dist == "y":
                exit()
            else:
                Save(texts, file)
                isfile = Isfile(file)
                if isfile == 1:
                    print("=> 저장되었어요 <=")
                    exit()
                else:
                    print("=> Error <=")
                    exit()
    l = text.split() # Str -> List
    # NOTE(review): l[-1] raises IndexError when the input line is empty -- confirm intended.
    l[-1] += "\n"
    text = ' '.join(l)
    texts += text
    Save(texts, file)
2,
220,
166,
116,
108,
167,
94,
251,
168,
252,
98,
357,
33723,
8,
198,
11748,
2298,
293,
198,
6738,
4818,
8079,
1330,
1635,
198,
11748,
28686,
13,
6978,
198,
2,
23821,
252,
239,
168,
226,
109,
47991,
101,
168,
230,
246,
198,
2,
... | 1.476048 | 668 |
from typing import List
from project.medicine.medicine import Medicine
from project.supply.supply import Supply
from project.survivor import Survivor
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
1628,
13,
1150,
291,
500,
13,
1150,
291,
500,
1330,
11558,
198,
6738,
1628,
13,
18608,
306,
13,
18608,
306,
1330,
22663,
198,
6738,
1628,
13,
48846,
452,
273,
1330,
23740,
628
] | 3.897436 | 39 |
# Script entry point: run main() (defined elsewhere in this file) only when
# executed directly, not when imported.
if __name__ == '__main__':
    main()
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419
] | 2.157895 | 19 |
'''Author: Brandon Trabucco, Copyright 2019
Helper functions to display and run a simple game'''
from game_engine.colors import *
from game_engine.drawable import Drawable
from game_engine.entity import Entity
from game_engine.interactables import *
####################################
# lets make some game tiles to use #
####################################
| [
7061,
6,
13838,
25,
14328,
833,
397,
18863,
78,
11,
15069,
13130,
198,
47429,
5499,
284,
3359,
290,
1057,
257,
2829,
983,
7061,
6,
628,
198,
6738,
983,
62,
18392,
13,
4033,
669,
1330,
1635,
198,
6738,
983,
62,
18392,
13,
19334,
540,... | 3.796117 | 103 |
"""This module comprises PatchCore Sampling Methods for the embedding.
- k Center Greedy Method
Returns points that minimizes the maximum distance of any point to a center.
. https://arxiv.org/abs/1708.00489
"""
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from anomalib.core.model.random_projection import SparseRandomProjection
class KCenterGreedy:
    """k-center-greedy coreset selection.

    Greedily picks samples so that the maximum distance from any point to
    its nearest selected center is minimized.

    Args:
        embedding (Tensor): Embedding vector extracted from a CNN
        sampling_ratio (float): Ratio to choose coreset size from the embedding size.

    Example:
        >>> embedding.shape
        torch.Size([219520, 1536])
        >>> sampler = KCenterGreedy(embedding=embedding)
        >>> sampled_idxs = sampler.select_coreset_idxs()
        >>> coreset = embedding[sampled_idxs]
        >>> coreset.shape
        torch.Size([219, 1536])
    """

    def reset_distances(self) -> None:
        """Forget all previously computed minimum distances."""
        self.min_distances = None

    def update_distances(self, cluster_centers: List[int]) -> None:
        """Fold the distances to the given cluster centers into the running minimum.

        Args:
            cluster_centers (List[int]): indices of cluster centers
        """
        if not cluster_centers:
            return
        center_features = self.features[cluster_centers]
        new_distances = F.pairwise_distance(self.features, center_features, p=2).reshape(-1, 1)
        if self.min_distances is None:
            self.min_distances = new_distances
        else:
            self.min_distances = torch.minimum(self.min_distances, new_distances)

    def get_new_idx(self) -> int:
        """Return the index of the sample farthest from all current centers.

        Raises:
            ValueError: when distances have not been computed yet.
        """
        if not isinstance(self.min_distances, Tensor):
            raise ValueError(f"self.min_distances must be of type Tensor. Got {type(self.min_distances)}")
        return int(torch.argmax(self.min_distances).item())

    def select_coreset_idxs(self, selected_idxs: Optional[List[int]] = None) -> List[int]:
        """Greedily pick coreset indices minimizing the max distance to a center.

        Args:
            selected_idxs: index of samples already selected. Defaults to an empty set.

        Returns:
            indices of samples selected to minimize distance to cluster centers
        """
        seed_idxs = [] if selected_idxs is None else selected_idxs

        # 2-D embeddings are first reduced via the sparse random projection
        # model; anything else is flattened per sample.
        if self.embedding.ndim == 2:
            self.model.fit(self.embedding)
            self.features = self.model.transform(self.embedding)
            self.reset_distances()
        else:
            self.features = self.embedding.reshape(self.embedding.shape[0], -1)
            self.update_distances(cluster_centers=seed_idxs)

        chosen: List[int] = []
        # start from a random observation, then repeatedly take the
        # farthest remaining point
        idx = int(torch.randint(high=self.n_observations, size=(1,)).item())
        for _ in range(self.coreset_size):
            self.update_distances(cluster_centers=[idx])
            idx = self.get_new_idx()
            if idx in seed_idxs:
                raise ValueError("New indices should not be in selected indices.")
            self.min_distances[idx] = 0
            chosen.append(idx)
        return chosen

    def sample_coreset(self, selected_idxs: Optional[List[int]] = None) -> Tensor:
        """Select and return the coreset rows of the embedding.

        Args:
            selected_idxs: index of samples already selected. Defaults to an empty set.

        Returns:
            Tensor: Output coreset

        Example:
            >>> embedding.shape
            torch.Size([219520, 1536])
            >>> sampler = KCenterGreedy(...)
            >>> coreset = sampler.sample_coreset()
            >>> coreset.shape
            torch.Size([219, 1536])
        """
        picked = self.select_coreset_idxs(selected_idxs)
        return self.embedding[picked]
| [
37811,
1212,
8265,
28800,
17106,
14055,
3409,
11347,
25458,
329,
262,
11525,
12083,
13,
198,
198,
12,
479,
3337,
11955,
4716,
11789,
198,
220,
220,
220,
16409,
2173,
326,
10356,
4340,
262,
5415,
5253,
286,
597,
966,
284,
257,
3641,
13,
... | 2.321614 | 1,735 |
# -*- coding: utf-8 -*-
## File = 'myTime.py"
#
# A python script to return the time and
# date from the computer when called.
# Written by Merrill Jenkins 2014Dec17
# Department of Physics
# University of South Alabama
# Mobile, AL 36688
# Used "Core Python Programming", Page 49
# And "Python Essential Reference", Page 405
##
## Modified by cmj2021May11 to remove tabs for blocked statements
#
# To use:
# import myTime *
# t = myTime()
# t.getComputerTime() # call at instant you want time
#   t.getCalendarDate()   # returns string with calendar date
# t.getClockTime() # returns as string with the clock time
# To test, just run this file (with stand alone test program at the end)
# python myTime.py
# Other functions available:
# getYear() # returns string with year
# getMon() # returns string with month
# getDay() # returns string with day
# getHour() # returns string with hour
# getSec() # returns string with second
# getDayOfWeek() # returns string with the day of the week
# getTimeZone() # returns string with the time zone
# getInternationalCalendarDate() # returns string with calendar date and time zone
# getInternationalClockTime() # returns string with time and time zone
#   getTimeForSavedFiles()    # returns string with calendar date and time
#                             #  in format to use with file name
#   getInternationalTimeForSavedFiles()  # returns string with calendar date,
#                             #  time and time zone in format to use
# # with file name
#
## To transmit any changes to the dependent
## python scripts, complete these steps in the
## Utilities directory:
## > rm Utilities.zip
## > zip -r Utilities.zip *.py
## Modified by cmj2021Mar1.... Convert from python2 to python3: 2to3 -w *.py
## Modified by cmj2021Mar1.... replace dataloader with dataloader3
## Modified by cmj2021May11... replace tabs with spaces for block statements to convert to python 3
#
#!/bin/env python
from time import *
#
# Get the computer time at the instant this method is called
# This time is decoded into useful strings with the other methods
#
# Test as stand alone.... This program only runs if this script is run in the command line:
# python myTime.py
if __name__ == "__main__":
    # Exercise every accessor of the myTime class (defined earlier in this
    # file) and print the results.
    whatTimeIsIt = myTime()
    whatTimeIsIt.getComputerTime()
    print('Date = %s' % whatTimeIsIt.getDate())
    print('Year = %s' % whatTimeIsIt.getYear())
    print('Month = %s' % whatTimeIsIt.getMonth())
    print('Day = %s' % whatTimeIsIt.getDay())
    print('Hour = %s' % whatTimeIsIt.getHour())
    print('Minute = %s' % whatTimeIsIt.getMin())
    print('Second = %s' % whatTimeIsIt.getSec())
    print('Day of the Week = %s' % whatTimeIsIt.getDayOfWeek())
    print('TimeZone = %s' % whatTimeIsIt.getTimeZone())
    print() ; print()
    print('------------------------- ')
    print('Calendar = %s' % whatTimeIsIt.getCalendarDate())
    print('Calendar with Day of Week = %s' % whatTimeIsIt.getCalendarDateWithDay())
    print('Clock Time = %s' % whatTimeIsIt.getClockTime())
    print('International Clock Time = %s' % whatTimeIsIt.getInternationalClockTime())
    print('FileNameFragment = %s' % whatTimeIsIt.getTimeForSavedFiles())
    print('FileNameFragment (with Time Zone) = %s' % whatTimeIsIt.getInternationalTimeForSavedFiles())
    ## Test that clock is updated after each call:
    from time import sleep
    print(); print(); print(' ============================ ')
    for mm in range(1,10):
        whatTimeIsIt.getComputerTime()
        # FIX: the locals were named myDate/myTime; `myTime` shadowed the
        # myTime class itself inside the loop. Renamed to avoid the shadow.
        current_date = whatTimeIsIt.getCalendarDateWithDay()
        current_time = whatTimeIsIt.getInternationalClockTime()
        print('%s Clock Time = %s' % (current_date, current_time))
        sleep(1.0)  # sleep one second between iterations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2235,
9220,
796,
705,
1820,
7575,
13,
9078,
1,
198,
2,
198,
2,
220,
220,
220,
220,
220,
317,
21015,
4226,
284,
1441,
262,
640,
290,
220,
198,
2,
220,
220,
220,
220... | 2.43379 | 1,752 |
#!/usr/bin/python
import execnet, execnet.gateway, execnet.multi
# Attach makeportgateway to execnet's Group class and expose it at module
# level, mirroring how execnet publishes makegateway.
# NOTE(review): makeportgateway is not defined in this chunk; it is
# presumably defined earlier in this file -- confirm.
execnet.multi.Group.makeportgateway = makeportgateway
execnet.makeportgateway = execnet.multi.default_group.makeportgateway
# originally posted as http://code.activestate.com/recipes/577545-monkey-patch-execnet-with-more-ssh-settings-port-i/
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
2452,
3262,
11,
2452,
3262,
13,
10494,
1014,
11,
2452,
3262,
13,
41684,
198,
18558,
3262,
13,
41684,
13,
13247,
13,
15883,
634,
10494,
1014,
796,
787,
634,
10494,
1014,
198,
18558,
326... | 2.990291 | 103 |
# Generate .lang translation files from the Japanese source file, one per
# target language listed in langs.same.
src_lang = ["ja_JP","ja"]
import os
from langs import langs, same
cd=os.path.join(os.getcwd(),"language")
#cd = "/home/maruo/ドキュメント/program/Marutools/language"
import babel, googletrans, json, os, sys
os.chdir(cd)
if input(f'The lang file will be save at "{cd}". Is it OK? [y/n]')!="y": exit()
translator=googletrans.Translator()
# load the source language strings (ja_JP.lang, JSON-formatted)
src = json.load(open(src_lang[0]+".lang","r", encoding="utf8"))
for glang, slangs in same.items():
    try:
        print(glang, end="", flush=True)
        # NOTE(review): translate() is not defined in this chunk; presumably
        # defined elsewhere in this file -- confirm.
        translate(slangs[0], glang)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        print("...error!!")
    else:
        print("...done")
10677,
62,
17204,
796,
14631,
6592,
62,
12889,
2430,
6592,
8973,
198,
11748,
28686,
198,
6738,
300,
27725,
1330,
300,
27725,
11,
976,
198,
10210,
28,
418,
13,
6978,
13,
22179,
7,
418,
13,
1136,
66,
16993,
3419,
553,
16129,
4943,
198,
... | 2.303502 | 257 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from matplotlib.cbook import get_sample_data
import matplotlib.patches as mpatches
def add_legends_val_fuel(figure, save):
    '''
    Adds legends and region labels to the assembly model validation
    problem mesh figure.

    Parameters:
    -----------
    figure: [string]
        name of figure to add legends
    save: [string]
        name of the new figure
    '''
    legend_patches = [
        mpatches.Patch(color=(1., 0., 0.), label='Fuel'),
        mpatches.Patch(color=(0., 1., 0.), label='Gap'),
        mpatches.Patch(color=(0.63, 0.63, 0.63), label='Moderator'),
        mpatches.Patch(color=(1., 1., 0.), label='Film'),
        mpatches.Patch(color=(0., 0., 1.), label='Coolant'),
    ]
    image_file = get_sample_data('{0}/{1}.png'.format(os.getcwd(), figure))
    plt.imshow(plt.imread(image_file))
    plt.legend(handles=legend_patches, loc="lower right")
    # (x, y, text, fontsize, color) for each region/corner label
    annotations = [
        (3, 616, '1', 15, 'w'),
        (3, 490, '2', 16, 'w'),
        (3, 365, '3', 16, 'w'),
        (3, 235, '4', 16, 'w'),
        (3, 110, '5', 16, 'w'),
        (90, 550, '6', 16, 'w'),
        (102, 430, '7', 16, 'w'),
        (102, 300, '8', 16, 'w'),
        (102, 174, '9', 16, 'w'),
        (170, 360, '10', 16, 'w'),
        (200, 235, '11', 16, 'w'),
        (200, 108, '12', 16, 'w'),
        (280, 170, '13', 16, 'w'),
        (170, 0, 'gap', 16, 'black'),
        (-20, 780, 'A', 20, 'black'),
        (-20, 0, 'B', 20, 'black'),
        (420, 0, 'C', 20, 'black'),
    ]
    for text_x, text_y, label, size, color in annotations:
        plt.text(x=text_x, y=text_y, s=label, fontsize=size, color=color)
    plt.axis('off')
    plt.savefig(save, dpi=300, bbox_inches="tight")
def _plot_temperature_comparison(line, yticks=None):
    '''
    Plot the no-gap ('input') vs 3 mm gap ('input-g') temperature profiles
    along one line of the assembly model and save the figure. (Private
    helper extracted from plot_val_assem_results to remove duplication.)

    Parameters:
    -----------
    line: [string]
        line identifier, e.g. 'AB' or 'AC'; selects the csv files
        'input_line<line>_0002.csv' / 'input-g_line<line>_0002.csv' and the
        output name 'val-assem-line-<line>'
    yticks: [array-like or None]
        explicit y tick positions, or None to keep the default ticks
    '''
    plt.figure()
    for prefix, label in (('input', '0 mm'), ('input-g', '3 mm')):
        data = pd.read_csv('{0}_line{1}_0002.csv'.format(prefix, line))
        plt.plot(data['y'].tolist(), data['temp'].tolist(), label=label)
    plt.legend(loc='upper right', fontsize=14)
    plt.xticks(fontsize=14)
    if yticks is not None:
        plt.yticks(yticks, fontsize=14)
    else:
        plt.yticks(fontsize=14)
    plt.xlabel('Distance from point A [cm]', fontsize=14)
    plt.ylabel(r'Temperature [$^{\circ}$C]', fontsize=14)
    plt.savefig('val-assem-line-{0}'.format(line), dpi=300, bbox_inches="tight")
    plt.close()


def plot_val_assem_results():
    '''
    Plots assembly model results.
    Two plots:
        - Temperature on line AB
        - Temperature on line AC
    Includes case with no gap and case with 3mm gap.
    '''
    # FIX: the four near-identical read/plot sections were collapsed into
    # one parameterized helper; output files and styling are unchanged.
    _plot_temperature_comparison('AB', yticks=np.linspace(900, 1050, 6))
    _plot_temperature_comparison('AC')
# Script entry point: regenerate the annotated mesh figure and the result
# plots when executed directly.
if __name__ == "__main__":
    # adds legend to mesh figure
    add_legends_val_fuel('mesh', 'val-assem-mesh')
    # plot results
    plot_val_assem_results()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
6738,
2603,
29487,
8019,
13,
66,
2070,
1330,
651,
62,
39873,
62,
7890,
198,
117... | 2.187169 | 1,699 |
# proxy module
from __future__ import absolute_import
from tvtk.indenter import *
| [
2,
15741,
8265,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
31557,
30488,
13,
521,
9255,
1330,
1635,
198
] | 3.727273 | 22 |
import combination_builder as Combine
# 1. Set Values and Initialize a Combinations Object
map_filepath = "Example_Files\\ExamplePlatemap.txt"
concentration_file = "Example_Files\\Example_Final_Concs.csv"
save_filepath = "Example_Files\\ExampleOutput3.csv"
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
backfill_wells = Combine.generate_well_range("A21", "P24")
control_wells = Combine.generate_well_range("A1","P2")
control_wells.extend(Combine.generate_well_range("A13","P14"))
static_transfer_volume = 100
assay_volume = 30
combination_max = 3
substance_id_regex = r'SJ[0-9-]+'
# Initialize the object
exp = Combine.Combinations()
# 2. Load the plate map
exp.load_platemap(map_filepath, substance_id_regex)
# 3. Setup the backfill wells - Comment/Uncomment as needed
# Option 1: Manually supply a list of wells
# This is fine for a small number of wells
# wells = ["A21", "A22", "A23", "A24", "B21", "B22", "B23", "B24"]
# Option 2: Generate well list from start and stop wells
# This option is good for a large number of wells
# List comprehension is required to get well alphas
wells = [x[0] for x in backfill_wells]
# Set backfill wells is specific to individual plates
# Repeat for all plates with backfill wells
exp.platemap.plates["E3P00000776"].set_backfill_wells(wells)
# 4. Set up Combinations - Comment/Uncomment as needed
# Option 1: Supply a manually curated list of combinations
# List compounds in separate columns, any number of
# columns is supported, header and any compound not
# in the platemap are skipped
# combinations_filepath = "Combination Template.csv"
# exp.load_platemap(combinations_filepath)
# Option 2: Calculate all permutations in the script
# Specify how many compounds to include in each combination
exp.generate_combinations(combination_max)
# 5. Set transfer volume or assay conditions
# Option 1: Set a static volume for all substances
# Volume is in nanoliters - All combinations will be
# the 1:1:1 volume ratios
# exp.set_transfer_volume(static_transfer_volume)
# Option 2: Set assay volume and assay concentration
# Assay volume is in microliters
# Assay concentration(s) must be supplied
exp.set_assay_volume(assay_volume)
# Set a constant concentration for all substances
# exp.set_assay_concentration(conc=50, unit="mM")
# Or set each concentration idependently with a csv file
exp.set_assay_concentration(file=concentration_file)
# 6. Configure assay plate layout
exp.reserve_control_wells([w[0] for w in control_wells])
# 7. Create the transfer list
exp.create_transfers()
# 8. Sort transfer list for optimized transfer speed
exp.sort_transfers()
# 9. Save transfer list - Echo formatted CSV file
exp.save_transfers(save_filepath)
# 10. Save *.cmt file - Screener Mapping File
# OPTIONAL - Set replicate number to create replicate
# plates with the same plate mapping and concentrations
exp.save_cmt(cmt_filepath, 3)
# IN A NEW SESSION
# After using the Echo CSV to transfer samples
#
# 11. Update CMT with barcodes after performing transfers
import src.combination_builder.Combinations as Combine
cmt_filepath = "Example_Files\\ExampleOutput4.cmt"
barcode_filepath = "Example_Files\\Barcode_List.csv"
# Update barcodes
Combine.update_CMT_barcodes(cmt_filepath, barcode_filepath)
| [
11748,
6087,
62,
38272,
355,
29176,
628,
198,
2,
352,
13,
5345,
27068,
290,
20768,
1096,
257,
14336,
7352,
9515,
198,
8899,
62,
7753,
6978,
796,
366,
16281,
62,
25876,
6852,
16281,
3646,
23900,
499,
13,
14116,
1,
198,
1102,
1087,
1358... | 2.983275 | 1,136 |
from .feature import assert_feature_mapping, Feature
__all__ = [
'Backness',
'BacknessCategory',
]
class Backness(Feature):
    """
    Vowel backness values, ordered front-most to back-most.

    https://en.wikipedia.org/wiki/International_Phonetic_Alphabet#Vowels
    """
    FRONT = 'front'
    NEAR_FRONT = 'near-front'
    CENTRAL = 'central'
    NEAR_BACK = 'near-back'
    BACK = 'back'
# Map each Backness value onto its coarse three-way category (front /
# central / back); assert_feature_mapping validates the mapping is total.
# NOTE(review): BacknessCategory is listed in __all__ but not defined in
# this chunk; presumably defined elsewhere in this module -- confirm.
BACKNESS_TO_CATEGORY = assert_feature_mapping({
    Backness.FRONT: BacknessCategory.ABOUT_FRONT,
    Backness.NEAR_FRONT: BacknessCategory.ABOUT_FRONT,
    Backness.CENTRAL: BacknessCategory.ABOUT_CENTRAL,
    Backness.NEAR_BACK: BacknessCategory.ABOUT_BACK,
    Backness.BACK: BacknessCategory.ABOUT_BACK,
})
| [
6738,
764,
30053,
1330,
6818,
62,
30053,
62,
76,
5912,
11,
27018,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
7282,
1108,
3256,
198,
220,
220,
220,
705,
7282,
1108,
27313,
3256,
198,
60,
628,
198,
198,
4871,
5157,
... | 2.486792 | 265 |
# coding: utf-8
import sys
import os
from datetime import datetime
import time
from mylibs import my_fileList
from mylibs import my_csv
# Script entry point: run main() (defined elsewhere in this file) only when
# executed directly.
if __name__ == "__main__":
    main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,
6738,
616,
8019,
82,
1330,
616,
62,
7753,
8053,
198,
6738,
616,
8019,
82,
1330,
616,
62,
40664... | 2.854839 | 62 |
from tdlogging.tdlogger import TDLogger
from tdlogging.tdprinter import TDPrinter, BoxPrinter, OneLinerPrinter, CoolPrinter
logger = TDLogger(alias="My Custom Logger", printer=CoolPrinter()).config()
| [
6738,
41560,
6404,
2667,
13,
8671,
6404,
1362,
1330,
13320,
11187,
1362,
198,
6738,
41560,
6404,
2667,
13,
8671,
1050,
3849,
1330,
309,
6322,
81,
3849,
11,
8315,
6836,
3849,
11,
1881,
43,
7274,
6836,
3849,
11,
15226,
6836,
3849,
198,
... | 3.029851 | 67 |
"""
subscribe.py
Subscribe to TACC apis
"""
import getpass
import requests
from .exceptions import AgaveClientError
from ..utils import handle_bad_response_status_code
def clients_subscribe(username, client_name, tenant_url,
        api_name, api_version, api_provider):
    """Subscribe an oauth client to an api.

    Parameters
    ----------
    username : str
        Account username used for HTTP basic auth.
    client_name : str
        Name of the oauth client to subscribe; must be non-empty.
    tenant_url : str
        Base URL of the tenant, e.g. "https://api.tacc.utexas.edu".
    api_name, api_version, api_provider : str
        Identify the api to subscribe the client to.

    Raises
    ------
    AgaveClientError
        If client_name is invalid or the HTTP request itself fails.
    """
    # Validate client_name FIRST so the user is not prompted for a
    # password on a request that can never succeed (previously the
    # password was collected before this check).
    if client_name == "" or client_name is None:
        raise AgaveClientError("Error creating client: invalid client_name")
    # Set request endpoint.
    endpoint = "{}/clients/v2/{}/subscriptions".format(tenant_url, client_name)
    # Get user's password.
    passwd = getpass.getpass(prompt="API password: ")
    # Make request.
    try:
        data = {
            "apiName": api_name,
            "apiVersion": api_version,
            "apiProvider": api_provider,
        }
        resp = requests.post(endpoint, data=data, auth=(username, passwd))
    except Exception as err:
        # Preserve the original traceback for debugging.
        raise AgaveClientError(err) from err
    finally:
        # Drop the password reference as soon as the request is done,
        # on both the success and failure paths.
        del passwd
    # Handle bad status code.
    handle_bad_response_status_code(resp)
| [
37811,
198,
220,
220,
220,
12383,
13,
9078,
198,
198,
27125,
284,
309,
26861,
2471,
271,
198,
37811,
198,
11748,
651,
6603,
198,
11748,
7007,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.26306 | 536 |
import argparse
import os
import tensorflow as tf
import numpy as np
from avatar.util.LoadRelgan import LoadRelgan
from avatar.util.MHGAN import MHGAN
from avatar.util.util import writeToFile, readTraces
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
    # Fixed seed so naive/MH sampling runs are reproducible.
    np.random.seed(seed=1234)
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--system', help='System to evaluate (e.g. pa_system_2_3)', required=True)
    parser.add_argument('-j', '--job', help='0 = beta 100, 1 = beta 1000', required=True)
    parser.add_argument('-sfx', '--suffix', help='Which suffix, i.e. final epoch of the trained SGAN to use?', required=True)
    parser.add_argument('-gpu', '--gpu', help='GPU on which the training is performed. For example 0.', required=True)
    """ Selector """
    parser.add_argument('-strategy', '--strategy', help='select "naive" or "mh"', required=True)
    """ Parameter for Naively Sampling """
    parser.add_argument('-n_n', '--n_samples', help='(NAIVE ONLY) Number of samples to generate? (Default: 10000)', default=10000)
    """ Parameter for MH Sampling """
    parser.add_argument('-mh_c', '--mh_count', help='(MH ONLY) Number of samples per batch? (Default: 50)',
                        default=50)
    parser.add_argument('-mh_p', '--mh_patience',
                        help='(MH ONLY) Patience parameter (Default: 5)',
                        default=5)
    parser.add_argument('-mh_k', '--mh_k',
                        help='(MH ONLY) Length of Markov chain (Default: 500)',
                        default=500)
    parser.add_argument('-mh_mi', '--mh_maxiter',
                        help='(MH ONLY) Max sampling iterations? (Default: 200)',
                        default=200)
    args = parser.parse_args()
    system = args.system
    job = int(args.job)
    suffix = args.suffix
    strategy = args.strategy
    n_samples = int(args.n_samples)
    mh_count = int(args.mh_count)
    mh_patience = int(args.mh_patience)
    # BUG FIX: previously read args.mh_count here, so the --mh_k option
    # was silently ignored and the chain length equaled the batch count.
    mh_k = int(args.mh_k)
    mh_maxiter = int(args.mh_maxiter)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if strategy == "naive":
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        if DATA_PATH is None:
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        else:
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_naive.txt")
        print("Start NAIVE SAMPLING")
        gen_samples = relgan.generate(n_samples=n_samples)
        print("Generated samples - shape:", gen_samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, gen_samples)
    elif strategy == "mh":
        if DATA_PATH is None:
            eval_path = os.path.join(WORK_PATH, "data", "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        else:
            eval_path = os.path.join(DATA_PATH, "avatar", "train_data", system + "_eval.txt")
            f_out = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_mh.txt")
        tf.reset_default_graph()
        print("****** SAMPLE FOR SUFFIX ", suffix, " ******")
        relgan = LoadRelgan(system=system, suffix=suffix, job=job)
        # Real traces are used to calibrate the MH acceptance step.
        calibrate = readTraces(eval_path)
        calibrate = relgan.prep(calibrate)
        mhgan = MHGAN(relgan, c=mh_count, k=mh_k, real_samples=calibrate)
        samples = None
        gen_size = 0
        iteration = 1  # renamed from 'iter' to avoid shadowing the builtin
        cnt_patience = 0
        continue_sampling = True
        print("Start MH SAMPLING")
        while continue_sampling:
            print("**** MH-GAN Iteration", iteration, ":")
            gen_samples, accepts, rejects = mhgan.generate_enhanced(
                relgan.sess,
                count=mh_count,
                k=mh_k
            )
            if samples is None:
                samples = gen_samples
            else:
                samples = np.concatenate([samples, gen_samples], axis=0)
            # Deduplicate; patience counts iterations with no NEW samples.
            samples = np.unique(samples, axis=0)
            if gen_size != samples.shape[0]:
                cnt_patience = 0
            else:
                cnt_patience += 1
            gen_size = samples.shape[0]
            print("Generated samples (cumulative): ", gen_size)
            iteration += 1
            if cnt_patience >= mh_patience:
                continue_sampling = False
            if mh_maxiter != -1 and iteration >= mh_maxiter:
                continue_sampling = False
        print("Generated samples - shape:", samples.shape)
        print("Writing to file", f_out)
        writeToFile(relgan, f_out, samples)
    else:
        raise ValueError("Unknown sampling strategy.")
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
30919,
13,
22602,
13,
8912,
6892,
1030,
1330,
8778,
6892,
1030,
198,
6738,
30919,
13,
22602,
13,
36208,
... | 2.166595 | 2,329 |
# bca4abm
# See full license in LICENSE.txt.
import logging
import os
import pandas as pd
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import assign
from bca4abm import bca4abm as bca
from ...util.misc import add_result_columns, add_summary_results
logger = logging.getLogger(__name__)
"""
auto ownership processor
"""
# NOTE(review): the two bare @inject.injectable() decorators below look like
# leftovers from two stripped injectable definitions (likely
# auto_ownership_spec and auto_ownership_settings); stacking them on the
# step function is almost certainly unintended — confirm against upstream.
@inject.injectable()
@inject.injectable()
@inject.step()
def auto_ownership_processor(
        persons_merged,
        auto_ownership_spec,
        auto_ownership_settings,
        coc_column_names,
        chunk_size,
        trace_hh_id):
    """
    Compute auto ownership benefits.

    Evaluates the assignment expressions in auto_ownership_spec against the
    merged persons table, sums the results grouped by the community-of-concern
    columns, and appends them (prefixed 'AO_') to the coc_results table and
    the summary results.  When trace_hh_id is set, per-person trace output is
    written to CSV for that household.
    """
    persons_df = persons_merged.to_frame()
    logger.info("Running auto_ownership_processor with %d persons (chunk size = %s)"
                % (len(persons_df), chunk_size))
    # Expression evaluation context: model constants plus global settings.
    locals_dict = config.get_model_constants(auto_ownership_settings)
    locals_dict.update(config.setting('globals'))
    # Boolean row mask (or falsy trace_hh_id) selecting the traced household.
    trace_rows = trace_hh_id and persons_df['household_id'] == trace_hh_id
    coc_summary, trace_results, trace_assigned_locals = \
        bca.eval_and_sum(assignment_expressions=auto_ownership_spec,
                         df=persons_df,
                         locals_dict=locals_dict,
                         df_alias='persons',
                         group_by_column_names=coc_column_names,
                         chunk_size=chunk_size,
                         trace_rows=trace_rows)
    result_prefix = 'AO_'
    add_result_columns("coc_results", coc_summary, result_prefix)
    add_summary_results(coc_summary, prefix=result_prefix, spec=auto_ownership_spec)
    if trace_hh_id:
        if trace_results is not None:
            tracing.write_csv(trace_results,
                              file_name="auto_ownership",
                              index_label='person_id',
                              column_labels=['label', 'person'])
        if trace_assigned_locals:
            tracing.write_csv(trace_assigned_locals, file_name="auto_ownership_locals")
| [
2,
275,
6888,
19,
397,
76,
198,
2,
4091,
1336,
5964,
287,
38559,
24290,
13,
14116,
13,
198,
198,
11748,
18931,
198,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
3842,
14323,
13,
7295,
1330,
4566,
198,
673... | 2.192469 | 956 |
from .helper import helper
| [
6738,
764,
2978,
525,
1330,
31904,
198
] | 3.857143 | 7 |
import re
import nltk
import spacy
import unicodedata
import requests
from spacy_syllables import SpacySyllables
from bs4 import BeautifulSoup
from nltk import TweetTokenizer
from spacy.lang.es import Spanish
from spacy.lang.en import English
from nltk.util import ngrams
if __name__ == '__main__':
    # Smoke-test the pipeline on one Spanish and one English tweet.
    # NOTE(review): TextProcessing is not defined in this visible chunk —
    # presumably defined earlier in the file; confirm.
    tp_es = TextProcessing(lang='es')
    result_es = tp_es.nlp(
        'Ahora a la gente todo le parece tóxico, más si dices lo que sientes o te molesta…y NO, tóxico es quedarse '
        'callado por miedo a arruinar algo. Hay que aprender a quererse primero.')
    for i in result_es:
        print(i)
    tp_en = TextProcessing(lang='en')
    result_en = tp_en.nlp("The data doesn’t lie: here's what one of our teams learned when they tried a 4-day workweek.")
    for i in result_en:
        print(i)
| [
11748,
302,
198,
11748,
299,
2528,
74,
198,
11748,
599,
1590,
198,
11748,
28000,
9043,
1045,
198,
11748,
7007,
198,
6738,
599,
1590,
62,
1837,
297,
2977,
1330,
1338,
1590,
13940,
297,
2977,
198,
6738,
275,
82,
19,
1330,
23762,
50,
104... | 2.523364 | 321 |
"""
Revision ID: 0208_fix_unique_index
Revises: 0207_set_callback_history_type
Create Date: 2018-07-25 13:55:24.941794
"""
from alembic import op
# Alembic revision identifiers.
# NOTE(review): the module docstring says "Revision ID: 0208_fix_unique_index"
# but this revision string is a hash — confirm which identifier other
# migrations reference as their down_revision before changing either.
revision = '84c3b6eb16b3'
down_revision = '0207_set_callback_history_type'
| [
37811,
198,
198,
18009,
1166,
4522,
25,
657,
21315,
62,
13049,
62,
34642,
62,
9630,
198,
18009,
2696,
25,
657,
22745,
62,
2617,
62,
47423,
62,
23569,
62,
4906,
198,
16447,
7536,
25,
2864,
12,
2998,
12,
1495,
1511,
25,
2816,
25,
1731... | 2.430108 | 93 |
def render_tree(n):
    """Return the ASCII 'tree' for height n as a single string.

    The tree is a centered pyramid of odd-width star rows (1, 3, 5, ...
    up to n), followed by a fixed two-line trunk ('*' then '***') and a
    trailing blank line.  Output is byte-identical to the original
    print-as-you-go version.
    """
    lines = []
    indent = n // 2
    for width in range(1, n + 1, 2):
        lines.append(' ' * indent + '*' * width)
        indent -= 1
    lines.append(' ' * (n // 2) + '*')
    lines.append(' ' * (n // 2 - 1) + '***')
    lines.append('')  # blank separator line after each tree
    return '\n'.join(lines) + '\n'


def main():
    """Read integer heights from stdin until EOF, printing one tree each."""
    while True:
        try:
            n = int(input())
        except EOFError:
            break
        print(render_tree(n), end='')


# Guarded entry point so importing this module has no side effects
# (the original ran the read loop at import time).
if __name__ == "__main__":
    main()
| [
4514,
6407,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
299,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
2845,
412,
19238,
12331,
25,
2270,
198,
220,
220,
220,
1761,
796,
493,
7,
77,
3373,
362,
8... | 1.877419 | 155 |
import sys, os, time
from selenium import webdriver
from selenium.webdriver.chrome.options import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import time
from pynput import keyboard as kb
from pynput.keyboard import Key, Listener
from pynput.keyboard import Controller as kb2
# NOTE(review): this rebinds 'kb' (imported above as the pynput keyboard
# module) to a Controller instance — the module alias is shadowed from here on.
kb = kb2()
# Webdriver handle; presumably populated by initialize_driver().
driver = None
# NOTE(review): wait_for_esc, initialize_driver and start_detection are not
# defined in this visible chunk — presumably defined earlier in the file.
listener = Listener(on_release=wait_for_esc)
listener.start()
if __name__ == "__main__":
    initialize_driver()
    while True:
        start_detection()
else:
    # NOTE(review): imported only when this file is imported as a module.
    import app_terminator
| [
201,
198,
11748,
25064,
11,
28686,
11,
640,
201,
198,
201,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
1635,
201,
198,
6738,
384,
11925,
1505,
13,
123... | 2.61753 | 251 |
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
import numpy
import math
@add_metaclass(ABCMeta)
| [
6738,
2237,
1330,
751,
62,
4164,
330,
31172,
198,
6738,
450,
66,
1330,
9738,
48526,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
198,
11748,
299,
32152,
198,
11748,
10688,
628,
198,
31,
2860,
62,
4164,
330,
31172,
7,
24694,
48526,
8,
... | 3.186047 | 43 |
import re
# Register names of the 4-register machine, used to build instruction regexes.
# These instructions (cpy/inc/dec/jnz) match the "assembunny" language —
# presumably an Advent of Code 2016 puzzle; confirm.
ABCD = 'abcd'
# cpy x y: copy value/register x into register y.
copy_regex = r'cpy (\S+) (\S+)'
# inc x: increment register x (a-d only).
inc_regex = r'inc ([abcd])'
# dec x: decrement register x (a-d only).
dec_regex = r'dec ([abcd])'
# jnz x y: jump by signed offset y if value/register x is non-zero.
jump_regex = r'jnz (\S+) (-?\d+)'
| [
11748,
302,
198,
198,
24694,
35,
796,
705,
397,
10210,
6,
198,
30073,
62,
260,
25636,
796,
374,
6,
66,
9078,
357,
59,
50,
28988,
357,
59,
50,
28988,
6,
198,
1939,
62,
260,
25636,
796,
374,
6,
1939,
29565,
397,
10210,
12962,
6,
1... | 1.873418 | 79 |
from flask import (
render_template,
jsonify,
Blueprint,
redirect,
url_for,
flash,
request,
session,
send_file,
send_from_directory,
current_app as app,
)
from flask_socketio import leave_room, join_room
from flask_login import login_required, current_user, logout_user, login_user
from app.models import users, questions, events
from app.user.forms import RegisterForm, LoginForm
from app import db, socketio, bcrypt
from sqlalchemy import and_, func
import random
import json
import os
# Blueprint collecting all user-facing routes and quiz socket handlers.
user = Blueprint("user", __name__)
# NOTE(review): each decorator stack below originally wrapped a view or
# socket handler function; the function bodies are not present in this
# visible chunk (as written, stacked route decorators on nothing are
# invalid) — restore the handlers from the upstream source.
@user.route("/")
@user.route("/register", methods=["GET", "POST"])
@user.route("/login", methods=["GET", "POST"])
@user.route("/quiz")
@login_required
@socketio.on("connect", namespace="/quiz")
@socketio.on("disconnect", namespace="/quiz")
@socketio.on("fetch_questions", namespace="/quiz")
@socketio.on("update_time", namespace="/time")
@socketio.on("submit_answer", namespace="/quiz")
@user.route("/logout")
@login_required
@user.route("/finish")
@login_required
| [
6738,
42903,
1330,
357,
198,
220,
220,
220,
8543,
62,
28243,
11,
198,
220,
220,
220,
33918,
1958,
11,
198,
220,
220,
220,
39932,
11,
198,
220,
220,
220,
18941,
11,
198,
220,
220,
220,
19016,
62,
1640,
11,
198,
220,
220,
220,
7644,... | 2.869565 | 368 |
# XXX: Don't put a newline here, or it will add an extra line with
# isympy --help
# |
# v
"""Python shell for SymPy.
This is just a normal Python shell (IPython shell if you have the
IPython package installed), that executes the following commands for
the user:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
So starting 'isympy' is equivalent to starting Python (or IPython) and
executing the above commands by hand. It is intended for easy and quick
experimentation with SymPy. isympy is a good way to use SymPy as an
interactive calculator. If you have IPython and Matplotlib installed, then
interactive plotting is enabled by default.
COMMAND LINE OPTIONS
--------------------
-c CONSOLE, --console=CONSOLE
Use the specified shell (Python or IPython) shell as the console
backend instead of the default one (IPython if present, Python
otherwise), e.g.:
$isympy -c python
CONSOLE must be one of 'ipython' or 'python'
-p PRETTY, --pretty PRETTY
Setup pretty-printing in SymPy. When pretty-printing is enabled,
expressions can be printed with Unicode or ASCII. The default is
to use pretty-printing (with Unicode if the terminal supports it).
When this option is 'no', expressions will not be pretty-printed
and ASCII will be used:
$isympy -p no
PRETTY must be one of 'unicode', 'ascii', or 'no'
-t TYPES, --types=TYPES
Setup the ground types for the polys. By default, gmpy ground types
are used if gmpy2 or gmpy is installed, otherwise it falls back to python
ground types, which are a little bit slower. You can manually
choose python ground types even if gmpy is installed (e.g., for
testing purposes):
$isympy -t python
TYPES must be one of 'gmpy', 'gmpy1' or 'python'
Note that the ground type gmpy1 is primarily intended for testing; it
forces the use of gmpy version 1 even if gmpy2 is available.
This is the same as setting the environment variable
SYMPY_GROUND_TYPES to the given ground type (e.g.,
SYMPY_GROUND_TYPES='gmpy')
The ground types can be determined interactively from the variable
sympy.polys.domains.GROUND_TYPES.
-o ORDER, --order ORDER
Setup the ordering of terms for printing. The default is lex, which
orders terms lexicographically (e.g., x**2 + x + 1). You can choose
other orderings, such as rev-lex, which will use reverse
lexicographic ordering (e.g., 1 + x + x**2):
$isympy -o rev-lex
ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex',
'grevlex', 'rev-grevlex', 'old', or 'none'.
Note that for very large expressions, ORDER='none' may speed up
printing considerably but the terms will have no canonical order.
-q, --quiet
Print only Python's and SymPy's versions to stdout at startup.
-d, --doctest
Use the same format that should be used for doctests. This is
equivalent to -c python -p no.
-C, --no-cache
Disable the caching mechanism. Disabling the cache may slow certain
operations down considerably. This is useful for testing the cache,
or for benchmarking, as the cache can result in deceptive timings.
This is equivalent to setting the environment variable
SYMPY_USE_CACHE to 'no'.
-a, --auto-symbols (requires at least IPython 0.11)
Automatically create missing symbols. Normally, typing a name of a
Symbol that has not been instantiated first would raise NameError,
but with this option enabled, any undefined name will be
automatically created as a Symbol.
Note that this is intended only for interactive, calculator style
usage. In a script that uses SymPy, Symbols should be instantiated
at the top, so that it's clear what they are.
This will not override any names that are already defined, which
includes the single character letters represented by the mnemonic
QCOSINE (see the "Gotchas and Pitfalls" document in the
documentation). You can delete existing names by executing "del
name". If a name is defined, typing "'name' in dir()" will return True.
The Symbols that are created using this have default assumptions.
If you want to place assumptions on symbols, you should create them
using symbols() or var().
Finally, this only works in the top level namespace. So, for
example, if you define a function in isympy with an undefined
Symbol, it will not work.
See also the -i and -I options.
-i, --int-to-Integer (requires at least IPython 0.11)
Automatically wrap int literals with Integer. This makes it so that
things like 1/2 will come out as Rational(1, 2), rather than 0.5. This
works by preprocessing the source and wrapping all int literals with
Integer. Note that this will not change the behavior of int literals
assigned to variables, and it also won't change the behavior of functions
that return int literals.
If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2)
gives 1.5 (with division imported from __future__).
-I, --interactive (requires at least IPython 0.11)
This is equivalent to --auto-symbols --int-to-Integer. Future options
designed for ease of interactive use may be added to this.
-D, --debug
Enable debugging output. This is the same as setting the
environment variable SYMPY_DEBUG to 'True'. The debug status is set
in the variable SYMPY_DEBUG within isympy.
-- IPython options
Additionally you can pass command line options directly to the IPython
interpreter (the standard Python shell is not supported). However you
need to add the '--' separator between two types of options, e.g the
startup banner option and the colors option. You need to enter the
options as required by the version of IPython that you are using, too:
in IPython 0.11,
$isympy -q -- --colors=NoColor
or older versions of IPython,
$isympy -q -- -colors NoColor
See also isympy --help.
"""
import os
import sys
# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables
# by the command line will break.
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): main() is not defined in this visible chunk — it is
    # presumably defined elsewhere in the file; confirm.
    main()
| [
2,
27713,
25,
2094,
470,
1234,
257,
649,
1370,
994,
11,
393,
340,
481,
751,
281,
3131,
1627,
351,
198,
2,
318,
88,
3149,
88,
1377,
16794,
198,
2,
220,
930,
198,
2,
220,
410,
198,
37811,
37906,
7582,
329,
15845,
20519,
13,
198,
1... | 3.20806 | 1,985 |
"""The zulu library."""
__version__ = "2.0.0"
from .api import create, now, parse, parse_delta, range, span_range
from .delta import Delta, to_seconds
from .parser import ISO8601, TIMESTAMP, ParseError
from .timer import Timer
from .zulu import Zulu
| [
37811,
464,
1976,
15712,
5888,
526,
15931,
198,
198,
834,
9641,
834,
796,
366,
17,
13,
15,
13,
15,
1,
198,
198,
6738,
764,
15042,
1330,
2251,
11,
783,
11,
21136,
11,
21136,
62,
67,
12514,
11,
2837,
11,
11506,
62,
9521,
198,
6738,
... | 2.964706 | 85 |
import torch as tc
from drl.agents.architectures.stateless.dueling import DuelingArchitecture
from drl.utils.initializers import get_initializer
| [
11748,
28034,
355,
37096,
198,
198,
6738,
1553,
75,
13,
49638,
13,
998,
5712,
942,
13,
5219,
1203,
13,
646,
10809,
1330,
23958,
278,
19895,
5712,
495,
198,
6738,
1553,
75,
13,
26791,
13,
36733,
11341,
1330,
651,
62,
36733,
7509,
628
] | 3.5 | 42 |
from googleapiclient import discovery
import json
import os
from dotenv import load_dotenv
import time
# Load PERSPECTIVE_API_KEY (and any other settings) from a local .env file.
load_dotenv()
# # # Testing connection
# def implicit():
#     from google.cloud import storage
#     # If you don't specify credentials when constructing the client, the
#     # client library will look for credentials in the environment.
#     storage_client = storage.Client()
#     # Make an authenticated API request
#     buckets = list(storage_client.list_buckets())
#     print(buckets)
#Test API KEY and Response Analysis
API_KEY = os.getenv('PERSPECTIVE_API_KEY')
# Client for Google's Perspective (commentanalyzer) API.
client = discovery.build(
  "commentanalyzer",
  "v1alpha1",
  developerKey=API_KEY,
  discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
  static_discovery=False,
)
if __name__ == '__main__':
    pass
    # Sample comment to score.
    # NOTE(review): score_text is not defined in this visible chunk —
    # presumably defined earlier in the file; confirm.
    sample = '''We are not in a trade war with China,
    that war was lost many years ago by the foolish,
    or incompetent, people who represented the U.S.
    Now we have a Trade Deficit of $500 Billion a year,
    with Intellectual Property Theft of another $300 Billion.
    We cannot let this continue!'''
print(score_text(text=sample)) | [
6738,
23645,
499,
291,
75,
1153,
1330,
9412,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
11748,
640,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
2,
1303,
1303,
23983,
4637,
198,
2,
... | 3.064103 | 390 |
"""Base ABC Client."""
import abc
import asyncio
import base64
import json
import logging
import os
import typing
import urllib.parse
import aiohttp.typedefs
import yarl
from genshin import constants, errors, types, utility
from genshin.client import cache as client_cache
from genshin.client import manager, routes
from genshin.models import hoyolab as hoyolab_models
from genshin.models import model as base_model
from genshin.utility import concurrency, deprecation, ds
__all__ = ["BaseClient"]
class BaseClient(abc.ABC):
    """Base ABC Client."""
    __slots__ = ("cookie_manager", "cache", "_authkey", "_lang", "_region", "_default_game", "uids")
    # Browser-like User-Agent attached to every outgoing request (see request()).
    USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36" # noqa: E501
    logger: logging.Logger = logging.getLogger(__name__)
    cookie_manager: manager.BaseCookieManager
    cache: client_cache.BaseCache
    _authkey: typing.Optional[str]
    _lang: str
    _region: types.Region
    _default_game: typing.Optional[types.Game]
    uids: typing.Dict[types.Game, int]
    @property
    def hoyolab_uid(self) -> typing.Optional[int]:
        """The logged-in user's hoyolab uid.
        Returns None if not found or not applicable.
        """
        return self.cookie_manager.user_id
    @property
    def lang(self) -> str:
        """The default language, defaults to "en-us" """
        return self._lang
    # NOTE(review): this @lang.setter (and the other *.setter decorators
    # below) is immediately followed by the next property instead of a
    # setter function — the setter bodies appear to have been stripped
    # from this chunk; restore them from the upstream source.
    @lang.setter
    @property
    def region(self) -> types.Region:
        """The default region."""
        return self._region
    # NOTE(review): setter body missing (see note at lang.setter).
    @region.setter
    @property
    def default_game(self) -> typing.Optional[types.Game]:
        """The default game."""
        return self._default_game
    # NOTE(review): setter body missing; as written, a decorator directly
    # above an assignment is a syntax error (see note at lang.setter).
    @default_game.setter
    game = default_game
    @property
    def uid(self) -> typing.Optional[int]:
        """UID of the default game."""
        # With no default game set, fall back only when exactly one uid
        # is cached — then that game becomes the default.
        if self.default_game is None:
            if len(self.uids) != 1:
                return None
            (self.default_game,) = self.uids.keys()
        return self.uids.get(self.default_game)
    # NOTE(review): setter body missing (see note at lang.setter).
    @uid.setter
    @property
    def authkey(self) -> typing.Optional[str]:
        """The default genshin authkey used for paginators."""
        return self._authkey
    # NOTE(review): setter body missing (see note at lang.setter).
    @authkey.setter
    @property
    def debug(self) -> bool:
        """Whether the debug logs are being shown in stdout"""
        return logging.getLogger("genshin").level == logging.DEBUG
    # NOTE(review): setter body missing (see note at lang.setter).
    @debug.setter
    def set_cookies(self, cookies: typing.Optional[manager.AnyCookieOrHeader] = None, **kwargs: typing.Any) -> None:
        """Parse and set cookies."""
        # Exactly one of (cookies, **kwargs) must be provided.
        if not bool(cookies) ^ bool(kwargs):
            raise TypeError("Cannot use both positional and keyword arguments at once")
        self.cookie_manager = manager.BaseCookieManager.from_cookies(cookies or kwargs)
    def set_browser_cookies(self, browser: typing.Optional[str] = None) -> None:
        """Extract cookies from your browser and set them as client cookies.
        Available browsers: chrome, chromium, opera, edge, firefox.
        """
        self.cookie_manager = manager.BaseCookieManager.from_browser_cookies(browser)
    def set_authkey(self, authkey: typing.Optional[str] = None) -> None:
        """Set an authkey for wish & transaction logs.
        Accepts an authkey, a url containing an authkey or a path towards a logfile.
        """
        if authkey is None or os.path.isfile(authkey):
            authkey = utility.get_authkey(authkey)
        else:
            authkey = utility.extract_authkey(authkey) or authkey
        self.authkey = authkey
    def set_cache(
        self,
        maxsize: int = 1024,
        *,
        ttl: int = client_cache.HOUR,
        static_ttl: int = client_cache.DAY,
    ) -> None:
        """Create and set a new cache."""
        self.cache = client_cache.Cache(maxsize, ttl=ttl, static_ttl=static_ttl)
    def set_redis_cache(
        self,
        url: str,
        *,
        ttl: int = client_cache.HOUR,
        static_ttl: int = client_cache.DAY,
        **redis_kwargs: typing.Any,
    ) -> None:
        """Create and set a new redis cache."""
        # Local import: aioredis is only needed when a redis cache is used.
        import aioredis
        redis = aioredis.Redis.from_url(url, **redis_kwargs) # pyright: ignore[reportUnknownMemberType]
        self.cache = client_cache.RedisCache(redis, ttl=ttl, static_ttl=static_ttl)
    @property
    def proxy(self) -> typing.Optional[str]:
        """Proxy for http requests."""
        if self.cookie_manager.proxy is None:
            return None
        return str(self.cookie_manager.proxy)
    # NOTE(review): setter body missing (see note at lang.setter).
    @proxy.setter
    async def _request_hook(
        self,
        method: str,
        url: aiohttp.typedefs.StrOrURL,
        *,
        params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
        data: typing.Any = None,
        **kwargs: typing.Any,
    ) -> None:
        """Perform an action before a request.
        Debug logging by default.
        """
        url = yarl.URL(url)
        if params:
            # Never log the authkey query parameter.
            params = {k: v for k, v in params.items() if k != "authkey"}
            url = url.update_query(params)
        if data:
            self.logger.debug("%s %s\n%s", method, url, json.dumps(data, separators=(",", ":")))
        else:
            self.logger.debug("%s %s", method, url)
    async def request(
        self,
        url: aiohttp.typedefs.StrOrURL,
        *,
        method: typing.Optional[str] = None,
        params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
        data: typing.Any = None,
        headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
        cache: typing.Any = None,
        static_cache: typing.Any = None,
        **kwargs: typing.Any,
    ) -> typing.Mapping[str, typing.Any]:
        """Make a request and return a parsed json response."""
        # Serve from cache when a cache key was given ('cache' wins over
        # 'static_cache').
        if cache is not None:
            value = await self.cache.get(cache)
            if value is not None:
                return value
        elif static_cache is not None:
            value = await self.cache.get_static(static_cache)
            if value is not None:
                return value
        # actual request
        headers = dict(headers or {})
        headers["User-Agent"] = self.USER_AGENT
        if method is None:
            method = "POST" if data else "GET"
        if "json" in kwargs:
            raise TypeError("Use data instead of json in request.")
        await self._request_hook(method, url, params=params, data=data, headers=headers, **kwargs)
        response = await self.cookie_manager.request(
            url,
            method=method,
            params=params,
            json=data,
            headers=headers,
            **kwargs,
        )
        # cache
        if cache is not None:
            await self.cache.set(cache, response)
        elif static_cache is not None:
            await self.cache.set_static(static_cache, response)
        return response
    async def request_webstatic(
        self,
        url: aiohttp.typedefs.StrOrURL,
        *,
        headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
        cache: typing.Any = None,
        **kwargs: typing.Any,
    ) -> typing.Any:
        """Request a static json file."""
        if cache is not None:
            value = await self.cache.get_static(cache)
            if value is not None:
                return value
        url = routes.WEBSTATIC_URL.get_url().join(yarl.URL(url))
        headers = dict(headers or {})
        headers["User-Agent"] = self.USER_AGENT
        await self._request_hook("GET", url, headers=headers, **kwargs)
        # Static files bypass cookie auth; a plain session is enough.
        async with self.cookie_manager.create_session() as session:
            async with session.get(url, headers=headers, proxy=self.proxy, **kwargs) as r:
                r.raise_for_status()
                data = await r.json()
        if cache is not None:
            await self.cache.set_static(cache, data)
        return data
    async def request_hoyolab(
        self,
        url: aiohttp.typedefs.StrOrURL,
        *,
        lang: typing.Optional[str] = None,
        region: typing.Optional[types.Region] = None,
        method: typing.Optional[str] = None,
        params: typing.Optional[typing.Mapping[str, typing.Any]] = None,
        data: typing.Any = None,
        headers: typing.Optional[aiohttp.typedefs.LooseHeaders] = None,
        **kwargs: typing.Any,
    ) -> typing.Mapping[str, typing.Any]:
        """Make a request any hoyolab endpoint."""
        if lang is not None and lang not in constants.LANGS:
            raise ValueError(f"{lang} is not a valid language, must be one of: " + ", ".join(constants.LANGS))
        lang = lang or self.lang
        region = region or self.region
        url = routes.TAKUMI_URL.get_url(region).join(yarl.URL(url))
        # Region-specific headers; the 'ds' dynamic secret differs per region.
        if region == types.Region.OVERSEAS:
            headers = {
                "x-rpc-app_version": "1.5.0",
                "x-rpc-client_type": "4",
                "x-rpc-language": lang,
                "ds": ds.generate_dynamic_secret(),
            }
        elif region == types.Region.CHINESE:
            headers = {
                "x-rpc-app_version": "2.11.1",
                "x-rpc-client_type": "5",
                "ds": ds.generate_cn_dynamic_secret(data, params),
            }
        else:
            raise TypeError(f"{region!r} is not a valid region.")
        data = await self.request(url, method=method, params=params, data=data, headers=headers, **kwargs)
        return data
    @manager.no_multi
    async def get_game_accounts(
        self,
        *,
        lang: typing.Optional[str] = None,
    ) -> typing.Sequence[hoyolab_models.GenshinAccount]:
        """Get the game accounts of the currently logged-in user."""
        data = await self.request_hoyolab(
            "binding/api/getUserGameRolesByCookie",
            lang=lang,
            cache=client_cache.cache_key("accounts", hoyolab_uid=self.hoyolab_uid),
        )
        return [hoyolab_models.GenshinAccount(**i) for i in data["list"]]
    @deprecation.deprecated("get_game_accounts")
    async def genshin_accounts(
        self,
        *,
        lang: typing.Optional[str] = None,
    ) -> typing.Sequence[hoyolab_models.GenshinAccount]:
        """Get the genshin accounts of the currently logged-in user."""
        accounts = await self.get_game_accounts(lang=lang)
        return [account for account in accounts if account.game == types.Game.GENSHIN]
    async def _update_cached_uids(self) -> None:
        """Update cached fallback uids."""
        mixed_accounts = await self.get_game_accounts()
        game_accounts: typing.Dict[types.Game, typing.List[hoyolab_models.GenshinAccount]] = {}
        for account in mixed_accounts:
            if not isinstance(account.game, types.Game): # pyright: ignore[reportUnnecessaryIsInstance]
                continue
            game_accounts.setdefault(account.game, []).append(account)
        # Per game, keep the uid of the highest-level account.
        self.uids = {game: max(accounts, key=lambda a: a.level).uid for game, accounts in game_accounts.items()}
        if len(self.uids) == 1 and self.default_game is None:
            (self.default_game,) = self.uids.keys()
    @concurrency.prevent_concurrency
    async def _get_uid(self, game: types.Game) -> int:
        """Get a cached fallback uid."""
        # TODO: use lock
        if uid := self.uids.get(game):
            return uid
        if self.cookie_manager.multi:
            raise RuntimeError("UID must be provided when using multi-cookie managers.")
        # Cache miss: refresh the uid cache from the account list, then retry.
        await self._update_cached_uids()
        if uid := self.uids.get(game):
            return uid
        raise errors.AccountNotFound(msg="No UID provided and account has no game account bound to it.")
    async def _fetch_mi18n(self, key: str, lang: str, *, force: bool = False) -> None:
        """Update mi18n for a single url."""
        if not force:
            if key in base_model.APIModel._mi18n:
                return
        base_model.APIModel._mi18n[key] = {}
        url = routes.MI18N[key]
        cache_key = client_cache.cache_key("mi18n", mi18n=key, lang=lang)
        data = await self.request_webstatic(url.format(lang=lang), cache=cache_key)
        for k, v in data.items():
            # Strings are keyed as "<file>/<string-id>", lowercased.
            actual_key = str.lower(key + "/" + k)
            base_model.APIModel._mi18n.setdefault(actual_key, {})[lang] = v
    async def update_mi18n(self, langs: typing.Iterable[str] = constants.LANGS, *, force: bool = False) -> None:
        """Fetch mi18n for partially localized endpoints."""
        if not force:
            if base_model.APIModel._mi18n:
                return
        langs = tuple(langs)
        # Fetch every file in every language concurrently.
        coros: typing.List[typing.Awaitable[None]] = []
        for key in routes.MI18N:
            for lang in langs:
                coros.append(self._fetch_mi18n(key, lang, force=force))
        await asyncio.gather(*coros)
| [
37811,
14881,
9738,
20985,
526,
15931,
198,
11748,
450,
66,
198,
11748,
30351,
952,
198,
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
19720,
198,
11748,
2956,
297,
571,
13,
29572,
198,
198,
11748... | 2.253656 | 5,744 |
from kotoba_player_py.exceptions import InputFormatError
import pytest
from kotoba_player_py import (
__version__, KotobaPlayer, InputFormatError
)
from kotoba_player_py.api import (
mask_noun_word
)
@pytest.fixture | [
6738,
479,
313,
19981,
62,
7829,
62,
9078,
13,
1069,
11755,
1330,
23412,
26227,
12331,
198,
11748,
12972,
9288,
198,
198,
6738,
479,
313,
19981,
62,
7829,
62,
9078,
1330,
357,
198,
220,
220,
220,
11593,
9641,
834,
11,
21702,
19981,
14... | 2.777778 | 81 |
#!/usr/bin/env python
"""
This example mirrors the 8-direction movement example here:
https://github.com/Mekire/meks-pygame-samples/blob/master/eight_dir_move.py
The difference is that this example uses delta time.
Delta time is a method of assuring that updates are unaffected by
changes in framerate.
"""
import os
import sys
import pygame as pg
CAPTION = "Delta Time"
SCREEN_SIZE = (500, 500)
TRANSPARENT = (0, 0, 0, 0)
BACKGROUND_COLOR = pg.Color("darkslategrey")
DIRECT_DICT = {pg.K_LEFT : (-1, 0),
pg.K_RIGHT : ( 1, 0),
pg.K_UP : ( 0,-1),
pg.K_DOWN : ( 0, 1)}
class Player(object):
    """The user-controlled character."""

    SIZE = (100, 100)

    def __init__(self, pos, speed):
        """
        Set up the image and rect, and keep an exact float position.

        Rects can only hold integers, so fractional per-frame movement
        would be lost without a separate float position: a body moving
        slower than one pixel per frame would otherwise never move.
        """
        self.image = self.make_image()
        self.rect = self.image.get_rect(center=pos)
        self.true_pos = list(self.rect.center)  # Exact float position.
        self.speed = speed  # Speed in pixels per second.

    def make_image(self):
        """Build the player's two-tone ellipse image."""
        surface = pg.Surface(Player.SIZE).convert_alpha()
        surface.fill(TRANSPARENT)
        bounds = surface.get_rect()
        pg.draw.ellipse(surface, pg.Color("black"), bounds)
        pg.draw.ellipse(surface, pg.Color("tomato"), bounds.inflate(-12, -12))
        return surface

    def update(self, keys, screen_rect, dt):
        """
        Move according to held keys, scaled by the frame delta dt, then
        sync the integer rect from the float position (the assignment
        truncates to int automatically) and clamp to the screen.
        """
        for key, (dx, dy) in DIRECT_DICT.items():
            if keys[key]:
                self.true_pos[0] += dx * self.speed * dt
                self.true_pos[1] += dy * self.speed * dt
        self.rect.center = self.true_pos
        self.clamp(screen_rect)

    def clamp(self, screen_rect):
        """
        Keep the rect on screen if needed and re-sync true_pos from the
        rect so the float and integer positions don't lose sync.
        """
        if not screen_rect.contains(self.rect):
            self.rect.clamp_ip(screen_rect)
            self.true_pos = list(self.rect.center)

    def draw(self, surface):
        """Blit the player image onto the given surface."""
        surface.blit(self.image, self.rect)
class App(object):
    """Controls program flow: events, updates, and rendering."""

    def event_loop(self):
        """Handle quit requests and refresh the pressed-key state."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.done = True
            elif event.type in (pg.KEYDOWN, pg.KEYUP):
                self.keys = pg.key.get_pressed()

    def update(self, dt):
        """Accept the frame delta dt and pass it to every element that updates."""
        self.player.update(self.keys, self.screen_rect, dt)

    def render(self):
        """Draw all needed elements and update the display."""
        self.screen.fill(BACKGROUND_COLOR)
        self.player.draw(self.screen)
        pg.display.update()

    def main_loop(self):
        """
        Run the game loop, using the return value of clock.tick (converted
        from milliseconds to seconds) as the time delta between frames.
        """
        dt = 0
        self.clock.tick(self.fps)
        while not self.done:
            self.event_loop()
            self.update(dt)
            self.render()
            dt = self.clock.tick(self.fps) / 1000.0
def main():
    """Initialize pygame, create an App, and start the main loop."""
    # Center the window before pygame initializes the display.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pg.init()
    pg.display.set_caption(CAPTION)
    pg.display.set_mode(SCREEN_SIZE)
    App().main_loop()
    # Tear down pygame and exit the interpreter.
    pg.quit()
    sys.exit()
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
1212,
1672,
22353,
262,
807,
12,
37295,
3356,
1672,
994,
25,
198,
5450,
1378,
12567,
13,
785,
14,
44,
988,
557,
14,
1326,
591,
12,
9078,
6057,
12,
82,
12629,
14,
2436,
... | 2.2638 | 1,884 |
from box import Box
from popper.translators.translator_task import TaskTranslator
from .test_common import PopperTest
| [
6738,
3091,
1330,
8315,
198,
198,
6738,
745,
2848,
13,
7645,
75,
2024,
13,
7645,
41880,
62,
35943,
1330,
15941,
8291,
41880,
198,
6738,
764,
9288,
62,
11321,
1330,
7695,
2848,
14402,
628
] | 3.636364 | 33 |
print(
if __name == "man:
BANANA
| [
4798,
7,
198,
198,
361,
11593,
3672,
6624,
366,
805,
25,
198,
33,
1565,
31574,
198
] | 2.125 | 16 |
# -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.montecarlo_v0
=================================================================
simulatie v0: alles in Python (op genereren van de getallen na)
"""
import math
import numpy as np
from statistics import stdev
import projectparallelprogrammeren
from projectparallelprogrammeren import montecarlo_v1
from projectparallelprogrammeren import montecarlo_v2
from projectparallelprogrammeren import montecarlo_v3
from projectparallelprogrammeren import atomen
from et_stopwatch import Stopwatch
def simulatie(n=20, m=10):
    """
    Simulate a given number of conformations of the given number of atoms.

    Tracks the run with the lowest total Lennard-Jones potential and
    prints summary statistics (output text is in Dutch).

    :param int n: The number of atoms to use.
    :param int m: The number of conformations to simulate.
    """
    with Stopwatch(message="v0: Python"):
        # Best (lowest-energy) run seen so far.
        coordinatenLaagsteE = 0
        nummerRunLaagsteE = 0
        LaagsteE = math.inf
        totalePot = 0
        gemiddelde = 0
        potentialenlijst = list()
        for i in range(m):
            #print("Bezig met het simuleren van run", i+1, "van", m)
            run = atomen.Atomen(n)
            pot = run.berekenLJPot()
            totalePot = totalePot + pot
            # Running mean of the potential over the runs so far.
            gemiddelde = totalePot / (i + 1)
            potentialenlijst.append(pot)
            if pot < LaagsteE:
                coordinatenLaagsteE = run.getCoordinaten()
                nummerRunLaagsteE = i
                LaagsteE = pot
        print(" ")
        print("----------RESULTATEN----------")
        print("Run", nummerRunLaagsteE + 1,"van", m, "had de laagste totale Lennard Jones Potentiaal, namelijk:", LaagsteE)
        #print("De Coordinaten van de atomen van deze run zijn:", coordinatenLaagsteE)
        print("De gemiddelde potentiaal:", gemiddelde)
        print("De standaardafwijking is:", stdev(potentialenlijst))
    # The string below is commented-out code for the other simulation versions.
    """montecarlo_v1.simulatie(n, m)
    montecarlo_v2.simulatie(n, m)
    montecarlo_v3.simulatie(n, m)"""
#eof
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
26796,
1628,
1845,
29363,
23065,
76,
14226,
13,
2144,
660,
7718,
5439,
62,
85,
15,
220,
198,
23926,
28,
198,
198,
14323,
377,
265,
494,
410,
15,
25,
... | 2.511111 | 720 |
# coding=utf8
import unittest
import string
import random
import shlex
from qiniuManager.run import *
# Run the test suite with verbose output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
2,
19617,
28,
40477,
23,
198,
11748,
555,
715,
395,
198,
11748,
4731,
198,
11748,
4738,
198,
11748,
427,
2588,
198,
198,
6738,
10662,
5362,
84,
13511,
13,
5143,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
... | 2.733333 | 60 |
# scanner init
from .hashFile import ImageHash
from .hashFile import phash
from .hashFile import hashFile
from .hashFile import getHashDict
from .hashFile import getMd5Hash | [
2,
27474,
2315,
198,
198,
6738,
764,
17831,
8979,
1330,
7412,
26257,
198,
6738,
764,
17831,
8979,
1330,
872,
1077,
198,
6738,
764,
17831,
8979,
1330,
12234,
8979,
198,
6738,
764,
17831,
8979,
1330,
651,
26257,
35,
713,
198,
6738,
764,
... | 3.530612 | 49 |
"""
71
medium
simplify path
"""
path1 = "/home/"
path2 = "/../"
path3 = "/home//foo/"
path4 = "/a/./b/../../c/"
path5 = "/../"
path6 = "/..."
path7 = "/a//b////c/d//././/.."
sol = Solution()
print(sol.simplifyPath(path7))
| [
37811,
198,
4869,
198,
24132,
198,
14323,
489,
1958,
3108,
198,
198,
37811,
628,
198,
198,
6978,
16,
796,
12813,
11195,
30487,
198,
6978,
17,
796,
12813,
492,
30487,
198,
6978,
18,
796,
12813,
11195,
1003,
21943,
30487,
198,
6978,
19,
... | 2.223301 | 103 |
from test.integration.base import DBTIntegrationTest, use_profile
| [
6738,
1332,
13,
18908,
1358,
13,
8692,
1330,
360,
19313,
34500,
1358,
14402,
11,
779,
62,
13317,
628
] | 3.722222 | 18 |
"""Test Authentication and Authorization Service."""
import datetime
import re
from arrow import utcnow
from flask import current_app
import pytest # noqa: F401
from jadetree.domain.models import User
from jadetree.exc import AuthError, DomainError, JwtPayloadError, NoResults
from jadetree.mail import mail
from jadetree.service import auth as auth_service
from jadetree.service.auth import JWT_SUBJECT_BEARER_TOKEN
def test_register_user_adds_user(session):
    """Ensure user is added when register_user is called."""
    user = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')

    # A persisted User instance comes back...
    assert user is not None
    assert isinstance(user, User)
    assert user.id > 0
    # ...and it is the only row in the table.
    assert len(session.query(User).all()) == 1

    # The password is stored hashed, never in the clear.
    assert user.email == 'test@jadetree.io'
    assert user.pw_hash is not None
    assert user.pw_hash != 'hunter2JT'
    assert user.uid_hash is not None

    # New accounts start without a currency, inactive and unconfirmed.
    assert user.currency is None
    assert user.active is False
    assert user.confirmed is False
def test_register_user_throws_duplicate_email(session):
"""Ensure two users with the same email cannot be registered."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JTa', 'Test User')
assert len(session.query(User).all()) == 1
assert 'already exists' in str(exc_data.value)
def test_register_user_throws_bad_email(session):
"""Ensure invalid email addresses are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'jadetree', 'hunter2JT', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Invalid Email Address' in str(exc_data.value)
def test_register_user_throws_bad_pw_short(session):
"""Ensure passwords which are too short are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'aBc5', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Password' in str(exc_data.value)
assert 'at least 8 characters' in str(exc_data.value)
def test_register_user_throws_bad_pw_lowercase(session):
"""Ensure passwords with no lowercase letter are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'HUNTER2JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a lower-case letter'
def test_register_user_throws_bad_pw_uppercase(session):
"""Ensure passwords with no uppercase letter are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter2jt', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain an upper-case letter'
def test_register_user_throws_bad_pw_number(session):
"""Ensure passwords with no number are rejected."""
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a number'
def test_register_user_throws_no_pw_public(app, session, monkeypatch):
"""Ensure a password is required in public mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert len(session.query(User).all()) == 0
assert 'Password must be provided' in str(exc_data.value)
def test_register_user_no_pw_personal(app, session, monkeypatch):
"""Ensure a password is not required in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
u = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert u is not None
assert u.id > 0
def test_register_user_no_pw_family(app, session, monkeypatch):
"""Ensure a password is not required in family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test@jadetree.io', '', 'Test User')
assert u is not None
assert u.id > 0
def test_register_user_throws_bad_pw_personal(app, session, monkeypatch):
"""Ensure bad passwords are rejected in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
with pytest.raises(ValueError) as exc_data:
auth_service.register_user(session, 'test@jadetree.io', 'hunter_JT', 'Test User')
assert len(session.query(User).all()) == 0
assert str(exc_data.value) == 'Password must contain a number'
def test_register_user_throws_personal_mode(app, session, monkeypatch):
"""Ensure a second user cannot be registered in Personal mode."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with app.app_context():
monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'personal')
with pytest.raises(DomainError) as exc_data:
auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
assert len(session.query(User).all()) == 1
assert str(exc_data.value) == 'Cannot register users when the server mode is set to Personal'
def test_register_user_confirmed_family_mode(app, session, monkeypatch):
"""Ensure new users are automatically confirmed in Family mode."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with app.app_context():
monkeypatch.setitem(current_app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
assert u.active is True
assert u.confirmed is True
def test_register_user_sends_email_public(app, session, monkeypatch):
"""Ensure a Registration Confirmation email is sent to the user."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
u = None
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 1
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
# Load Tokens from Email
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
confirm_token = str(m.group(1))
m = re.search(r'cancel\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
cancel_token = str(m.group(1))
# Check Tokens
confirm_payload = auth_service.decodeJwt(current_app, confirm_token, leeway=10)
assert 'email' in confirm_payload
assert 'uid' in confirm_payload
assert confirm_payload['email'] == u.email
assert confirm_payload['uid'] == u.uid_hash
cancel_payload = auth_service.decodeJwt(current_app, cancel_token, leeway=10)
assert 'email' in cancel_payload
assert cancel_payload['email'] == u.email
def test_resend_email_changes_uid_hash(app, session, monkeypatch):
"""Ensure resending a Confirmation Email changes the UID hash."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
u = None
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
uid_hash_1 = u.uid_hash
u2 = auth_service.resend_confirmation(session, 'test@jadetree.io')
assert u2.uid_hash != uid_hash_1
assert len(outbox) == 2
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
assert outbox[1].subject == '[Jade Tree] Confirm Your Registration'
# Load Tokens from Emails
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[0].body)
assert m is not None
confirm_token_1 = str(m.group(1))
m = re.search(r'confirm\?token=([A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*)', outbox[1].body)
assert m is not None
confirm_token_2 = str(m.group(1))
# Check Tokens
assert confirm_token_1 != confirm_token_2
confirm_payload = auth_service.decodeJwt(current_app, confirm_token_2, leeway=10)
assert 'email' in confirm_payload
assert 'uid' in confirm_payload
assert confirm_payload['email'] == u.email
assert confirm_payload['uid'] == u2.uid_hash
def test_register_user_no_email_personal(app, session, monkeypatch):
"""Ensure a confirmation email is not sent in Personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_register_user_no_email_family(app, session, monkeypatch):
"""Ensure a confirmation email is not sent in Family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_register_user_no_email_config(app, session, monkeypatch):
"""Ensure a confirmation email is not sent when CONFIRM_REGISTRATION_EMAIL is set."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
with app.app_context():
with mail.record_messages() as outbox:
auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
assert len(outbox) == 0
def test_confirm_user(session):
    """Ensure a user can be confirmed."""
    u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
    assert u.id == 1
    assert u.uid_hash is not None
    assert u.active is False
    assert u.confirmed is False
    assert u.confirmed_at is None

    u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')

    assert u2 == u
    assert u2.active is True
    assert u2.confirmed is True
    # Fixed: was `u2.confirmed is not None`, which is trivially true after
    # the previous assertion; the intent (mirroring the pre-condition
    # above) is to verify the confirmation timestamp was set.
    assert u2.confirmed_at is not None
def test_confirm_user_sends_email(app, session, monkeypatch):
"""Ensure a Welcome email is sent after confirmation."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with app.app_context():
with mail.record_messages() as outbox:
u = auth_service.register_user(
session, 'test@jadetree.io', 'hunter2JT', 'Test User'
)
auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
assert len(outbox) == 2
assert outbox[0].subject == '[Jade Tree] Confirm Your Registration'
assert outbox[1].subject == '[Jade Tree] Welcome to Jade Tree'
def test_confirm_user_personal(app, session, monkeypatch):
"""Ensure a user is automatically confirmed in personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_family(app, session, monkeypatch):
"""Ensure a user is automatically confirmed in family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_auto(app, session, monkeypatch):
"""Ensure a user can be confirmed in public mode with no email."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
monkeypatch.setitem(app.config, 'CONFIRM_REGISTRATION_EMAIL', False)
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.uid_hash is not None
assert u.active is True
assert u.confirmed is True
assert u.confirmed_at is not None
def test_confirm_user_not_exists(session):
"""Ensure a non-existent user is not confirmed."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
with pytest.raises(NoResults) as exc_data:
auth_service.confirm_user(session, '0000', None)
assert str(exc_data.value) == 'Could not find a user with the given hash'
assert u.active is False
assert u.confirmed is False
def test_confirm_user_wrong_email(session):
"""Ensure a user cannot be confirmed with a mismatched email."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
with pytest.raises(ValueError) as exc_data:
auth_service.confirm_user(session, u.uid_hash, 'test@bad.io')
assert 'Email address does not match' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_confirm_user_already_confirmed(session):
"""Ensure a user already confirmed cannot be confirmed again."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert u.active is False
assert u.confirmed is False
# Set confirmed to True as in a user who had confirmed but then was
# deactivated
u.confirmed = True
u.confirmed_at = utcnow()
with pytest.raises(DomainError) as exc_data:
auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
assert 'already confirmed' in str(exc_data.value)
assert u.active is False
assert u.confirmed is True
def test_confirm_user_bad_token_no_uid(app, session):
"""Ensure a token without a uid subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
email='test@jadetree.io'
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.confirm_user_with_token(session, token)
assert 'uid claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_confirm_user_bad_token_no_email(app, session):
"""Ensure a token without an email subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CONFIRM_EMAIL,
uid=u.uid_hash
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.confirm_user_with_token(session, token)
assert 'email claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_cancel_user_bad_token_no_email(app, session):
"""Ensure a token without an email subject is rejected."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
token = auth_service.encodeJwt(
app,
subject=auth_service.JWT_SUBJECT_CANCEL_EMAIL,
)
with pytest.raises(JwtPayloadError) as exc_data:
auth_service.cancel_registration_with_token(session, token)
assert 'email claim' in str(exc_data.value)
assert u.active is False
assert u.confirmed is False
def test_get_user(session):
"""Ensure a user can be looked up by ID."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
u = session.query(User).filter(User.id == nu.id).one_or_none()
assert auth_service.get_user(session, u.id) == nu
def test_get_user_not_exists(session):
"""Ensure a non-existent User ID returns None."""
assert auth_service.get_user(session, 1) is None
def test_get_user_invalid(session):
"""Ensure an invalid User ID returns None."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert auth_service.get_user(session, 'one') is None
def test_load_user_by_hash(session):
"""Ensure a user can be looked up by ID Hash."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
u = session.query(User).filter(User.id == nu.id).one_or_none()
assert u.uid_hash is not None
assert auth_service.load_user_by_hash(session, u.uid_hash) == nu
def test_load_user_by_hash_not_exists(session):
"""Ensure an ID hash which does not exist returns None."""
assert auth_service.load_user_by_hash(session, '00000000000000000000000000000000') is None
def test_load_user_by_hash_invalid(session):
"""Ensure an invalid ID hash returns None."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert u.id == 1
assert auth_service.load_user_by_hash(session, 'one') is None
def test_invalidate_uid_hash(session):
"""Ensure the ID hash can be changed to invalidate login sessions."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
old_id = u.id
old_hash = u.uid_hash
nu = auth_service.invalidate_uid_hash(session, old_hash)
assert nu.id == old_id
assert nu.uid_hash != old_hash
def test_invalidate_uid_hash_invalid(session):
"""Ensure an error is raised when invalidating a non-existent ID hash."""
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(NoResults, match='Could not find a user'):
auth_service.invalidate_uid_hash(session, 'xxx')
def test_load_user_by_email(session):
"""Ensure a user can be looked up by email address."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
assert auth_service.load_user_by_email(session, 'test@jadetree.io') == nu
def test_load_user_by_token(session):
"""Ensure a user can be looked up by JSON Web Token."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
# load_user_by_token is hardcoded for JWT_SUBJECT_BEARER_TOKEN
token = auth_service.generate_user_token(nu, JWT_SUBJECT_BEARER_TOKEN)
assert auth_service.load_user_by_token(session, token) == nu
def test_load_user_by_token_no_uid(app, session):
"""Ensure a JWT missing the user ID hash key is rejected."""
nu = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
assert nu.id == 1
# load_user_by_token is hardcoded for JWT_SUBJECT_BEARER_TOKEN
token = auth_service.encodeJwt(
app,
subject=JWT_SUBJECT_BEARER_TOKEN,
exp=datetime.datetime.utcnow() + datetime.timedelta(minutes=1),
)
with pytest.raises(JwtPayloadError, match='Missing uid key') as excinfo:
auth_service.load_user_by_token(session, token)
assert excinfo.value.payload_key == 'uid'
def test_change_user_password(app, session, monkeypatch):
"""Ensure a user password can be changed."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
u_hash = u.uid_hash
assert u.check_password('hunter2JT')
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
rv = auth_service.change_password(
session,
u2.uid_hash,
'hunter2JT',
'aSecu43Pa55w0rd'
)
assert 'token' in rv
assert 'user' in rv
u3 = auth_service.load_user_by_email(session, 'test@jadetree.io')
assert u3.check_password('aSecu43Pa55w0rd')
assert u_hash != u3.uid_hash
def test_change_user_password_keep_hash(app, session, monkeypatch):
"""Ensure a user password can be changed without changing ID hash."""
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
u_hash = u.uid_hash
assert u.check_password('hunter2JT')
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
u2 = auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
rv = auth_service.change_password(
session,
u2.uid_hash,
'hunter2JT',
'aSecu43Pa55w0rd',
logout_sessions=False,
)
assert 'token' in rv
assert 'user' in rv
u3 = auth_service.load_user_by_email(session, 'test@jadetree.io')
assert u3.check_password('aSecu43Pa55w0rd')
assert u_hash == u3.uid_hash
def test_change_user_password_invalid_hash(app, session, monkeypatch):
"""Ensure a user password with an invalid User ID hash is rejected."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with pytest.raises(NoResults) as excinfo:
auth_service.change_password(
session,
'0',
'hunter2JT',
'aSecu43Pa55w0rd',
logout_sessions=False,
)
assert 'Could not find' in str(excinfo.value)
def test_change_user_password_inactive(app, session, monkeypatch):
"""Ensure an inactive user cannot change their password."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(AuthError) as excinfo:
auth_service.change_password(
session,
u.uid_hash,
'hunter2JT',
'aSecu43Pa55w0rd',
logout_sessions=False,
)
assert 'is not active' in str(excinfo.value)
def test_user_list_personal(app, session, monkeypatch):
"""Ensure a list of authorized users can be generated in Personal mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'personal')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
users = auth_service.auth_user_list(session)
assert len(users) == 1
assert users[0] == u
def test_user_list_family(app, session, monkeypatch):
"""Ensure a list of authorized users can be generated in Family mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'family')
u1 = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
u2 = auth_service.register_user(session, 'test2@jadetree.io', 'hunter2JT', 'Test User 2')
users = auth_service.auth_user_list(session)
assert len(users) == 2
assert users[0] == u1
assert users[1] == u2
def test_user_list_public(app, session, monkeypatch):
"""Ensure the authorized user list is not available in Public mode."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
with pytest.raises(DomainError):
auth_service.auth_user_list(session)
def test_unconfirmed_user_cannot_log_in(app, session, monkeypatch):
"""Ensure an unconfirmed user cannot log in."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
with pytest.raises(AuthError) as exc_info:
auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
assert 'confirmed registration' in str(exc_info.value)
def test_inactive_user_cannot_log_in(app, session, monkeypatch):
"""Ensure an inactivated user cannot log in."""
monkeypatch.setitem(app.config, '_JT_SERVER_MODE', 'public')
u = auth_service.register_user(session, 'test@jadetree.io', 'hunter2JT', 'Test User')
auth_service.confirm_user(session, u.uid_hash, 'test@jadetree.io')
monkeypatch.setattr(u, 'active', False)
with pytest.raises(AuthError) as exc_info:
auth_service.login_user(session, 'test@jadetree.io', 'hunter2JT')
assert 'active' in str(exc_info.value)
| [
37811,
14402,
48191,
290,
35263,
4809,
526,
15931,
198,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
198,
6738,
15452,
1330,
3384,
66,
2197,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
11748,
12972,
9288,
220,
1303,
645,
20402,
25,
37... | 2.515284 | 9,847 |
# -*- coding: utf-8 -*-

# NOTE(review): main() is not defined in this chunk — presumably it is
# defined elsewhere in the file; confirm before running.
if __name__ == '__main__':
    main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 1.775 | 40 |
from django.contrib import admin
from teams.models import Team
admin.site.register(Team, TeamAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
3466,
13,
27530,
1330,
4816,
628,
198,
28482,
13,
15654,
13,
30238,
7,
15592,
11,
4816,
46787,
8,
198
] | 3.517241 | 29 |
import helpers.ipc as ipc
import numpy as np
from enum import Enum
address = ('localhost', 6000)
router = ipc.Router(address, authkey=b'test')
@router.expose
@router.expose
@router.expose
router.serve()
| [
11748,
49385,
13,
541,
66,
355,
20966,
66,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
33829,
1330,
2039,
388,
628,
198,
198,
21975,
796,
19203,
36750,
3256,
39064,
8,
198,
472,
353,
796,
20966,
66,
13,
49,
39605,
7,
21975,
11,
6... | 2.609756 | 82 |
import random
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
if len(ints) == 0:
return None, None
min_v, max_v = ints[0], ints[0]
for num in ints[1:]:
# in case null/empty values (not integers or floats), omit this element
if not(type(num) == int or type(num) == float):
continue
if num < min_v:
min_v = num
if num > max_v:
max_v = num
return min_v, max_v
# Example Test Case of Ten Integers
l = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(l)
print("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Pass
# My test cases
# CASE #1 (edge case): Empty list, does not have min/max : returns a tuple of (None,None)
print(get_min_max([]) == (None, None))
# True
# CASE #2: List containing Null values,
alist = [1, 2, 3, None, 4, None, 5]
print(get_min_max(alist) == (1, 5))
# True
# CASE #3 List of all None values
aList = [None for i in range(10)]
print(get_min_max(aList) == (None, None))
# True
# CASE #4 (edge case): big input of randomly shuffled 20 million integers
a = [i for i in range(int(-1e6), int(1e6)+1, 1)]
random.shuffle(a)
print(get_min_max(a) == (-int(1e6), int(1e6)))
# True
# CASE #5 (edge case): input of identical numbers: min==max
a = [22 for i in range(100)]
print(get_min_max(a) == (22, 22))
# True
# CASE #6: a simple test case
a = [5, 4, 2, 1, -10, 12, 15, 235]
print(get_min_max(a) == (-10, 235))
# True
| [
11748,
4738,
628,
198,
4299,
651,
62,
1084,
62,
9806,
7,
29503,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
8229,
257,
46545,
7,
1084,
11,
3509,
8,
503,
286,
1351,
286,
5576,
9741,
37014,
13,
628,
220,
220,
220,
943,
1454... | 2.384731 | 668 |
#Codeing-Utf8
| [
2,
10669,
278,
12,
18274,
69,
23,
198
] | 1.75 | 8 |
from rest_framework import filters
import django_filters
from core.models import ApplicationVersion as ImageVersion
from api.v2.views.base import AuthOptionalViewSet
from api.v2.serializers.details import ImageVersionSerializer
class ImageVersionViewSet(AuthOptionalViewSet):
"""
API endpoint that allows instance actions to be viewed or edited.
"""
queryset = ImageVersion.objects.all()
serializer_class = ImageVersionSerializer
search_fields = ('application__id', 'application__created_by__username')
ordering_fields = ('start_date',)
ordering = ('start_date',)
filter_class = ImageVersionFilter
filter_backends = (filters.OrderingFilter, filters.DjangoFilterBackend)
| [
6738,
1334,
62,
30604,
1330,
16628,
198,
11748,
42625,
14208,
62,
10379,
1010,
198,
198,
6738,
4755,
13,
27530,
1330,
15678,
14815,
355,
7412,
14815,
198,
6738,
40391,
13,
85,
17,
13,
33571,
13,
8692,
1330,
26828,
30719,
7680,
7248,
198... | 3.404762 | 210 |
import RPi.GPIO as GPIO
class Keymap:
"""BCM keymap for joystick buttons"""
UP = 6
DOWN = 19
LEFT = 5
RIGHT = 26
PRESS = 13
| [
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
628,
198,
4871,
7383,
8899,
25,
198,
220,
220,
220,
37227,
2749,
44,
1994,
8899,
329,
49485,
12163,
37811,
198,
220,
220,
220,
15958,
796,
718,
198,
220,
220,
220,
30320,
796,
678,
198,
... | 2.359375 | 64 |
from django.http import JsonResponse
from lms.models import User
from rest_framework import status
import jwt
from app.settings import SECRET_KEY
from datetime import datetime, timedelta
import os
import sys
import json
import re
from django.core.exceptions import ValidationError
from pprint import pprint
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse
from lms.utils import *
@authorize_user | [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
6738,
300,
907,
13,
27530,
1330,
11787,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
11748,
474,
46569,
198,
6738,
598,
13,
33692,
1330,
10729,
26087,
62,
20373,
198,
6738,
4818... | 3.620968 | 124 |
# Copyright (c) 2010 - 2020, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import binascii
import subprocess
import shlex
import sys
import serial
import time
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Please provide the serial number of your device")
print_usage()
exit(1)
if len(sys.argv) < 3:
print("Please provide the COM port of your device")
print_usage()
exit(1)
try:
int(sys.argv[1])
except:
print("Invalid serial number " + sys.argv[1])
print_usage()
exit(1)
bootloader_addr = read_uicr(sys.argv[1])
read_device_page(sys.argv[1])
reset_device(sys.argv[1], sys.argv[2])
echo(sys.argv[2])
print("\nBootloader verification OK.")
| [
2,
15069,
357,
66,
8,
3050,
532,
12131,
11,
35834,
311,
5314,
40990,
49599,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
... | 2.993412 | 759 |
from ms_deisotope._c.spectrum_graph import (
PathFinder,
MassWrapper,
PeakGroupNode,
PeakNode,
NodeBase,
Path,
SpectrumGraph)
amino_acids = [
MassWrapper('G', 57.02146372057),
MassWrapper('A', 71.03711378471),
MassWrapper('S', 87.03202840427),
MassWrapper('P', 97.05276384884999),
MassWrapper('V', 99.06841391299),
MassWrapper('T', 101.04767846841),
MassWrapper('C', 103.00918478471),
MassWrapper('J', 113.08406397713),
MassWrapper('N', 114.04292744114),
MassWrapper('D', 115.02694302383),
MassWrapper('Q', 128.05857750528),
MassWrapper('K', 128.094963014),
MassWrapper('E', 129.04259308797),
MassWrapper('M', 131.04048491299),
MassWrapper('H', 137.05891185845),
MassWrapper('F', 147.06841391299),
MassWrapper('R', 156.1011110236),
MassWrapper('Y', 163.06332853255),
MassWrapper('W', 186.07931294986),
]
| [
6738,
13845,
62,
2934,
271,
313,
3008,
13557,
66,
13,
4443,
6582,
62,
34960,
1330,
357,
198,
220,
220,
220,
10644,
37,
5540,
11,
198,
220,
220,
220,
5674,
36918,
2848,
11,
198,
220,
220,
220,
23974,
13247,
19667,
11,
198,
220,
220,
... | 2.174641 | 418 |
#
# $Id$
#
# configuration file for benchmarking platform
#
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# hardcoded global variables
list_num_query_mols = [5, 10, 20]
num_reps = 50 # number of repetitions
percent_dcy = 0.2 # percentage of decoys used for training
p_value = 0.05 # confidence level for statistical analysis
# collection of data sets
muv_ids = [
466,
548,
600,
644,
652,
689,
692,
712,
713,
733,
737,
810,
832,
846,
852,
858,
859,
]
# muv_ids = [859]
dud_ids = [
"ace",
"ache",
"ar",
"cdk2",
"cox2",
"dhfr",
"egfr",
"er_agonist",
"fgfr1",
"fxa",
"gpb",
"gr",
"hivrt",
"inha",
"na",
"p38",
"parp",
"pdgfrb",
"sahh",
"src",
"vegfr2",
]
# dud_ids = ["vegfr2"]
chembl_ids = [
11359,
28,
11536,
8,
10434,
12670,
20014,
234,
12261,
12209,
25,
36,
43,
219,
130,
105,
11336,
20174,
126,
11225,
12252,
11682,
134,
116,
11265,
10475,
12679,
10579,
11575,
18061,
237,
276,
11534,
10198,
10498,
12911,
12968,
100579,
100126,
10378,
10417,
10752,
10773,
11631,
10927,
11085,
11442,
11279,
11488,
12840,
]
# chembl_ids = [12840]
set_data = {}
set_data["MUV"] = dict(
fullname="MUV",
ids=muv_ids,
prefix="aid",
suffix="_actives.sdf",
dcy_prefix="aid",
dcy_suffix="_decoys.sdf",
propName="PUBCHEM_COMPOUND_CID",
dcy_propName="PUBCHEM_COMPOUND_CID",
)
set_data["DUD"] = dict(
fullname="DUD",
ids=dud_ids,
prefix="",
suffix="_clustered_3D_MM.sdf",
dcy_prefix="DUD_",
dcy_suffix="_decoys_ID_pass_MWPass_I_MM.sdf",
propName="id",
dcy_propName="Mol_Title",
)
set_data["ChEMBL"] = dict(
fullname="ChEMBL-sereina/diverse_100",
ids=chembl_ids,
prefix="Target_no_",
suffix=".sdf",
dcy_name="decoys_10000_zinc.sdf",
propName="Name",
dcy_propName="_Name",
)
| [
2,
198,
2,
720,
7390,
3,
198,
2,
198,
2,
8398,
2393,
329,
18335,
278,
3859,
198,
2,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
2211,
11,
5267,
433,
271,
33656,
329,
16024,
37158,
4992,
3457,
13,
198,
2,
220,
1439,
2489,
10395,
13... | 2.254103 | 1,645 |
import cv2
camera = cv2.VideoCapture(0)
camera.set(3, 1280)
while True:
ret, image = camera.read()
cv2.imshow('Webcam', image)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
198,
25695,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
198,
198,
25695,
13,
2617,
7,
18,
11,
37674,
8,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
1005,
11,
2939,
796,
4676,
13,
961,
3419,
628,
22... | 2.203704 | 108 |
"""Calculate mean and std of data"""
import math
from typing import Dict
import torch
from tqdm import tqdm
import ml.utils
from ml.experiment import Experiment
def calculate_stats(exp: Experiment) -> Dict[str, float]:
"""Calculate mean and std of data"""
avg = ml.utils.RunningAverage()
sq_avg = ml.utils.RunningAverage()
proc = exp.preprocessor
if proc is None:
with tqdm(total=len(exp.dls["train"]), desc="calculating stats",
ncols=100) as tqbar:
for data, _ in exp.dls["train"]:
with torch.no_grad():
data = data.to(exp.device)
output = proc(data)
avg.update(output.mean().item())
sq_avg.update((output**2).mean().item())
tqbar.set_postfix(mean=f"{avg():05.3f}")
tqbar.update()
mean = avg()
return {"mean": mean, "std": math.sqrt(sq_avg() - mean**2)}
| [
37811,
9771,
3129,
378,
1612,
290,
14367,
286,
1366,
37811,
198,
198,
11748,
10688,
198,
6738,
19720,
1330,
360,
713,
198,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
11748,
25962,
13,
26791,
198,
6738,... | 2.235294 | 408 |
import os
import sys
import re
if __name__ == "__main__":
data = None
wdir = os.path.dirname(sys.argv[0])
with open(os.path.join(wdir, "input.txt")) as f:
data = f.readlines()
passports = 0
pp = {}
for d in data:
d = d.strip("\n")
if not d:
if validate(pp):
passports += 1
pp = {}
continue
for i in d.split(" "):
tmp = i.split(":")
pp[tmp[0]] = tmp[1]
if validate(pp):
passports += 1
print("valid passports: %d"%passports)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
302,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1366,
796,
6045,
198,
220,
220,
220,
266,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
17597,... | 1.916667 | 300 |
import datetime
from collections import OrderedDict
from app import app, db
from flask.ext.login import current_user
from config import ALLOWED_EXTENSIONS
from rauth import OAuth2Service
import json
import urllib2
from flask import request, redirect, url_for, render_template, g, jsonify, abort, flash
from flask.ext.login import login_user
from models import User, Post, ExifStats
from forms import CommentForm
from .emails import follower_notification
from .form_processor import UploadFormProcessor, LoginFormProcessor, PhotosFormProcessor,\
SignupFormProcessor, UpdateFormProcessor, CommentFormProcessor
| [
11748,
4818,
8079,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
598,
1330,
598,
11,
20613,
198,
6738,
42903,
13,
2302,
13,
38235,
1330,
1459,
62,
7220,
198,
6738,
4566,
1330,
11096,
3913,
1961,
62,
13918,
16938,
11053,
198,
... | 3.702381 | 168 |
from .value import _Value, _ValueInstance, _raise_bad_value_error
from .helper import Empty
| [
6738,
764,
8367,
1330,
4808,
11395,
11,
4808,
11395,
33384,
11,
4808,
40225,
62,
14774,
62,
8367,
62,
18224,
198,
6738,
764,
2978,
525,
1330,
33523,
628,
198
] | 3.357143 | 28 |
import csv
import numpy as np
if __name__ == '__main__':
main()
print("End") | [
11748,
269,
21370,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198,
220,
220,
220,
3601,
7203,
12915,
4943
] | 2.416667 | 36 |