hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18130c6e5fbe0f38293aa354b84d8f09216fa4df
| 280
|
py
|
Python
|
life/app/management/commands/execute_jobs.py
|
coronasafe/life_backend
|
00eacfdc5cd544dc136fb306340fb0d56afa78ff
|
[
"MIT"
] | 1
|
2021-05-15T20:40:00.000Z
|
2021-05-15T20:40:00.000Z
|
life/app/management/commands/execute_jobs.py
|
coronasafe/life_backend
|
00eacfdc5cd544dc136fb306340fb0d56afa78ff
|
[
"MIT"
] | null | null | null |
life/app/management/commands/execute_jobs.py
|
coronasafe/life_backend
|
00eacfdc5cd544dc136fb306340fb0d56afa78ff
|
[
"MIT"
] | 2
|
2021-05-08T01:45:42.000Z
|
2021-05-17T02:00:10.000Z
|
from django.core.management.base import BaseCommand
from life.app.tasks.job_executor import run_jobs
class Command(BaseCommand):
    """
    Management command that forces an immediate run of all jobs.
    """

    help = "Force run jobs"

    def handle(self, *args, **options):
        # Delegate directly to the shared job-executor task.
        run_jobs()
| 17.5
| 51
| 0.675
|
bfab9d15326e4255af3e175001e1bba778183579
| 6,838
|
py
|
Python
|
salt/fileserver/gitfs.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 1
|
2021-04-05T19:46:35.000Z
|
2021-04-05T19:46:35.000Z
|
salt/fileserver/gitfs.py
|
dv-trading/salt
|
f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e
|
[
"Apache-2.0"
] | null | null | null |
salt/fileserver/gitfs.py
|
dv-trading/salt
|
f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Git Fileserver Backend
With this backend, branches and tags in a remote git repository are exposed to
salt as different environments.
To enable, add ``git`` to the :conf_master:`fileserver_backend` option in the
Master config file.
.. code-block:: yaml
fileserver_backend:
- git
The Git fileserver backend supports both pygit2_ and GitPython_, to provide the
Python interface to git. If both are present, the order of preference for which
one will be chosen is the same as the order in which they were listed: pygit2,
then GitPython.
An optional master config parameter (:conf_master:`gitfs_provider`) can be used
to specify which provider should be used, in the event that compatible versions
of both pygit2_ and GitPython_ are installed.
More detailed information on how to use GitFS can be found in the :ref:`GitFS
Walkthrough <tutorial-gitfs>`.
.. note:: Minimum requirements
To use pygit2_ for GitFS requires a minimum pygit2_ version of 0.20.3.
pygit2_ 0.20.3 requires libgit2_ 0.20.0. pygit2_ and libgit2_ are developed
alongside one another, so it is recommended to keep them both at the same
major release to avoid unexpected behavior. For example, pygit2_ 0.21.x
requires libgit2_ 0.21.x, pygit2_ 0.22.x will require libgit2_ 0.22.x, etc.
To use GitPython_ for GitFS requires a minimum GitPython version of 0.3.0,
as well as the git CLI utility. Instructions for installing GitPython can
be found :ref:`here <gitfs-dependencies>`.
To clear stale refs the git CLI utility must also be installed.
.. _pygit2: https://github.com/libgit2/pygit2
.. _libgit2: https://libgit2.github.com/
.. _GitPython: https://github.com/gitpython-developers/GitPython
'''
# Import python libs
from __future__ import absolute_import
import logging
PER_REMOTE_OVERRIDES = ('base', 'mountpoint', 'root', 'ssl_verify',
'env_whitelist', 'env_blacklist', 'refspecs')
PER_REMOTE_ONLY = ('name', 'saltenv')
# Auth support (auth params can be global or per-remote, too)
AUTH_PROVIDERS = ('pygit2',)
AUTH_PARAMS = ('user', 'password', 'pubkey', 'privkey', 'passphrase',
'insecure_auth')
# Import salt libs
import salt.utils.gitfs
from salt.exceptions import FileserverConfigError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'git'
def __virtual__():
    '''
    Load this backend only when ``git`` is listed in the master's
    :conf_master:`fileserver_backend` option and a usable git provider
    (pygit2 or GitPython) can be detected.
    '''
    if __virtualname__ not in __opts__['fileserver_backend']:
        return False
    try:
        # Constructing the GitFS object validates the configuration syntax
        # and confirms that a supported provider is available.
        salt.utils.gitfs.GitFS(__opts__)
    except FileserverConfigError:
        return False
    return __virtualname__
def clear_cache():
    '''
    Completely wipe the gitfs cache.
    '''
    # No remote initialization is needed just to clear the cache directory.
    return salt.utils.gitfs.GitFS(__opts__).clear_cache()
def clear_lock(remote=None, lock_type='update'):
    '''
    Remove an update.lk lock file.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.clear_lock(remote=remote, lock_type=lock_type)
def lock(remote=None):
    '''
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.lock(remote=remote)
def update():
    '''
    Run a ``git fetch`` against every configured repo.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    backend.update()
def envs(ignore_cache=False):
    '''
    List the git refs that are usable as fileserver environments.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.envs(ignore_cache=ignore_cache)
def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
    '''
    Locate the first file matching ``path`` and ``tgt_env``, read it out of
    git, and return the location of the newly cached copy.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.find_file(path, tgt_env=tgt_env, **kwargs)
def init():
    '''
    Initialize the configured remotes. Only the master's pre-flight checks
    use this; GitFS itself never invokes it.
    '''
    salt.utils.gitfs.GitFS(__opts__).init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
def serve_file(load, fnd):
    '''
    Serve a chunk of a file based on the data received.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.serve_file(load, fnd)
def file_hash(load, fnd):
    '''
    Return a file hash; the hash type comes from the master config file.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.file_hash(load, fnd)
def file_list(load):
    '''
    Return every file on the file server for the environment named in the
    ``load`` dict.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.file_list(load)
def file_list_emptydirs(load):  # pylint: disable=W0613
    '''
    List empty directories on the master (always empty for gitfs).
    '''
    # git cannot represent empty directories, so there is never anything here
    return []
def dir_list(load):
    '''
    Return every directory known to the master.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.dir_list(load)
def symlink_list(load):
    '''
    Return a dict of all symlinks under the given path in the repo.
    '''
    backend = salt.utils.gitfs.GitFS(__opts__)
    backend.init_remotes(
        __opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
    )
    return backend.symlink_list(load)
| 31.511521
| 79
| 0.690553
|
3383e372111159f070699710fd4ef89e145d60c1
| 27,920
|
py
|
Python
|
great_expectations/cli/suite.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | 1
|
2021-11-09T05:07:43.000Z
|
2021-11-09T05:07:43.000Z
|
great_expectations/cli/suite.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/cli/suite.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | null | null | null |
import copy
import os
import sys
from typing import Any, Dict, List, Optional, Union
import click
from great_expectations import DataContext
from great_expectations import exceptions as ge_exceptions
from great_expectations.cli import toolkit
# noinspection PyPep8Naming
from great_expectations.cli.mark import Mark as mark
from great_expectations.cli.pretty_printing import cli_message, cli_message_list
from great_expectations.core import ExpectationSuite
from great_expectations.core.batch import BatchRequest
from great_expectations.core.usage_statistics.usage_statistics import (
edit_expectation_suite_usage_statistics,
)
from great_expectations.render.renderer.v3.suite_edit_notebook_renderer import (
SuiteEditNotebookRenderer,
)
from great_expectations.render.renderer.v3.suite_profile_notebook_renderer import (
SuiteProfileNotebookRenderer,
)
try:
from sqlalchemy.exc import SQLAlchemyError
except ImportError:
# We'll redefine this error in code below to catch ProfilerError, which is caught above, so SA errors will
# just fall through
SQLAlchemyError = ge_exceptions.ProfilerError
@click.group()
@click.pass_context
def suite(ctx):
    """Expectation Suite operations"""
    # Resolve the project directory from the CLI config-file location, if one
    # was supplied on the command line.
    directory: str = toolkit.parse_cli_config_file_location(
        config_file_location=ctx.obj.config_file_location
    ).get("directory")
    context: DataContext = toolkit.load_data_context_with_error_handling(
        directory=directory,
        from_cli_upgrade_command=False,
    )
    # TODO consider moving this all the way up in to the CLIState constructor
    ctx.obj.data_context = context
    # Usage-stats events are named after the invoked subcommand, e.g.
    # "cli.suite.new.begin" / "cli.suite.new.end".
    usage_stats_prefix: str = f"cli.suite.{ctx.invoked_subcommand}"
    toolkit.send_usage_message(
        data_context=context,
        event=f"{usage_stats_prefix}.begin",
        success=True,
    )
    # Stashed so each subcommand can emit the matching ".end" event later.
    ctx.obj.usage_event_end = f"{usage_stats_prefix}.end"
@suite.command(name="new")
@click.option(
    "--expectation-suite",
    "-e",
    default=None,
    help="Expectation suite name.",
)
@click.option(
    "--interactive",
    "-i",
    "interactive_flag",
    is_flag=True,
    default=False,
    help="""Use a batch of data to create expectations against (interactive mode).
""",
)
@click.option(
    "--manual",
    "-m",
    "manual_flag",
    is_flag=True,
    default=False,
    help="""Do not use a batch of data to create expectations against (manual mode).
""",
)
@click.option(
    "--profile",
    "-p",
    is_flag=True,
    default=False,
    help="""Generate a starting expectation suite automatically so you can refine it further. Assumes --interactive
flag.
""",
)
@click.option(
    "--batch-request",
    "-br",
    help="""Arguments to be provided to get_batch when loading the data asset. Must be a path to a valid JSON file.
Assumes --interactive flag.
""",
    default=None,
)
@click.option(
    "--no-jupyter",
    "-nj",
    is_flag=True,
    default=False,
    help="By default launch jupyter notebooks, unless you specify --no-jupyter flag.",
)
@click.pass_context
def suite_new(
    ctx,
    expectation_suite,
    interactive_flag,
    manual_flag,
    profile,
    batch_request,
    no_jupyter,
):
    """
    Create a new Expectation Suite.
    Edit in jupyter notebooks, or skip with the --no-jupyter flag.
    """
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    # Reconcile the flag combinations, prompting the user when no flags were
    # supplied at all; exits with an error on conflicting flags.
    processed_flags: Dict[str, Optional[bool]] = _process_suite_new_flags_and_prompt(
        context=context,
        usage_event_end=usage_event_end,
        interactive_flag=interactive_flag,
        manual_flag=manual_flag,
        profile=profile,
        batch_request=batch_request,
    )
    # Hand off to the workflow that creates the suite and renders the notebook.
    _suite_new_workflow(
        context=context,
        expectation_suite_name=expectation_suite,
        interactive=processed_flags["interactive"],
        profile=processed_flags["profile"],
        no_jupyter=no_jupyter,
        usage_event=usage_event_end,
        batch_request=batch_request,
    )
def _process_suite_new_flags_and_prompt(
    context: DataContext,
    usage_event_end: str,
    interactive_flag: bool,
    manual_flag: bool,
    profile: bool,
    batch_request: Optional[str] = None,
) -> Dict[str, Optional[bool]]:
    """
    Process various optional suite new flags and prompt if there is not enough information from the flags.

    Args:
        context: Data Context for use in sending error messages if any
        usage_event_end: event name for ending usage stats message
        interactive_flag: --interactive from the `suite new` CLI command
        manual_flag: --manual from the `suite new` CLI command
        profile: --profile from the `suite new` CLI command
        batch_request: --batch-request from the `suite new` CLI command

    Returns:
        Dictionary with keys of processed parameters and boolean values e.g.
        {"interactive": True, "profile": False}
    """
    error_message: Optional[str] = None
    # Convert interactive / no-interactive flags to a tri-state value:
    # True => interactive, False => manual, None => neither flag supplied.
    interactive: Optional[bool] = None
    if interactive_flag is True and manual_flag is True:
        error_message = """Please choose either --interactive or --manual, you may not choose both."""
    elif interactive_flag is False and manual_flag is False:
        interactive = None
    elif interactive_flag is True and manual_flag is False:
        interactive = True
    elif interactive_flag is False and manual_flag is True:
        interactive = False
    if error_message is not None:
        # Conflicting flags: report, record a failed usage event, and exit.
        cli_message(string=f"<red>{error_message}</red>")
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=False
        )
        sys.exit(1)
    user_provided_any_flag_skip_prompt: bool = any(
        ((interactive is not None), (profile is True), (batch_request is not None))
    )
    # Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates
    # that a user did not supply either flag.
    if user_provided_any_flag_skip_prompt:
        # Assume batch needed if user passes --profile
        if profile and interactive is None:
            cli_message(
                "<green>Entering interactive mode since you passed the --profile flag</green>"
            )
            interactive = True
        elif profile and interactive is False:
            cli_message(
                "<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --profile flag</yellow>"
            )
            interactive = True
        # Assume batch needed if user passes --batch-request
        elif (batch_request is not None) and (interactive is None):
            cli_message(
                "<green>Entering interactive mode since you passed the --batch-request flag</green>"
            )
            interactive = True
        elif (batch_request is not None) and (interactive is False):
            cli_message(
                "<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>"
            )
            interactive = True
    else:
        # No flags were supplied at all: ask how the suite should be created.
        suite_create_method: str = click.prompt(
            """
How would you like to create your Expectation Suite?
1. Manually, without interacting with a sample batch of data (default)
2. Interactively, with a sample batch of data
3. Automatically, using a profiler
""",
            type=click.Choice(["1", "2", "3"]),
            show_choices=False,
            default="1",
            show_default=False,
        )
        # Default option
        if suite_create_method == "":
            interactive = False
            profile = False
        elif suite_create_method == "1":
            interactive = False
            profile = False
        elif suite_create_method == "2":
            interactive = True
            profile = False
        elif suite_create_method == "3":
            interactive = True
            profile = True
    return {"interactive": interactive, "profile": profile}
def _suite_new_workflow(
    context: DataContext,
    expectation_suite_name: str,
    interactive: bool,
    profile: bool,
    no_jupyter: bool,
    usage_event: str,
    batch_request: Optional[
        Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
    ] = None,
):
    """
    Create (or fetch) an expectation suite — optionally against an interactive
    batch of data — then delegate to the edit workflow to render and open the
    edit notebook. Exits the process with status 1 on expected errors.
    """
    try:
        datasource_name: Optional[str] = None
        data_asset_name: Optional[str] = None
        # Limit interactively-loaded batches to 1000 rows by default.
        additional_batch_request_args: Optional[
            Dict[str, Union[str, int, Dict[str, Any]]]
        ] = {"limit": 1000}
        if interactive:
            if batch_request is not None and isinstance(batch_request, str):
                # --batch-request was given as a path to a JSON file; parse it.
                batch_request = toolkit.get_batch_request_from_json_file(
                    batch_request_json_file_path=batch_request,
                    data_context=context,
                    usage_event=usage_event,
                    suppress_usage_message=False,
                )
            if not batch_request:
                # No batch request provided; build one by prompting against a
                # datasource.
                batch_request = toolkit.get_batch_request_using_datasource_name(
                    data_context=context,
                    datasource_name=datasource_name,
                    usage_event=usage_event,
                    suppress_usage_message=False,
                    additional_batch_request_args=additional_batch_request_args,
                )
                # In this case, we have "consumed" the additional_batch_request_args
                additional_batch_request_args = {}
            data_asset_name = batch_request.get("data_asset_name")
        else:
            batch_request = None
        # noinspection PyShadowingNames
        suite: ExpectationSuite = toolkit.get_or_create_expectation_suite(
            expectation_suite_name=expectation_suite_name,
            data_context=context,
            data_asset_name=data_asset_name,
            usage_event=usage_event,
            suppress_usage_message=False,
            batch_request=batch_request,
            create_if_not_exist=True,
        )
        expectation_suite_name = suite.expectation_suite_name
        # Record the batch request used as a citation on the suite.
        toolkit.add_citation_with_batch_request(
            data_context=context,
            expectation_suite=suite,
            batch_request=batch_request,
        )
        toolkit.send_usage_message(
            data_context=context, event=usage_event, success=True
        )
        if batch_request:
            datasource_name = batch_request.get("datasource_name")
        # This usage event is suppressed via suppress_usage_message but here because usage_event is not optional
        usage_event = "cli.suite.edit.begin"  # or else we will be sending `cli.suite.new` which is incorrect
        # do not want to actually send usage_message, since the function call is not the result of actual usage
        _suite_edit_workflow(
            context=context,
            expectation_suite_name=expectation_suite_name,
            profile=profile,
            usage_event=usage_event,
            interactive=interactive,
            no_jupyter=no_jupyter,
            create_if_not_exist=True,
            datasource_name=datasource_name,
            batch_request=batch_request,
            additional_batch_request_args=additional_batch_request_args,
            suppress_usage_message=True,
            assume_yes=False,
        )
    except (
        ge_exceptions.DataContextError,
        ge_exceptions.ProfilerError,
        ValueError,
        OSError,
        SQLAlchemyError,
    ) as e:
        # Expected/configuration errors: show message, record failure, exit.
        cli_message(string=f"<red>{e}</red>")
        toolkit.send_usage_message(
            data_context=context, event=usage_event, success=False
        )
        sys.exit(1)
    except Exception as e:
        # Unexpected errors: record the failure, then re-raise for a traceback.
        toolkit.send_usage_message(
            data_context=context, event=usage_event, success=False
        )
        raise e
@suite.command(name="edit")
@click.argument("expectation_suite")
@click.option(
    "--interactive",
    "-i",
    "interactive_flag",
    is_flag=True,
    default=False,
    help="""Allows to specify explicitly whether or not a batch of data is available to reason about using the language
of expectations; otherwise, best effort is made to determine this automatically (falling back to False). Assumed with
--datasource-name option and with --batch-request option.
""",
)
@click.option(
    "--manual",
    "-m",
    "manual_flag",
    is_flag=True,
    default=False,
    help="""Do not use a batch of data to create expectations against(manual mode).
""",
)
@click.option(
    "--datasource-name",
    "-ds",
    default=None,
    help="""The name of the datasource. Assumes --interactive flag. Incompatible with --batch-request option.
""",
)
@click.option(
    "--batch-request",
    "-br",
    help="""Arguments to be provided to get_batch when loading the data asset. Must be a path to a valid JSON file.
Assumes --interactive flag. Incompatible with --datasource-name option.
""",
    default=None,
)
@click.option(
    "--no-jupyter",
    "-nj",
    is_flag=True,
    default=False,
    help="By default launch jupyter notebooks, unless you specify --no-jupyter flag.",
)
@click.pass_context
def suite_edit(
    ctx,
    expectation_suite,
    interactive_flag,
    manual_flag,
    datasource_name,
    batch_request,
    no_jupyter,
):
    """
    Edit an existing Expectation Suite.
    The SUITE argument is required. This is the name you gave to the suite
    when you created it.
    The edit command will help you specify a batch interactively. Or you can
    specify them manually by providing --batch-request in valid JSON format.
    Read more about specifying batches of data in the documentation: https://docs.greatexpectations.io/
    """
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    # Decide whether to run interactively; may prompt when no flags are given
    # and exits on conflicting flag combinations.
    interactive: bool = _process_suite_edit_flags_and_prompt(
        context=context,
        usage_event_end=usage_event_end,
        interactive_flag=interactive_flag,
        manual_flag=manual_flag,
        datasource_name=datasource_name,
        batch_request=batch_request,
    )
    # Limit interactively-loaded batches to 1000 rows by default.
    additional_batch_request_args: Optional[
        Dict[str, Union[str, int, Dict[str, Any]]]
    ] = {"limit": 1000}
    _suite_edit_workflow(
        context=context,
        expectation_suite_name=expectation_suite,
        profile=False,
        usage_event=usage_event_end,
        interactive=interactive,
        no_jupyter=no_jupyter,
        create_if_not_exist=False,
        datasource_name=datasource_name,
        batch_request=batch_request,
        additional_batch_request_args=additional_batch_request_args,
        suppress_usage_message=False,
        assume_yes=False,
    )
def _process_suite_edit_flags_and_prompt(
    context: DataContext,
    usage_event_end: str,
    interactive_flag: bool,
    manual_flag: bool,
    datasource_name: Optional[str] = None,
    batch_request: Optional[str] = None,
) -> bool:
    """
    Process various optional suite edit flags and prompt if there is not enough information from the flags.

    Args:
        context: Data Context for use in sending error messages if any
        usage_event_end: event name for ending usage stats message
        interactive_flag: --interactive from the `suite new` CLI command
        manual_flag: --manual from the `suite new` CLI command
        datasource_name: --datasource-name from the `suite new` CLI command
        batch_request: --batch-request from the `suite new` CLI command

    Returns:
        boolean of whether to enter interactive mode
    """
    error_message: Optional[str] = None
    # Convert interactive / no-interactive flags to a tri-state value:
    # True => interactive, False => manual, None => neither flag supplied.
    interactive: Optional[bool] = None
    if interactive_flag is True and manual_flag is True:
        error_message = """Please choose either --interactive or --manual, you may not choose both."""
    elif interactive_flag is False and manual_flag is False:
        interactive = None
    elif interactive_flag is True and manual_flag is False:
        interactive = True
    elif interactive_flag is False and manual_flag is True:
        interactive = False
    # --datasource-name and --batch-request are mutually exclusive.
    if (datasource_name is not None) and (batch_request is not None):
        error_message = """Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> \
options can be used.
"""
    if error_message is not None:
        # Conflicting flags: report, record a failed usage event, and exit.
        cli_message(string=f"<red>{error_message}</red>")
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=False
        )
        sys.exit(1)
    user_provided_any_flag_skip_prompt: bool = any(
        (
            (interactive is not None),
            (datasource_name is not None),
            (batch_request is not None),
        )
    )
    # Note - explicit check for boolean or None for `interactive: Optional[bool]` is necessary because None indicates
    # that a user did not supply either flag.
    if user_provided_any_flag_skip_prompt:
        if datasource_name is not None:
            # --datasource-name implies interactive mode.
            if interactive is None:
                cli_message(
                    "<green>Entering interactive mode since you passed the --datasource-name flag</green>"
                )
            elif interactive is False:
                cli_message(
                    "<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --datasource-name flag</yellow>"
                )
            interactive = True
        elif batch_request is not None:
            # --batch-request implies interactive mode.
            if interactive is None:
                cli_message(
                    "<green>Entering interactive mode since you passed the --batch-request flag</green>"
                )
            elif interactive is False:
                cli_message(
                    "<yellow>Warning: Ignoring the --manual flag and entering interactive mode since you passed the --batch-request flag</yellow>"
                )
            interactive = True
    else:
        # No flags were supplied at all: ask how the suite should be edited.
        suite_edit_method: str = click.prompt(
            """
How would you like to edit your Expectation Suite?
1. Manually, without interacting with a sample batch of data (default)
2. Interactively, with a sample batch of data
""",
            type=click.Choice(["1", "2"]),
            show_choices=False,
            default="1",
            show_default=False,
        )
        # Default option
        if suite_edit_method == "":
            interactive = False
        if suite_edit_method == "1":
            interactive = False
        elif suite_edit_method == "2":
            interactive = True
    return interactive
def _suite_edit_workflow(
    context: DataContext,
    expectation_suite_name: str,
    profile: bool,
    usage_event: str,
    interactive: bool,
    no_jupyter: bool,
    create_if_not_exist: Optional[bool] = False,
    datasource_name: Optional[str] = None,
    batch_request: Optional[
        Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
    ] = None,
    additional_batch_request_args: Optional[
        Dict[str, Union[str, int, Dict[str, Any]]]
    ] = None,
    suppress_usage_message: Optional[bool] = False,
    assume_yes: Optional[bool] = False,
):
    """
    Load an expectation suite, resolve the batch request to use (from flags,
    JSON file, or the suite's citations), render the edit notebook to disk,
    and optionally launch Jupyter. Exits the process with status 1 on
    expected errors.
    """
    # suppress_usage_message flag is for the situation where _suite_edit_workflow is called by _suite_new_workflow().
    # when called by _suite_new_workflow(), the flag will be set to True, otherwise it will default to False
    if suppress_usage_message:
        usage_event = None
    # noinspection PyShadowingNames
    suite: ExpectationSuite = toolkit.load_expectation_suite(
        data_context=context,
        expectation_suite_name=expectation_suite_name,
        usage_event=usage_event,
        create_if_not_exist=create_if_not_exist,
    )
    try:
        if interactive or profile:
            # Track whether the citation on the suite still matches the batch
            # request we end up using; refresh it below if not.
            batch_request_from_citation_is_up_to_date: bool = True
            batch_request_from_citation: Optional[
                Union[str, Dict[str, Union[str, Dict[str, Any]]]]
            ] = toolkit.get_batch_request_from_citations(expectation_suite=suite)
            if batch_request is not None and isinstance(batch_request, str):
                # --batch-request was given as a path to a JSON file; parse it.
                batch_request = toolkit.get_batch_request_from_json_file(
                    batch_request_json_file_path=batch_request,
                    data_context=context,
                    usage_event=usage_event,
                    suppress_usage_message=suppress_usage_message,
                )
                if batch_request != batch_request_from_citation:
                    batch_request_from_citation_is_up_to_date = False
            # Validate the batch request by constructing a BatchRequest; fall
            # back to the citation, then to prompting against a datasource.
            if not (
                batch_request
                and isinstance(batch_request, dict)
                and BatchRequest(**batch_request)
            ):
                if (
                    batch_request_from_citation
                    and isinstance(batch_request_from_citation, dict)
                    and BatchRequest(**batch_request_from_citation)
                ):
                    batch_request = copy.deepcopy(batch_request_from_citation)
                else:
                    batch_request = toolkit.get_batch_request_using_datasource_name(
                        data_context=context,
                        datasource_name=datasource_name,
                        usage_event=usage_event,
                        suppress_usage_message=False,
                        additional_batch_request_args=additional_batch_request_args,
                    )
                    if batch_request != batch_request_from_citation:
                        batch_request_from_citation_is_up_to_date = False
            if not batch_request_from_citation_is_up_to_date:
                toolkit.add_citation_with_batch_request(
                    data_context=context,
                    expectation_suite=suite,
                    batch_request=batch_request,
                )
        notebook_name: str = f"edit_{expectation_suite_name}.ipynb"
        notebook_path: str = _get_notebook_path(context, notebook_name)
        if profile:
            # Profiling overwrites the suite; confirm first unless --assume-yes.
            if not assume_yes:
                toolkit.prompt_profile_to_create_a_suite(
                    data_context=context, expectation_suite_name=expectation_suite_name
                )
            renderer: SuiteProfileNotebookRenderer = SuiteProfileNotebookRenderer(
                context=context,
                expectation_suite_name=expectation_suite_name,
                batch_request=batch_request,
            )
            renderer.render_to_disk(notebook_file_path=notebook_path)
        else:
            SuiteEditNotebookRenderer.from_data_context(
                data_context=context
            ).render_to_disk(
                suite=suite,
                notebook_file_path=notebook_path,
                batch_request=batch_request,
            )
        if no_jupyter:
            cli_message(
                string=f"To continue editing this suite, run <green>jupyter notebook {notebook_path}</green>"
            )
        else:
            cli_message(
                string="""<green>Opening a notebook for you now to edit your expectation suite!
If you wish to avoid this you can add the `--no-jupyter` flag.</green>\n\n"""
            )
        payload: dict = edit_expectation_suite_usage_statistics(
            data_context=context, expectation_suite_name=suite.expectation_suite_name
        )
        if not suppress_usage_message:
            toolkit.send_usage_message(
                data_context=context,
                event=usage_event,
                event_payload=payload,
                success=True,
            )
        if not no_jupyter:
            toolkit.launch_jupyter_notebook(notebook_path=notebook_path)
    except (
        ge_exceptions.DataContextError,
        ge_exceptions.ProfilerError,
        ValueError,
        OSError,
        SQLAlchemyError,
    ) as e:
        # Expected/configuration errors: show message, record failure, exit.
        cli_message(string=f"<red>{e}</red>")
        if not suppress_usage_message:
            toolkit.send_usage_message(
                data_context=context, event=usage_event, success=False
            )
        sys.exit(1)
    except Exception as e:
        # Unexpected errors: record the failure, then re-raise for a traceback.
        if not suppress_usage_message:
            toolkit.send_usage_message(
                data_context=context, event=usage_event, success=False
            )
        raise e
@mark.cli_as_deprecation
@suite.command(name="demo")
@click.pass_context
def suite_demo(ctx):
    """This command is not supported in the v3 (Batch Request) API."""
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    # Still record the usage event before informing the user that the command
    # has been retired.
    toolkit.send_usage_message(
        data_context=context, event=usage_event_end, success=True
    )
    cli_message(
        string="This command is not supported in the v3 (Batch Request) API. Please use `suite new` instead."
    )
# noinspection PyShadowingNames
@suite.command(name="delete")
@click.argument("suite")
@click.pass_context
def suite_delete(ctx, suite):
    """
    Delete an Expectation Suite from the Expectation Store.
    """
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    try:
        suite_names: List[str] = context.list_expectation_suite_names()
    except Exception as e:
        # Listing failed entirely: record the failure and surface the error.
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=False
        )
        raise e
    if not suite_names:
        toolkit.exit_with_failure_message_and_stats(
            data_context=context,
            usage_event=usage_event_end,
            suppress_usage_message=False,
            message="<red>No expectation suites found in the project.</red>",
        )
    if suite not in suite_names:
        toolkit.exit_with_failure_message_and_stats(
            data_context=context,
            usage_event=usage_event_end,
            suppress_usage_message=False,
            message=f"<red>No expectation suite named {suite} found.</red>",
        )
    # Ask for confirmation unless --assume-yes was given at the top level.
    if not (
        ctx.obj.assume_yes
        or toolkit.confirm_proceed_or_exit(
            exit_on_no=False, data_context=context, usage_stats_event=usage_event_end
        )
    ):
        cli_message(string=f"Suite `{suite}` was not deleted.")
        sys.exit(0)
    context.delete_expectation_suite(suite)
    cli_message(string=f"Deleted the expectation suite named: {suite}")
    toolkit.send_usage_message(
        data_context=context, event=usage_event_end, success=True
    )
@suite.command(name="list")
@click.pass_context
def suite_list(ctx):
    """List existing Expectation Suites."""
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    try:
        suite_names: List[str] = context.list_expectation_suite_names()
    except Exception as e:
        # Listing failed: record the failure and surface the error.
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=False
        )
        raise e
    # Wrap each name in cyan markup for the styled list output.
    suite_names_styled: List[str] = [
        f" - <cyan>{suite_name}</cyan>" for suite_name in suite_names
    ]
    if len(suite_names_styled) == 0:
        cli_message(string="No Expectation Suites found")
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=True
        )
        return
    # Singular/plural intro line for the list output.
    list_intro_string: str
    if len(suite_names_styled) == 1:
        list_intro_string = "1 Expectation Suite found:"
    else:
        list_intro_string = f"{len(suite_names_styled)} Expectation Suites found:"
    cli_message_list(
        string_list=suite_names_styled, list_intro_string=list_intro_string
    )
    toolkit.send_usage_message(
        data_context=context, event=usage_event_end, success=True
    )
def _get_notebook_path(context, notebook_name):
return os.path.abspath(
os.path.join(
context.root_directory, context.GE_EDIT_NOTEBOOK_DIR, notebook_name
)
)
| 34.726368
| 148
| 0.65111
|
26a9fe4b0c05d452143783cfe917eb9ed314c813
| 3,707
|
py
|
Python
|
utilities/waypoint_generator/waypoint_generator_obstacle_avoidance.py
|
Maritime-Robotics-Student-Society/sailing-robot
|
2c53d41b643ae199a4e51d97a91ded7f6cf813e3
|
[
"MIT"
] | 87
|
2016-02-04T08:44:00.000Z
|
2022-03-19T19:53:48.000Z
|
utilities/waypoint_generator/waypoint_generator_obstacle_avoidance.py
|
LaurieChen/sailing-robot
|
840fb10d18026ea0f2ea546691c9bf958b8842d3
|
[
"MIT"
] | 269
|
2016-01-29T08:19:59.000Z
|
2020-02-13T12:33:26.000Z
|
utilities/waypoint_generator/waypoint_generator_obstacle_avoidance.py
|
LaurieChen/sailing-robot
|
840fb10d18026ea0f2ea546691c9bf958b8842d3
|
[
"MIT"
] | 45
|
2016-02-11T22:59:53.000Z
|
2020-12-10T02:58:50.000Z
|
#!/usr/bin/python2
# READY FOR MIT
# Script to generate waypoints for the obstacle avoidance challenge.
# Usage:
#       waypoint_generator path/to/waypoint.yaml
from __future__ import print_function
import os
import sys
# Make the sailing_robot package importable from a source checkout.
my_dir = os.path.dirname(__file__)
robot_src_dir = os.path.abspath(os.path.join(my_dir, '../../src/sailing_robot/src'))
sys.path.append(robot_src_dir)
import yaml
import numpy as np
from sailing_robot.navigation import Navigation
# See http://www.dmap.co.uk/utmworld.htm to find the right zone
UTM_ZONE = 30
# Load yaml file given in argument
input_file = sys.argv[1]
#with open(input_file, 'r') as f:
#    yaml_data = yaml.safe_load(f)
# Generated course is written next to the input file.
output_file = input_file[:-5] + "_gen_obstacle.yaml"
# Lead-in/lead-out distance added before wp1 and after wp2.
margin = 10 # [m]
#wp1 = yaml_data['wp/table']['wp1']
#wp2 = yaml_data['wp/table']['wp2']
#wp3 = yaml_data['wp/table']['wp3']
#wp4 = yaml_data['wp/table']['wp4']
# course 1
wp1 = (50.82082570829222,-1.311508136900023)
wp2 = (50.820720237801176,-1.31363073133094)
wp4 = (50.820646434491884,-1.3114859407167054)
# course 2
# NOTE(review): these assignments overwrite course 1 above - presumably the
# active course is chosen by commenting out the other block; confirm.
wp1 = (50.82086875660889,-1.3136046333659215)
wp2 = (50.822136584453986,-1.3128785101937752)
wp4 = (50.820930082615554,-1.313871497810471)
nav = Navigation(utm_zone=UTM_ZONE)
# Convert to UTM so the course geometry can be built with plain vector maths.
wp1_utm = nav.latlon_to_utm(wp1[0], wp1[1])
wp2_utm = nav.latlon_to_utm(wp2[0], wp2[1])
#wp3_utm = nav.latlon_to_utm(wp3[0], wp3[1])
wp4_utm = nav.latlon_to_utm(wp4[0], wp4[1])
# Unit vector 12 (along the wp1 -> wp2 course axis)
v12 = np.array([wp2_utm[0] - wp1_utm[0], wp2_utm[1] - wp1_utm[1]])
d12 = np.linalg.norm(v12)
v12_unit = v12 / d12
# Unit vector 14 (perpendicular offset axis, wp1 -> wp4)
v14 = np.array([wp4_utm[0] - wp1_utm[0], wp4_utm[1] - wp1_utm[1]])
d14 = np.linalg.norm(v14)
v14_unit = v14 / d14
# Coordinates of waypoints (see scheme in the wiki).
# A..G are spaced along the 1->2 axis, offset by half of v14; the X1 points
# sit 1.5*v14 on the far side and serve as obstacle-avoidance alternates.
utm_wp = {}
utm_wp['A'] = wp1_utm -margin*v12_unit + 0.5*v14
utm_wp['B'] = wp1_utm + v12/6 + 0.5*v14
utm_wp['B1'] = wp1_utm + v12/6 - 1.5*v14
utm_wp['C'] = wp1_utm + v12/3 + 0.5*v14
utm_wp['C1'] = wp1_utm + v12/3 - 1.5*v14
utm_wp['D'] = wp1_utm + 0.5*v12 + 0.5*v14
utm_wp['D1'] = wp1_utm + 0.5*v12 - 1.5*v14
utm_wp['E'] = wp2_utm - v12/3 + 0.5*v14
utm_wp['E1'] = wp2_utm - v12/3 - 1.5*v14
utm_wp['F'] = wp2_utm - v12/6 + 0.5*v14
utm_wp['F1'] = wp2_utm - v12/6 - 1.5*v14
utm_wp['G'] = wp2_utm + margin*v12_unit + 0.5*v14
def to_wp(wp):
    """Convert a UTM (easting, northing) pair back to a [lat, lon] list."""
    latlon = nav.utm_to_latlon(wp[0], wp[1])
    return [float(latlon.lat), float(latlon.lon) ]
# Convert new waypoints to lat/long pairs
latlon_wp = {k: to_wp(v) for (k,v) in utm_wp.items()}
yaml_data = {}
yaml_data['wp/table'] = latlon_wp
# Task order: run A -> G with avoidance alternates, then back G -> A.
yaml_data['wp/tasks'] = [{'kind': 'to_waypoint', 'waypoint': 'A'},
                         {'kind': 'to_waypoint', 'waypoint': 'B'},
                         {'kind': 'obstacle_waypoints', 'normal': 'C', 'obstacle': 'B1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'D', 'obstacle': 'C1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'E', 'obstacle': 'D1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'F', 'obstacle': 'E1'},
                         {'kind': 'to_waypoint', 'waypoint': 'G'},
                         {'kind': 'to_waypoint', 'waypoint': 'F'},
                         {'kind': 'obstacle_waypoints', 'normal': 'E', 'obstacle': 'F1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'D', 'obstacle': 'E1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'C', 'obstacle': 'D1'},
                         {'kind': 'obstacle_waypoints', 'normal': 'B', 'obstacle': 'C1'},
                         {'kind': 'to_waypoint', 'waypoint': 'A'},
                         ]
with open(output_file, 'w') as f:
    yaml.dump(yaml_data, f)
# Echo the generated course for a quick visual check.
print(yaml.dump(yaml_data))
print()
print('Written to:', output_file)
| 32.80531
| 88
| 0.610467
|
d5b03b9e057b3b8f837ef63bc6179aa7419c6065
| 7,316
|
py
|
Python
|
communauto/__init__.py
|
hadrien/communauto
|
9f8e195deb38f233914e8c4703ad8dcc383bf3c0
|
[
"MIT"
] | null | null | null |
communauto/__init__.py
|
hadrien/communauto
|
9f8e195deb38f233914e8c4703ad8dcc383bf3c0
|
[
"MIT"
] | null | null | null |
communauto/__init__.py
|
hadrien/communauto
|
9f8e195deb38f233914e8c4703ad8dcc383bf3c0
|
[
"MIT"
] | null | null | null |
import csv
import io
import sys
import warnings
from collections import ChainMap
from datetime import datetime
from decimal import Decimal
from enum import Enum
from importlib.resources import read_text
from itertools import chain
from typing import Dict, Iterable, List
import pytz
import typer
import httpx
import structlog
from lark import Lark, Transformer
from pydantic import BaseModel, root_validator
from PyPDF3 import PdfFileReader
from tabulate import tabulate
# Log to stderr so the CSV result can be piped from stdout untouched.
structlog.configure(logger_factory=lambda: structlog.PrintLogger(file=sys.stderr))
warnings.filterwarnings("ignore")
log = structlog.get_logger()
# Invoice-line grammar shipped with the package; compiled once at import time.
grammar = read_text("communauto", "grammar.lark")
parser = Lark(grammar, start="invoice")
class City(int, Enum):
    """City identifiers understood by the Reservauto API."""

    Montreal = 59
class Line(BaseModel):
    """One trip line parsed out of a Communauto invoice PDF."""
    days: int
    hours: Decimal
    time_price: Decimal
    km: int
    km_price: Decimal
    total_cost: Decimal
    fare: str
    start_date: datetime
    end_date: datetime
    def __repr__(self):
        return f"Line({self.km}km, {self.total_cost}$)"
    @root_validator
    def check_dates(cls, values):
        """Roll the start date back one year when it lands after the end date.

        All parsed dates receive the invoice's single year (see
        TreeToLines.date), so a trip spanning New Year would otherwise
        appear reversed.
        """
        if values["start_date"] > values["end_date"]:
            d = values["start_date"]
            values["start_date"] = datetime(
                d.year - 1, d.month, d.day, d.hour, d.minute, tzinfo=d.tzinfo
            )
        return values
class Rate(str, Enum):
    """Station-based subscription rates, best plan first."""

    eco_extra = "Économique Extra"
    eco_plus = "Économique Plus"
    eco = "Économique"

    def total_cost(self, nb_invoices: int) -> Decimal:
        """Return the fixed subscription cost over *nb_invoices* monthly invoices.

        eco_extra is 30$/invoice, eco_plus 12.50$/invoice, eco 40$/year
        (i.e. nb_invoices/12 of the annual fee).

        Fix: the previous version built Decimals from float arithmetic
        (``Decimal(nb_invoices / 12 * 40)``), contaminating monetary values
        with binary-float error; all arithmetic is now done in Decimal.
        """
        mapping = {
            Rate.eco_extra: Decimal(nb_invoices) * 30,
            Rate.eco_plus: Decimal(nb_invoices) * Decimal("12.5"),
            # Multiply before dividing so whole years stay exact.
            Rate.eco: Decimal(nb_invoices) * 40 / 12,
        }
        return mapping[self]
class Estimated(Line):
    """A parsed invoice Line augmented with per-rate API cost estimates."""
    estimates: Dict[Rate, Decimal]
    @staticmethod
    def csv_fieldnames():
        """CSV header: every Line field followed by one column per Rate.

        Rate mixes in str, so members compare/hash equal to their string
        values and can be used directly as DictWriter fieldnames.
        """
        return list(Line.schema()["properties"].keys()) + list(Rate)
    def csv_dict(self):
        """Flatten this record into a row dict keyed by field name / rate value."""
        result = self.dict(exclude={"estimates"})
        result.update({rate.value: self.estimates[rate] for rate in list(Rate)})
        return result
def estimate(
    invoices: List[typer.FileBinaryRead],
    output: typer.FileTextWrite = typer.Option(
        "-", help="Where to output result in csv format. Default to stdout."
    ),
):
    """Estimate what each charged invoice trip would cost under every rate.

    Writes one CSV row per trip to *output* and prints a per-rate grand
    total (trip estimates plus subscription cost) to stderr.
    """
    lines = chain(*(extract_lines(invoice) for invoice in invoices))
    # only estimate the lines in invoice that actually were charged.
    lines = filter(lambda line: line.total_cost > Decimal(0), lines)
    with httpx.Client() as client:
        estimated_list = [estimate_line(client, line) for line in lines]
    # Per-rate accumulator for the grand total.
    result = {
        Rate.eco_extra: Decimal(0),
        Rate.eco_plus: Decimal(0),
        Rate.eco: Decimal(0),
    }
    writer = csv.DictWriter(output, Estimated.csv_fieldnames())
    writer.writeheader()
    for estimated in estimated_list:
        writer.writerow(estimated.csv_dict())
    # Sum the per-trip estimates for each rate.
    for estimated in estimated_list:
        for rate in result:
            result[rate] += estimated.estimates[rate]
    # Summary table goes to stderr so stdout stays clean CSV when output is "-".
    print(
        tabulate([k.value, v + k.total_cost(len(invoices))] for k, v in result.items()),
        file=sys.stderr,
    )
def estimate_line(client: httpx.Client, line: Line) -> Estimated:
    """Query the Reservauto trip-cost API and attach per-rate estimates to *line*.

    Raises a generic Exception on any non-200 response.
    """
    params = {
        "CityId": City.Montreal.value,
        "StartDate": line.start_date.isoformat(),
        "EndDate": line.end_date.isoformat(),
        "Distance": line.km,
        "AcceptLanguage": "en",
        "ExcludePromotion": "true",
    }
    # API returns an ordered list of estimates. Order from best plans to lower
    res = client.get(
        "https://restapifrontoffice.reservauto.net/api/v2/Billing/TripCostEstimate",
        params=params,
    )
    if res.status_code != 200:
        log.error("API failed", message=res.content)
        raise Exception("API failed", res.content)
    data = res.json()
    # filter out flex estimates; keep only the top three station-based plans
    estimates_data = list(
        filter(
            lambda e: e["serviceType"] == "StationBased",
            data["tripPackageCostEstimateList"],
        )
    )[:3]
    # Pair the three estimates with the three rates, relying on the API's
    # best-to-cheapest ordering noted above.
    estimates = {
        rate: estimate["totalCost"]
        for estimate, rate in zip(
            estimates_data, [Rate.eco_extra, Rate.eco_plus, Rate.eco]
        )
    }
    log.debug("estimated", line=line, estimates=estimates)
    return Estimated(estimates=estimates, **line.dict())
def extract_lines(invoice: io.BufferedReader) -> Iterable[Line]:
    """Yield every Line parsed from the invoice PDF, page by page.

    The invoice year is captured by the transformer on the page where it
    appears, then threaded (via ``year_value``) into the transformer for
    subsequent pages so their dates can still be resolved.
    """
    year_value = None
    for info in extract_info(invoice):
        try:
            tree = parser.parse(info)
        except Exception:
            # Log the offending page text before re-raising.
            log.error("Failed", info=info)
            raise
        transformer = TreeToLines()
        transformer.year_value = year_value
        try:
            lines = transformer.transform(tree)
        except Exception:
            log.error("Failed", info=info, tree=tree)
            raise
        log.debug("extracted", length=len(lines), lines=lines)
        yield from lines
        # Carry the (possibly newly discovered) year to the next page.
        year_value = transformer.year_value
def extract_info(invoice: io.BufferedReader) -> Iterable[str]:
    """Yield, for each PDF page, the text slice containing the trip table.

    The slice starts at "Période du" and stops at whichever end marker
    appears first; pages with neither marker are skipped.
    """
    pdf = PdfFileReader(invoice)
    for index, page in enumerate(pdf.pages):
        text = page.extractText()
        start_index = text.find("Période du")
        stop_index0 = text.find("Total trajets :")
        stop_index1 = text.find("Définition des abréviations")
        # NOTE(review): if "Total trajets :" is absent but the second marker
        # is present, min() yields -1 and the slice drops the last character;
        # presumably the markers co-occur on real invoices - confirm.
        stop_index = min(stop_index0, stop_index1) if stop_index1 != -1 else stop_index0
        info = text[max(0, start_index) : stop_index]
        no_info_on_page = (start_index, stop_index) == (-1, -1)
        if no_info_on_page:
            continue
        yield info
class TreeToLines(Transformer):
    """Lark transformer turning a parsed invoice tree into Line objects.

    Field handlers return small {field: value} dicts which ``line`` merges
    into a single Line.
    """
    def year(self, y):
        # The invoice year appears once; remember it for date().
        (year,) = y
        self.year_value = int(year)
        return y
    def integer(self, i):
        (i,) = i
        return i
    def decimal(self, f):
        # 1 token: plain number; 2: integer + fraction; 3: thousands group,
        # integer and fraction - presumably matching grammar.lark; confirm.
        if len(f) == 1:
            (f,) = f
            return Decimal(str(f))
        elif len(f) == 2:
            f0, f1 = f
            return Decimal(f"{f0}.{f1}")
        elif len(f) == 3:
            f0, f1, f2 = f
            return Decimal(f"{f0}{f1}.{f2}")
        raise NotImplementedError()
    def date(self, d):
        # Invoice dates carry no year; use the one captured by year().
        day, month, hour, minute = d
        return datetime(
            self.year_value,
            int(month),
            int(day),
            int(hour),
            int(minute),
            tzinfo=pytz.timezone("America/Toronto"),
        )
    def start_date(self, s):
        (s,) = s
        return {"start_date": s}
    def end_date(self, s):
        (s,) = s
        return {"end_date": s}
    def price(self, p):
        (p,) = p
        return p
    def fare(self, f):
        # Fare names may span several tokens; rejoin them with spaces.
        return {"fare": " ".join(str(t) for t in f)}
    def line(self, line):
        # Merge every per-field dict produced above into one Line model.
        return Line(**ChainMap(*[item for item in line if isinstance(item, dict)]))
    def invoice(self, v):
        return [line for line in v if isinstance(line, Line)]
    def to_dict(key, casting):
        # Factory for trivial "unwrap token -> {key: casting(token)}" handlers.
        def factory(self, value):
            (value,) = value
            return {key: casting(value)}
        return factory
    days = to_dict("days", int)
    hours = to_dict("hours", Decimal)
    time_price = to_dict("time_price", Decimal)
    km = to_dict("km", int)
    km_price = to_dict("km_price", Decimal)
    total = to_dict("total_cost", Decimal)
def main():  # pragma no cover
    """Console-script entry point: run the ``estimate`` command via Typer."""
    typer.run(estimate)
| 27.712121
| 88
| 0.609759
|
39cf587afcfdba7e73943517380a28f244f0ac99
| 4,720
|
py
|
Python
|
python/cuml/test/test_incremental_pca.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 2,743
|
2018-10-11T17:28:58.000Z
|
2022-03-31T19:20:50.000Z
|
python/cuml/test/test_incremental_pca.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 4,280
|
2018-10-11T22:29:57.000Z
|
2022-03-31T22:02:44.000Z
|
python/cuml/test/test_incremental_pca.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 454
|
2018-10-11T17:40:56.000Z
|
2022-03-25T17:07:09.000Z
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cupy as cp
import cupyx
from sklearn.decomposition import IncrementalPCA as skIPCA
from cuml.datasets import make_blobs
from cuml.decomposition import IncrementalPCA as cuIPCA
from cuml.decomposition.incremental_pca import _svd_flip
from cuml.test.utils import array_equal
from cuml.common.exceptions import NotFittedError
@pytest.mark.parametrize(
    'nrows, ncols, n_components, sparse_input, density, sparse_format,'
    ' batch_size_divider, whiten', [
        (500, 15, 2, True, 0.4, 'csr', 5, True),
        (5000, 25, 12, False, 0.07, 'csc', 10, False),
        (5000, 15, None, True, 0.4, 'csc', 5, False),
        (500, 25, 2, False, 0.07, 'csr', 10, False),
        (5000, 25, 12, False, 0.07, 'csr', 10, True),
        (500, 2500, 9, False, 0.07, 'csr', 50, True),
        (500, 250, 14, True, 0.07, 'csr', 1, True),
    ]
)
@pytest.mark.no_bad_cuml_array_check
def test_fit(nrows, ncols, n_components, sparse_input, density,
             sparse_format, batch_size_divider, whiten):
    """cuML IncrementalPCA.fit must round-trip data like scikit-learn's."""
    if sparse_format == 'csc':
        pytest.skip("cupyx.scipy.sparse.csc.csc_matrix does not support"
                    " indexing as of cupy 7.6.0")
    # Build either a sparse random matrix or dense blobs on the GPU.
    if sparse_input:
        X = cupyx.scipy.sparse.random(nrows, ncols, density=density,
                                      random_state=10, format=sparse_format)
    else:
        X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
    cu_ipca = cuIPCA(n_components=n_components, whiten=whiten,
                     batch_size=int(nrows / batch_size_divider))
    cu_ipca.fit(X)
    cu_t = cu_ipca.transform(X)
    cu_inv = cu_ipca.inverse_transform(cu_t)
    sk_ipca = skIPCA(n_components=n_components, whiten=whiten,
                     batch_size=int(nrows / batch_size_divider))
    # Move the same data to host for the scikit-learn reference fit.
    if sparse_input:
        X = X.get()
    else:
        X = cp.asnumpy(X)
    sk_ipca.fit(X)
    sk_t = sk_ipca.transform(X)
    sk_inv = sk_ipca.inverse_transform(sk_t)
    # Compare reconstructions rather than components: components may differ
    # in sign/ordering while the inverse transform remains comparable.
    assert array_equal(cu_inv, sk_inv,
                       5e-5, with_sign=True)
@pytest.mark.parametrize(
    'nrows, ncols, n_components, density, batch_size_divider, whiten', [
        (500, 15, 2, 0.07, 5, False),
        (500, 15, 2, 0.07, 5, True),
        (5000, 25, 12, 0.07, 10, False),
        (5000, 15, 2, 0.4, 5, True),
        (500, 25, 12, 0.4, 10, False),
        (5000, 4, 2, 0.1, 100, False)
    ]
)
@pytest.mark.no_bad_cuml_array_check
def test_partial_fit(nrows, ncols, n_components, density,
                     batch_size_divider, whiten):
    """partial_fit in equal chunks must match scikit-learn's IncrementalPCA."""
    # NOTE(review): `density` is parametrized but unused in this test body.
    X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
    cu_ipca = cuIPCA(n_components=n_components, whiten=whiten)
    sample_size = int(nrows / batch_size_divider)
    # Feed the same chunks, in the same order, to both implementations.
    for i in range(0, nrows, sample_size):
        cu_ipca.partial_fit(X[i:i + sample_size].copy())
    cu_t = cu_ipca.transform(X)
    cu_inv = cu_ipca.inverse_transform(cu_t)
    sk_ipca = skIPCA(n_components=n_components, whiten=whiten)
    X = cp.asnumpy(X)
    for i in range(0, nrows, sample_size):
        sk_ipca.partial_fit(X[i:i + sample_size].copy())
    sk_t = sk_ipca.transform(X)
    sk_inv = sk_ipca.inverse_transform(sk_t)
    assert array_equal(cu_inv, sk_inv,
                       5e-5, with_sign=True)
def test_exceptions():
    """IncrementalPCA must raise the expected errors for bad input and usage."""
    sparse_x = cupyx.scipy.sparse.eye(10)
    ipca = cuIPCA()

    # Sparse input is rejected by partial_fit.
    with pytest.raises(TypeError):
        ipca.partial_fit(sparse_x)

    dense_x = sparse_x.toarray()

    # transform/inverse_transform before any fit must fail.
    with pytest.raises(NotFittedError):
        ipca.transform(dense_x)
    with pytest.raises(NotFittedError):
        ipca.inverse_transform(dense_x)

    # n_components may not exceed either data dimension.
    with pytest.raises(ValueError):
        cuIPCA(n_components=8).fit(dense_x[:5])
    with pytest.raises(ValueError):
        cuIPCA(n_components=8).fit(dense_x[:, :5])
def test_svd_flip():
    """_svd_flip only flips signs, so the SVD reconstruction stays exact."""
    matrix = cp.array(range(-10, 80)).reshape((9, 10))
    left, singular, right = cp.linalg.svd(matrix, full_matrices=False)

    # Both decision modes must preserve the product u * s @ v.
    for u_based in (True, False):
        flipped_u, flipped_v = _svd_flip(left, right, u_based_decision=u_based)
        reconstruction = cp.dot(flipped_u * singular, flipped_v)
        assert array_equal(reconstruction, matrix)
| 32.328767
| 77
| 0.654237
|
e0c6b3bdf29fafcfc415526dab10f791ca52a4eb
| 10,037
|
py
|
Python
|
lib/galaxy/jobs/mapper.py
|
igorhollaender/sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 2
|
2018-10-14T16:42:39.000Z
|
2018-10-14T16:42:41.000Z
|
lib/galaxy/jobs/mapper.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/jobs/mapper.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | null | null | null |
import logging
import inspect
import os
import sys
import galaxy.jobs.rules
from galaxy.jobs import stock_rules
from .rule_helper import RuleHelper
log = logging.getLogger( __name__ )
# Runner name that marks a destination as dynamically resolved at map time.
DYNAMIC_RUNNER_NAME = "dynamic"
# Destination id assigned when a dynamic rule returns a legacy runner URL.
DYNAMIC_DESTINATION_ID = "dynamic_legacy_from_url"
ERROR_MESSAGE_NO_RULE_FUNCTION = "Galaxy misconfigured - cannot find dynamic rule function name for destination %s."
ERROR_MESSAGE_RULE_FUNCTION_NOT_FOUND = "Galaxy misconfigured - no rule function named %s found in dynamic rule modules."
class JobMappingException(Exception):
    """Raised when a job cannot be mapped to a destination."""

    def __init__(self, failure_message):
        # Human-readable reason the mapping failed.
        self.failure_message = failure_message
class JobNotReadyException(Exception):
    """Raised when a job is not yet ready to be dispatched."""

    def __init__(self, job_state=None, message=None):
        self.job_state = job_state
        self.message = message
# Built-in dynamic-rule implementations, selectable by name through the
# destination's ``type`` parameter.
STOCK_RULES = dict(
    choose_one=stock_rules.choose_one,
    burst=stock_rules.burst,
    docker_dispatch=stock_rules.docker_dispatch,
)
class JobRunnerMapper( object ):
    """
    This class is responsible for managing the mapping of jobs
    (in the form of job_wrappers) to job runner url strings.
    """
    def __init__( self, job_wrapper, url_to_destination, job_config ):
        # url_to_destination converts legacy runner URLs returned by dynamic
        # rules into JobDestination objects.
        self.job_wrapper = job_wrapper
        self.url_to_destination = url_to_destination
        self.job_config = job_config
        # Default rules package; deployers may point job_conf's
        # dynamic_params at their own rules module instead.
        self.rules_module = galaxy.jobs.rules
        if job_config.dynamic_params is not None:
            rules_module_name = job_config.dynamic_params['rules_module']
            __import__(rules_module_name)
            self.rules_module = sys.modules[rules_module_name]
    def __get_rule_modules( self ):
        # Import every rule file in the rules package, tolerating broken ones.
        unsorted_module_names = self.__get_rule_module_names( )
        # Load modules in reverse order to allow hierarchical overrides
        # i.e. 000_galaxy_rules.py, 100_site_rules.py, 200_instance_rules.py
        module_names = sorted( unsorted_module_names, reverse=True )
        modules = []
        for rule_module_name in module_names:
            try:
                module = __import__( rule_module_name )
                # __import__ returns the top-level package; walk down to the
                # actual submodule.
                for comp in rule_module_name.split( "." )[1:]:
                    module = getattr( module, comp )
                modules.append( module )
            except BaseException, exception:
                # A single broken rule file must not break job mapping for
                # every destination - log and keep loading the rest.
                exception_str = str( exception )
                message = "%s rule module could not be loaded: %s" % ( rule_module_name, exception_str )
                log.debug( message )
                continue
        return modules
    def __get_rule_module_names( self ):
        # Build dotted module names for every non-private .py file found in
        # the rules package directory.
        rules_dir = self.rules_module.__path__[0]
        names = []
        for fname in os.listdir( rules_dir ):
            if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
                base_name = self.rules_module.__name__
                rule_module_name = "%s.%s" % (base_name, fname[:-len(".py")])
                names.append( rule_module_name )
        return names
    def __invoke_expand_function( self, expand_function, destination_params ):
        # Call the dynamic rule, passing only the keyword arguments its
        # signature actually declares.
        function_arg_names = inspect.getargspec( expand_function ).args
        app = self.job_wrapper.app
        possible_args = {
            "job_id": self.job_wrapper.job_id,
            "tool": self.job_wrapper.tool,
            "tool_id": self.job_wrapper.tool.id,
            "job_wrapper": self.job_wrapper,
            "rule_helper": RuleHelper( app ),
            "app": app
        }
        actual_args = {}
        # Send through any job_conf.xml defined args to function
        for destination_param in destination_params.keys():
            if destination_param in function_arg_names:
                actual_args[ destination_param ] = destination_params[ destination_param ]
        # Populate needed args
        for possible_arg_name in possible_args:
            if possible_arg_name in function_arg_names:
                actual_args[ possible_arg_name ] = possible_args[ possible_arg_name ]
        # Don't hit the DB to load the job object if not needed
        require_db = False
        for param in ["job", "user", "user_email", "resource_params", "workflow_invocation_uuid"]:
            if param in function_arg_names:
                require_db = True
                break
        if require_db:
            job = self.job_wrapper.get_job()
            user = job.user
            user_email = user and str(user.email)
            if "job" in function_arg_names:
                actual_args[ "job" ] = job
            if "user" in function_arg_names:
                actual_args[ "user" ] = user
            if "user_email" in function_arg_names:
                actual_args[ "user_email" ] = user_email
            if "resource_params" in function_arg_names:
                # Find the dynamically inserted resource parameters and give
                # them to rule.
                param_values = self.__job_params( job )
                resource_params = {}
                try:
                    resource_params_raw = param_values[ "__job_resource" ]
                    if resource_params_raw[ "__job_resource__select" ].lower() in [ "1", "yes", "true" ]:
                        for key, value in resource_params_raw.iteritems():
                            resource_params[ key ] = value
                except KeyError:
                    # Job carries no resource parameter block; pass an empty dict.
                    pass
                actual_args[ "resource_params" ] = resource_params
            if "workflow_invocation_uuid" in function_arg_names:
                param_values = job.raw_param_dict( )
                workflow_invocation_uuid = param_values.get( "__workflow_invocation_uuid__", None )
                actual_args[ "workflow_invocation_uuid" ] = workflow_invocation_uuid
        return expand_function( **actual_args )
    def __job_params( self, job ):
        # Parameter values for the job, tolerating parse errors.
        app = self.job_wrapper.app
        param_values = job.get_param_values( app, ignore_errors=True )
        return param_values
    def __convert_url_to_destination( self, url ):
        """
        Job runner URLs are deprecated, but dynamic mapper functions may still
        be returning them. Runners are expected to be able to convert these to
        destinations.
        This method calls
        JobHandlerQueue.DefaultJobDispatcher.url_to_destination, which in turn
        calls the url_to_destination method for the appropriate runner.
        """
        dest = self.url_to_destination( url )
        dest['id'] = DYNAMIC_DESTINATION_ID
        return dest
    def __determine_expand_function_name( self, destination ):
        # default look for function with name matching an id of tool, unless one specified
        expand_function_name = destination.params.get('function', None)
        if not expand_function_name:
            for tool_id in self.job_wrapper.tool.all_ids:
                if self.__last_rule_module_with_function( tool_id ):
                    expand_function_name = tool_id
                    break
        return expand_function_name
    def __get_expand_function( self, expand_function_name ):
        # Resolve the named rule function or fail loudly if no module has it.
        matching_rule_module = self.__last_rule_module_with_function( expand_function_name )
        if matching_rule_module:
            expand_function = getattr( matching_rule_module, expand_function_name )
            return expand_function
        else:
            message = ERROR_MESSAGE_RULE_FUNCTION_NOT_FOUND % ( expand_function_name )
            raise Exception( message )
    def __last_rule_module_with_function( self, function_name ):
        # self.rule_modules is sorted in reverse order, so find first
        # with function
        for rule_module in self.__get_rule_modules( ):
            if hasattr( rule_module, function_name ):
                return rule_module
        return None
    def __handle_dynamic_job_destination( self, destination ):
        # Dynamic destinations either run a Python rule function (default)
        # or one of the named STOCK_RULES implementations.
        expand_type = destination.params.get('type', "python")
        expand_function = None
        if expand_type == "python":
            expand_function_name = self.__determine_expand_function_name( destination )
            if not expand_function_name:
                message = ERROR_MESSAGE_NO_RULE_FUNCTION % destination
                raise Exception( message )
            expand_function = self.__get_expand_function( expand_function_name )
        elif expand_type in STOCK_RULES:
            expand_function = STOCK_RULES[ expand_type ]
        else:
            raise Exception( "Unhandled dynamic job runner type specified - %s" % expand_type )
        return self.__handle_rule( expand_function, destination )
    def __handle_rule( self, rule_function, destination ):
        # Normalize whatever the rule returned (JobDestination, id, or URL)
        # into a JobDestination object.
        job_destination = self.__invoke_expand_function( rule_function, destination.params )
        if not isinstance(job_destination, galaxy.jobs.JobDestination):
            job_destination_rep = str(job_destination)  # Should be either id or url
            if '://' in job_destination_rep:
                job_destination = self.__convert_url_to_destination(job_destination_rep)
            else:
                job_destination = self.job_config.get_destination(job_destination_rep)
        return job_destination
    def __cache_job_destination( self, params, raw_job_destination=None ):
        # Resolve the (possibly dynamic) destination once and remember it.
        if raw_job_destination is None:
            raw_job_destination = self.job_wrapper.tool.get_job_destination( params )
        if raw_job_destination.runner == DYNAMIC_RUNNER_NAME:
            job_destination = self.__handle_dynamic_job_destination( raw_job_destination )
        else:
            job_destination = raw_job_destination
        self.cached_job_destination = job_destination
    def get_job_destination( self, params ):
        """
        Cache the job_destination to avoid recalculation.
        """
        if not hasattr( self, 'cached_job_destination' ):
            self.__cache_job_destination( params )
        return self.cached_job_destination
    def cache_job_destination( self, raw_job_destination ):
        # Force-populate the cache from an already-resolved destination.
        self.__cache_job_destination( None, raw_job_destination=raw_job_destination )
        return self.cached_job_destination
| 41.135246
| 121
| 0.653582
|
7aee2b3df4ad049bfc6df65a7faae31dabc454c3
| 1,209
|
py
|
Python
|
onebarangay_psql/users/tests/test_forms.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | null | null | null |
onebarangay_psql/users/tests/test_forms.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | 43
|
2022-02-07T00:18:35.000Z
|
2022-03-21T04:42:48.000Z
|
onebarangay_psql/users/tests/test_forms.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | null | null | null |
"""Module for all Form Tests."""
import pytest
from django.utils.translation import gettext_lazy as _
from onebarangay_psql.users.forms import UserAdminCreationForm
from onebarangay_psql.users.models import User
# Every test in this module requires database access.
pytestmark = pytest.mark.django_db
class TestUserAdminCreationForm:
    """Tests for the admin user-creation form."""

    def test_username_validation_error_msg(self, user: User) -> None:
        """A duplicate username is rejected with exactly one specific error.

        Verifies that:
        1) a new user with an existing username cannot be added;
        2) the form raises only a single error;
        3) that error is the expected message on the ``username`` field.
        """
        # Reuse the fixture user's credentials so the username collides.
        payload = {
            "username": user.username,
            "password1": user.password,
            "password2": user.password,
        }
        form = UserAdminCreationForm(payload)

        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
        assert form.errors["username"][0] == _("This username has already been taken.")
| 33.583333
| 90
| 0.647643
|
d2054715f11c47b8fc3bd73288fd13c0fd5e71e8
| 14,660
|
py
|
Python
|
tensorflow/contrib/distribute/python/minimize_loss_test.py
|
P-Hidringer/tensorflow
|
2008b941ca9cc27121c55a4f5b4fde0231d315d2
|
[
"Apache-2.0"
] | 1
|
2018-05-30T00:34:05.000Z
|
2018-05-30T00:34:05.000Z
|
tensorflow/contrib/distribute/python/minimize_loss_test.py
|
jwplayer/tensorflow
|
c8731009708d4694fc553562a267d75064fc5ab4
|
[
"Apache-2.0"
] | 1
|
2018-05-11T18:18:05.000Z
|
2018-05-11T18:18:05.000Z
|
tensorflow/contrib/distribute/python/minimize_loss_test.py
|
jwplayer/tensorflow
|
c8731009708d4694fc553562a267d75064fc5ab4
|
[
"Apache-2.0"
] | 1
|
2021-11-16T19:59:48.000Z
|
2021-11-16T19:59:48.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.losses import losses_impl
class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True]),
          combinations.combine(is_tpu=[False])) + combinations.combine(
              distribution=[combinations.tpu_strategy],
              optimizer_fn=[
                  combinations.adam_optimizer_v1_fn,
                  # TODO(isaprykin): Make Adam v2 work with while_loops
                  # and TPUs.
              ],
              mode=["graph"],
              use_callable_loss=[False],
              is_tpu=[True]))
  def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss,
                       is_tpu):
    """Training error must be non-increasing under each strategy/optimizer."""
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)
      # TODO(isaprykin): Eliminate `is_tpu`. Probably add a
      # `DistributionStrategy.create_monitor` so that each DistributionStrategy
      # could influence its training loop. That method would return an instance
      # of Monitor. TPUMonitor would execute tpu.initialize_system() and
      # tpu.shutdown_system().
      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()
      def run_step():
        # One training step across all towers, grouped into a single op.
        return distribution.group(
            distribution.call_for_each_tower(
                model_fn, iterator.get_next(), run_concurrently=layer.built))
      if not context.executing_eagerly():
        with self.test_session() as sess:
          if is_tpu:
            sess.run(tpu.initialize_system())
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      weights, biases = [], []
      for _ in range(10):
        run_step()
        weights.append(self.evaluate(distribution.fetch(layer.kernel)))
        biases.append(self.evaluate(distribution.fetch(layer.bias)))
      if is_tpu:
        with self.test_session() as sess:
          sess.run(tpu.shutdown_system())
      # The |w + b - 1| error should never increase between steps.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers() +
          combinations.distributions_and_v2_optimizers(),
          combinations.combine(mode=["graph", "eager"], is_tpu=[False])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=[
              combinations.adam_optimizer_v1_fn,
              combinations.gradient_descent_optimizer_v1_fn,
              combinations.gradient_descent_optimizer_v2_fn,
          ],
          mode=["graph"],
          is_tpu=[True]))
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn, is_tpu):
    """Creating the optimizer inside model_fn must create exactly the
    expected (replicated) variables."""
    created_variables = []
    trainable_variables = []
    def appending_creator(next_creator, *args, **kwargs):
      # Record the name of every variable created under this scope.
      v = next_creator(*args, **kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v
    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with variable_scope.variable_creator_scope(
        appending_creator), distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn,
          use_bias=True,
          use_callable_loss=True,
          create_optimizer_inside_model_fn=True)
      iterator = distribution.distribute_dataset(
          dataset_fn).make_one_shot_iterator()
      def run_step():
        return distribution.group(
            distribution.call_for_each_tower(
                model_fn, iterator.get_next(), run_concurrently=layer.built))
      if not context.executing_eagerly():
        with self.test_session() as sess:
          if is_tpu:
            sess.run(tpu.initialize_system())
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      run_step()
      if is_tpu:
        with self.test_session() as sess:
          sess.run(tpu.shutdown_system())
      def get_expected_variables(optimizer_fn, num_parameter_devices):
        # Expected variables depend on the optimizer (slot variables) and
        # are replicated once per additional parameter device.
        variables_map = {
            "GradientDescent": ["dense/kernel", "dense/bias"],
            "Adam": [
                "dense/kernel", "dense/bias", "beta1_power", "beta2_power",
                "dense/kernel/Adam", "dense/kernel/Adam_1", "dense/bias/Adam",
                "dense/bias/Adam_1"
            ]
        }
        variables = variables_map[optimizer_fn().get_name()]
        variables.extend([
            v + "/replica_{}".format(replica)
            for v in variables
            for replica in range(1, num_parameter_devices)
        ])
        return set([v + ":0" for v in variables])
      self.assertEqual(
          get_expected_variables(optimizer_fn,
                                 len(distribution.parameter_devices)),
          set(created_variables))
@combinations.generate(
    combinations.times(
        combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
        combinations.times(
            combinations.distributions_and_v1_optimizers(),
            combinations.combine(
                mode=["graph", "eager"],
                is_tpu=[False],
                # TODO(isaprykin): Allow False here. Currently subsequent
                # towers will re-execute UPDATE_OPS of previous towers.
                update_ops_in_cross_tower_mode=[True])) +
        combinations.combine(
            distribution=[combinations.tpu_strategy_single_iteration],
            optimizer_fn=[
                combinations.gradient_descent_optimizer_v1_fn,
                combinations.gradient_descent_optimizer_v2_fn
            ],
            mode=["graph"],
            is_tpu=[True],
            update_ops_in_cross_tower_mode=[False])))
def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
                                  renorm, is_tpu,
                                  update_ops_in_cross_tower_mode):
  """Verifies that moving mean updates are reduced across towers."""
  with distribution.scope():
    num_towers = len(distribution.worker_devices)
    model_fn, dataset_fn, batchnorm = batchnorm_example(
        optimizer_fn,
        batch_per_epoch=num_towers,
        momentum=momentum,
        renorm=renorm,
        update_ops_in_tower_mode=not update_ops_in_cross_tower_mode)

    # Disable prefetching since that makes the specific input on each device
    # to be non deterministic, and this test relies on specific input being
    # on each device.
    if isinstance(distribution, mirrored_strategy.MirroredStrategy):
      distribution._prefetch_on_device = False
    iterator = distribution.distribute_dataset(
        dataset_fn).make_one_shot_iterator()

    def run_step():
      fetches = distribution.unwrap(
          distribution.call_for_each_tower(
              model_fn, iterator.get_next(),
              run_concurrently=batchnorm.built))
      # In cross-tower mode the UPDATE_OPS collection (moving-average
      # updates) must be fetched explicitly alongside the train ops.
      if update_ops_in_cross_tower_mode:
        fetches += ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      return control_flow_ops.group(fetches)

    if not context.executing_eagerly():
      with self.test_session() as sess:
        if is_tpu:
          sess.run(tpu.initialize_system())
        run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

    # One expected moving mean per feature (the example uses 8 features).
    expected_moving_means = [0.] * 8

    def averaged_batch_mean(i):
      # Each batch has shape [16, 8] where the ith element in jth list is
      # (8 * j + i + tower_id * 100). So the batch mean in each tower is
      # (60 + i + tower_id * 100). So here comes its batch mean over all
      # towers:
      return 60. + i + (num_towers - 1.) / 2. * 100.

    for _ in range(10):
      run_step()
      moving_means = self.evaluate(distribution.fetch(batchnorm.moving_mean))

      # We make sure that the moving_mean is updated as if the sample mean is
      # calculated over all towers.
      for i, expected_moving_mean in enumerate(expected_moving_means):
        expected_moving_means[i] -= ((
            expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
        self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)

    if is_tpu:
      with self.test_session() as sess:
        sess.run(tpu.shutdown_system())
@combinations.generate(
    combinations.times(
        combinations.combine(
            optimizer_fn=[
                combinations.gradient_descent_optimizer_v1_fn,
                combinations.gradient_descent_optimizer_v2_fn
            ],
            loss_reduction=[
                losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
                losses_impl.Reduction.SUM_OVER_BATCH_SIZE,
                losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
            ]),
        combinations.times(
            combinations.combine(
                distribution=[
                    combinations.one_device_strategy,
                    combinations.mirrored_strategy_with_gpu_and_cpu,
                    combinations.mirrored_strategy_with_two_gpus
                ],
                is_tpu=[False]),
            combinations.combine(
                mode=["graph"], use_callable_loss=[True, False]) +
            combinations.combine(mode=["eager"], use_callable_loss=[True])) +
        combinations.combine(
            distribution=[combinations.tpu_strategy_single_iteration],
            is_tpu=[True],
            mode=["graph"],
            use_callable_loss=[True, False])))
def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                  use_callable_loss, is_tpu):
  """Checks the trained weight for SUM vs. mean-style loss reductions."""
  with distribution.scope():
    all_vars = []

    def model_fn(x, y):

      def loss_fn():
        # Use fixed initialization to make the steps deterministic.
        w = variable_scope.get_variable("w", initializer=[[2.]])
        all_vars.append(w)
        predict = math_ops.matmul(x, w)
        return losses_impl.mean_squared_error(
            y, predict, reduction=loss_reduction)

      optimizer = optimizer_fn()  # GradientDescent with 0.2 learning rate
      if use_callable_loss:
        return optimizer.minimize(loss_fn)
      else:
        return optimizer.minimize(loss_fn())

    def dataset_fn():
      # Fixed batch of two examples so the expected update is computable.
      features = dataset_ops.Dataset.from_tensors([[2.], [7.]])
      labels = dataset_ops.Dataset.from_tensors([[6.], [21.]])
      return dataset_ops.Dataset.zip((features, labels)).repeat()

    iterator = distribution.distribute_dataset(
        dataset_fn).make_one_shot_iterator()

    def run_step():
      return distribution.group(
          distribution.call_for_each_tower(
              model_fn, *iterator.get_next(), run_concurrently=False))

    if not context.executing_eagerly():
      with self.test_session() as sess:
        if is_tpu:
          sess.run(tpu.initialize_system())
        run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

    run_step()

    # All towers must share the same underlying variable.
    v = all_vars[0]
    self.assertTrue(all([v is vi for vi in all_vars[1:]]))
    weight = numpy.squeeze(self.evaluate(distribution.fetch(v)))
    # Our model is:
    #   predict = x * w
    #   loss = (predict - y)^2
    #   dloss/dpredict = 2*(predict - y)
    #   dloss/dw = 2 * x^T @ (predict - y)
    # For our batch size of 2, assuming sum loss reduction:
    #   x = [2, 7]
    #   y = [6, 21]
    #   w_initial = 2
    #   predict = [4, 14]
    #   predict - y = [-2, -7]
    #   dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
    # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2
    # with sum loss reduction, or 10.6 with mean.
    if loss_reduction == losses_impl.Reduction.SUM:
      # Note that the "distribution.num_towers" factor will go away once
      # we split the input across towers, instead of pulling a complete
      # batch of input per tower.
      self.assertNear(weight, 2 + 21.2 * distribution.num_towers, 0.0001)
    else:
      # One of the mean loss reductions.
      self.assertNear(weight, 2 + 10.6, 0.0001)

    if is_tpu:
      with self.test_session() as sess:
        sess.run(tpu.shutdown_system())
# Runs the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| 40.385675
| 90
| 0.637381
|
edb1920cdfa9bb7ccbb11aace1f04c7518c1e644
| 11,170
|
py
|
Python
|
dopamine/utils/example_viz_lib.py
|
Mehooz/DRDRL_object
|
3a3ffd75505a48eb1d7bdde463579cd9c1fa92bb
|
[
"Apache-2.0"
] | null | null | null |
dopamine/utils/example_viz_lib.py
|
Mehooz/DRDRL_object
|
3a3ffd75505a48eb1d7bdde463579cd9c1fa92bb
|
[
"Apache-2.0"
] | null | null | null |
dopamine/utils/example_viz_lib.py
|
Mehooz/DRDRL_object
|
3a3ffd75505a48eb1d7bdde463579cd9c1fa92bb
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library used by example_viz.py to generate visualizations.
This file illustrates the following:
- How to subclass an existing agent to add visualization functionality.
- For DQN we visualize the cumulative rewards and the Q-values for each
action (MyDQNAgent).
- For Rainbow we visualize the cumulative rewards and the Q-value
distributions for each action (MyRainbowAgent).
- How to subclass Runner to run in eval mode, lay out the different subplots,
generate the visualizations, and compile them into a video (MyRunner).
- The function `run()` is the main entrypoint for running everything.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import run_experiment
from dopamine.utils import agent_visualizer
from dopamine.utils import atari_plotter
from dopamine.utils import bar_plotter
from dopamine.utils import line_plotter
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
class MyDQNAgent(dqn_agent.DQNAgent):
  """Sample DQN agent to visualize Q-values and rewards.

  Records the per-step reward sequence and, for every action, the history of
  its Q-value at each action-selection step, so that the visualizer can plot
  them over time.
  """

  def __init__(self, sess, num_actions, summary_writer=None):
    super(MyDQNAgent, self).__init__(sess, num_actions,
                                     summary_writer=summary_writer)
    # One Q-value history list per action, plus the raw reward sequence.
    self.q_values = [[] for _ in range(num_actions)]
    self.rewards = []

  def step(self, reward, observation):
    """Records the reward, then defers to the base agent's step."""
    self.rewards.append(reward)
    return super(MyDQNAgent, self).step(reward, observation)

  def _select_action(self):
    """Selects an action and records the current Q-values for plotting."""
    action = super(MyDQNAgent, self)._select_action()
    q_vals = self._sess.run(self._net_outputs.q_values,
                            {self.state_ph: self.state})[0]
    # Idiomatic iteration: enumerate instead of indexing via range(len(...)).
    for i, q_val in enumerate(q_vals):
      self.q_values[i].append(q_val)
    return action

  def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):
    """Restores agent variables from `checkpoint_path`.

    Args:
      checkpoint_path: str, path to the checkpoint to restore.
      use_legacy_checkpoint: bool, whether the checkpoint predates the Keras
        migration and needs its variable names transformed.
    """
    if use_legacy_checkpoint:
      variables_to_restore = atari_lib.maybe_transform_variable_names(
          tf.all_variables(), legacy_checkpoint_load=True)
    else:
      # Restore only the variables present in both the graph and checkpoint.
      global_vars = set([x.name for x in tf.global_variables()])
      ckpt_vars = [
          '{}:0'.format(name)
          for name, _ in tf.train.list_variables(checkpoint_path)
      ]
      include_vars = list(global_vars.intersection(set(ckpt_vars)))
      variables_to_restore = contrib_slim.get_variables_to_restore(
          include=include_vars)
    if variables_to_restore:
      reloader = tf.train.Saver(var_list=variables_to_restore)
      reloader.restore(self._sess, checkpoint_path)
      tf.logging.info('Done restoring from %s', checkpoint_path)
    else:
      tf.logging.info('Nothing to restore!')

  def get_q_values(self):
    """Returns the recorded per-action Q-value histories."""
    return self.q_values

  def get_rewards(self):
    """Returns the cumulative reward curve (as a single-line dataset)."""
    return [np.cumsum(self.rewards)]
class MyRainbowAgent(rainbow_agent.RainbowAgent):
  """Sample Rainbow agent to visualize Q-value distributions and rewards."""

  def __init__(self, sess, num_actions, summary_writer=None):
    super(MyRainbowAgent, self).__init__(sess, num_actions,
                                         summary_writer=summary_writer)
    # Raw per-step rewards, accumulated for the reward plot.
    self.rewards = []

  def step(self, reward, observation):
    """Records the reward before delegating to the base agent."""
    self.rewards.append(reward)
    return super(MyRainbowAgent, self).step(reward, observation)

  def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):
    """Restores agent variables from the checkpoint at `checkpoint_path`."""
    if use_legacy_checkpoint:
      variables_to_restore = atari_lib.maybe_transform_variable_names(
          tf.all_variables(), legacy_checkpoint_load=True)
    else:
      # Only restore variables that exist both in the graph and checkpoint.
      graph_var_names = set([v.name for v in tf.global_variables()])
      ckpt_var_names = set([
          '{}:0'.format(var_name)
          for var_name, _ in tf.train.list_variables(checkpoint_path)
      ])
      common_names = list(graph_var_names.intersection(ckpt_var_names))
      variables_to_restore = contrib_slim.get_variables_to_restore(
          include=common_names)
    if not variables_to_restore:
      tf.logging.info('Nothing to restore!')
      return
    saver = tf.train.Saver(var_list=variables_to_restore)
    saver.restore(self._sess, checkpoint_path)
    tf.logging.info('Done restoring from %s', checkpoint_path)

  def get_probabilities(self):
    """Returns the current return-distribution probabilities per action."""
    return self._sess.run(tf.squeeze(self._net_outputs.probabilities),
                          {self.state_ph: self.state})

  def get_rewards(self):
    """Returns the cumulative reward curve (as a single-line dataset)."""
    return [np.cumsum(self.rewards)]
class MyRunner(run_experiment.Runner):
  """Sample Runner class to generate visualizations.

  Runs a pre-trained agent in eval mode, lays out game/reward/Q-value plots,
  and compiles the rendered frames into a video.
  """

  def __init__(self, base_dir, trained_agent_ckpt_path, create_agent_fn,
               use_legacy_checkpoint=False):
    # Stash the checkpoint info first; the superclass constructor triggers
    # _initialize_checkpointer_and_maybe_resume, which reads these fields.
    self._trained_agent_ckpt_path = trained_agent_ckpt_path
    self._use_legacy_checkpoint = use_legacy_checkpoint
    super(MyRunner, self).__init__(base_dir, create_agent_fn)

  def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
    # Instead of resuming training state, load the fixed pre-trained agent.
    self._agent.reload_checkpoint(self._trained_agent_ckpt_path,
                                  self._use_legacy_checkpoint)
    self._start_iteration = 0

  def _run_one_iteration(self, iteration):
    # Eval-only: no training phase is run.
    statistics = iteration_statistics.IterationStatistics()
    tf.logging.info('Starting iteration %d', iteration)
    _, _ = self._run_eval_phase(statistics)
    return statistics.data_lists

  def visualize(self, record_path, num_global_steps=500):
    """Plays the game, renders plots each step, and writes a video.

    Args:
      record_path: str, directory where frames/video are written (created if
        missing).
      num_global_steps: int, total number of environment steps to record.
    """
    if not tf.gfile.Exists(record_path):
      tf.gfile.MakeDirs(record_path)
    self._agent.eval_mode = True

    # Set up the game playback rendering.
    atari_params = {'environment': self._environment}
    atari_plot = atari_plotter.AtariPlotter(parameter_dict=atari_params)
    # Plot the rewards received next to it.
    reward_params = {'x': atari_plot.parameters['width'],
                     'xlabel': 'Timestep',
                     'ylabel': 'Reward',
                     'title': 'Rewards',
                     'get_line_data_fn': self._agent.get_rewards}
    reward_plot = line_plotter.LinePlotter(parameter_dict=reward_params)
    action_names = [
        'Action {}'.format(x) for x in range(self._agent.num_actions)]
    # Plot Q-values (DQN) or Q-value distributions (Rainbow).
    q_params = {'x': atari_plot.parameters['width'] // 2,
                'y': atari_plot.parameters['height'],
                'legend': action_names}
    # Dispatch on the agent's class name: line plot for DQN-style Q-values,
    # bar plot for Rainbow's return distributions.
    if 'DQN' in self._agent.__class__.__name__:
      q_params['xlabel'] = 'Timestep'
      q_params['ylabel'] = 'Q-Value'
      q_params['title'] = 'Q-Values'
      q_params['get_line_data_fn'] = self._agent.get_q_values
      q_plot = line_plotter.LinePlotter(parameter_dict=q_params)
    else:
      q_params['xlabel'] = 'Return'
      q_params['ylabel'] = 'Return probability'
      q_params['title'] = 'Return distribution'
      q_params['get_bar_data_fn'] = self._agent.get_probabilities
      q_plot = bar_plotter.BarPlotter(parameter_dict=q_params)
    screen_width = (
        atari_plot.parameters['width'] + reward_plot.parameters['width'])
    screen_height = (
        atari_plot.parameters['height'] + q_plot.parameters['height'])
    # Dimensions need to be divisible by 2:
    if screen_width % 2 > 0:
      screen_width += 1
    if screen_height % 2 > 0:
      screen_height += 1
    visualizer = agent_visualizer.AgentVisualizer(
        record_path=record_path, plotters=[atari_plot, reward_plot, q_plot],
        screen_width=screen_width, screen_height=screen_height)
    global_step = 0
    while global_step < num_global_steps:
      initial_observation = self._environment.reset()
      action = self._agent.begin_episode(initial_observation)
      while True:
        observation, reward, is_terminal, _ = self._environment.step(action)
        global_step += 1
        visualizer.visualize()
        # Stop at game over or step budget; restart episode on life loss.
        if self._environment.game_over or global_step >= num_global_steps:
          break
        elif is_terminal:
          self._agent.end_episode(reward)
          action = self._agent.begin_episode(observation)
        else:
          action = self._agent.step(reward, observation)
      self._end_episode(reward)
    visualizer.generate_video()
def create_dqn_agent(sess, environment, summary_writer=None):
  """Factory for a visualization-enabled DQN agent."""
  num_actions = environment.action_space.n
  return MyDQNAgent(sess, num_actions=num_actions,
                    summary_writer=summary_writer)
def create_rainbow_agent(sess, environment, summary_writer=None):
  """Factory for a visualization-enabled Rainbow agent."""
  num_actions = environment.action_space.n
  return MyRainbowAgent(sess, num_actions=num_actions,
                        summary_writer=summary_writer)
def create_runner(base_dir, trained_agent_ckpt_path, agent='dqn',
                  use_legacy_checkpoint=False):
  """Builds a MyRunner wired to the agent factory matching `agent`."""
  if agent == 'dqn':
    create_agent = create_dqn_agent
  else:
    create_agent = create_rainbow_agent
  return MyRunner(base_dir, trained_agent_ckpt_path, create_agent,
                  use_legacy_checkpoint)
def run(agent, game, num_steps, root_dir, restore_ckpt, use_legacy_checkpoint):
  """Main entrypoint for running and generating visualizations.

  Args:
    agent: str, agent type to use.
    game: str, Atari 2600 game to run.
    num_steps: int, number of steps to play game.
    root_dir: str, root directory where files will be stored.
    restore_ckpt: str, path to the checkpoint to reload.
    use_legacy_checkpoint: bool, whether to restore from a legacy (pre-Keras)
      checkpoint.
  """
  # Minimal gin config: select the game and keep the replay buffer tiny,
  # since no training happens during visualization.
  config = """
  atari_lib.create_atari_environment.game_name = '{}'
  WrappedReplayBuffer.replay_capacity = 300
  """.format(game)
  base_dir = os.path.join(root_dir, 'agent_viz', game, agent)
  gin.parse_config(config)
  runner = create_runner(base_dir, restore_ckpt, agent, use_legacy_checkpoint)
  runner.visualize(os.path.join(base_dir, 'images'), num_global_steps=num_steps)
| 43.294574
| 84
| 0.66598
|
e891126d93d235f3ee04ecf6c2986031f24f498c
| 7,411
|
py
|
Python
|
daal4py/sklearn/ensemble/AdaBoostClassifier.py
|
amgrigoriev/daal4py
|
97fbe7a9181410dac348dc724178e8605492e3c4
|
[
"Apache-2.0"
] | null | null | null |
daal4py/sklearn/ensemble/AdaBoostClassifier.py
|
amgrigoriev/daal4py
|
97fbe7a9181410dac348dc724178e8605492e3c4
|
[
"Apache-2.0"
] | null | null | null |
daal4py/sklearn/ensemble/AdaBoostClassifier.py
|
amgrigoriev/daal4py
|
97fbe7a9181410dac348dc724178e8605492e3c4
|
[
"Apache-2.0"
] | null | null | null |
# *******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# *******************************************************************************
# daal4py AdaBoost (Adaptive Boosting) scikit-learn-compatible estimator class
import numpy as np
import numbers
import warnings
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn import preprocessing
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils import check_random_state
import daal4py as d4p
from .._utils import getFPType
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
class AdaBoostClassifier(BaseEstimator, ClassifierMixin):
    """AdaBoost classifier accelerated with Intel DAAL (via daal4py).

    Boosts decision-tree weak learners trained and evaluated by DAAL.
    Follows the scikit-learn estimator protocol (``fit``/``predict``).

    Parameters
    ----------
    split_criterion : str, 'gini' or 'infoGain'
        Split criterion for the weak-learner decision trees.
    max_tree_depth : int >= 0
        Maximum depth of each weak-learner tree (0 means unlimited in DAAL).
    min_observations_in_leaf_node : int > 0
        Minimum number of samples per tree leaf.
    max_iterations : int > 0
        Maximum number of boosting iterations.
    learning_rate : float > 0
        Shrinkage applied to each weak learner's contribution.
    accuracy_threshold : float in [0, 1)
        Training stops early once this accuracy threshold is reached.
    """

    def __init__(self,
                 split_criterion='gini',
                 max_tree_depth=1,
                 min_observations_in_leaf_node=1,
                 max_iterations=100,
                 learning_rate=1.0,
                 accuracy_threshold=0.01):
        self.split_criterion = split_criterion
        self.max_tree_depth = max_tree_depth
        self.min_observations_in_leaf_node = min_observations_in_leaf_node
        self.max_iterations = max_iterations
        self.learning_rate = learning_rate
        self.accuracy_threshold = accuracy_threshold

    def _check_parameters(self):
        """Validates hyper-parameters, raising ValueError on invalid values."""
        if self.split_criterion not in ('gini', 'infoGain'):
            raise ValueError('Parameter "split_criterion" must be '
                             '"gini" or "infoGain".')
        if not (isinstance(self.max_tree_depth, numbers.Integral)
                and self.max_tree_depth >= 0):
            raise ValueError('Parameter "max_tree_depth" must be '
                             'positive integer value or zero.')
        if not (isinstance(self.min_observations_in_leaf_node, numbers.Integral)
                and self.min_observations_in_leaf_node > 0):
            raise ValueError('Parameter "min_observations_in_leaf_node" must be '
                             'non-zero positive integer value.')
        if not (isinstance(self.max_iterations, numbers.Integral)
                and self.max_iterations > 0):
            raise ValueError('Parameter "max_iterations" must be '
                             'non-zero positive integer value.')
        if not self.learning_rate > 0:
            raise ValueError('Parameter "learning_rate" must be '
                             'non-zero positive value.')
        # It is not clear why it is so, but we will get an error from DAAL
        # otherwise: the threshold must lie in the half-open interval [0, 1).
        if not 0 <= self.accuracy_threshold < 1:
            raise ValueError('Parameter "accuracy_threshold" must be '
                             'more or equal to 0 and less than 1.')

    def fit(self, X, y):
        """Fits the boosted ensemble.

        Args:
            X: array-like of shape (n_samples, n_features), training data.
            y: array-like of shape (n_samples,), class labels.

        Returns:
            self, the fitted estimator.

        Raises:
            ValueError: if any hyper-parameter is invalid.
        """
        self._check_parameters()

        # Check that X and y have correct shape
        X, y = check_X_y(X, y, y_numeric=False, dtype=[np.single, np.double])
        check_classification_targets(y)

        # Encode labels into contiguous integers, as DAAL expects.
        le = preprocessing.LabelEncoder()
        le.fit(y)
        self.classes_ = le.classes_
        y_ = le.transform(y)

        # Convert to 2d array (DAAL requires a column vector of labels).
        y_ = y_.reshape((-1, 1))

        self.n_classes_ = len(self.classes_)
        self.n_features_ = X.shape[1]

        # Classifier can't train when only one class is present.
        # Trivial case
        if self.n_classes_ == 1:
            return self

        # Define type of data
        fptype = getFPType(X)

        # Configure the weak-learner training/prediction and the booster.
        tr = d4p.decision_tree_classification_training(
            fptype=fptype,
            nClasses=self.n_classes_,
            # This parameter is a strict upper bound in DAAL, hence the +1.
            maxTreeDepth=self.max_tree_depth + 1,
            minObservationsInLeafNodes=self.min_observations_in_leaf_node,
            splitCriterion=self.split_criterion,
            pruning='none')
        pr = d4p.decision_tree_classification_prediction(
            fptype=fptype, nClasses=self.n_classes_)
        train_algo = d4p.adaboost_training(
            fptype=fptype,
            nClasses=self.n_classes_,
            weakLearnerTraining=tr,
            weakLearnerPrediction=pr,
            maxIterations=self.max_iterations,
            learningRate=self.learning_rate,
            accuracyThreshold=self.accuracy_threshold)
        train_result = train_algo.compute(X, y_)

        # Store the model
        self.daal_model_ = train_result.model

        # Return the classifier
        return self

    def predict(self, X):
        """Predicts class labels for samples in X.

        Args:
            X: array-like of shape (n_samples, n_features).

        Returns:
            ndarray of shape (n_samples,) with predicted labels in the
            original label space.

        Raises:
            ValueError: if the estimator is unfitted or X has a wrong shape.
        """
        # Check if fit had been called
        if LooseVersion(sklearn_version) >= LooseVersion("0.22"):
            check_is_fitted(self)
        else:
            check_is_fitted(self, ['n_features_', 'n_classes_'])

        # Input validation
        X = check_array(X, dtype=[np.single, np.double])
        if X.shape[1] != self.n_features_:
            raise ValueError('Shape of input is different from what was seen in `fit`')

        # Trivial case: with one class, every sample gets that class.
        if self.n_classes_ == 1:
            return np.full(X.shape[0], self.classes_[0])

        if not hasattr(self, 'daal_model_'):
            raise ValueError(("The class {} instance does not have 'daal_model_' attribute set. "
                              "Call 'fit' with appropriate arguments before using this method.").format(type(self).__name__))

        # Define type of data
        fptype = getFPType(X)

        pr = d4p.decision_tree_classification_prediction(
            fptype=fptype, nClasses=self.n_classes_)

        # Prediction
        predict_algo = d4p.adaboost_prediction(
            fptype=fptype,
            nClasses=self.n_classes_,
            weakLearnerPrediction=pr)
        predict_result = predict_algo.compute(X, self.daal_model_)
        prediction = predict_result.prediction

        # In binary classification labels "-1, 1" are returned but "0, 1" are
        # expected by the encoder, so remap in place.
        if self.n_classes_ == 2:
            prediction[prediction == -1] = 0

        # Decode labels back to the caller's original label space.
        le = preprocessing.LabelEncoder()
        le.classes_ = self.classes_
        return le.inverse_transform(prediction.ravel().astype(np.int64, copy=False))
| 43.087209
| 139
| 0.57833
|
641b42f8e8eb0f6494e98fc4dec8f894bd62c5bb
| 559
|
py
|
Python
|
web/migrations/0011_auto_20150914_1041.py
|
codeschule/koodikoulu-site
|
dde9932564f36dce6f4dbfd31e7923f1bae83293
|
[
"MIT"
] | 5
|
2015-09-16T10:50:53.000Z
|
2016-01-16T09:10:37.000Z
|
web/migrations/0011_auto_20150914_1041.py
|
codeschule/koodikoulu-site
|
dde9932564f36dce6f4dbfd31e7923f1bae83293
|
[
"MIT"
] | 10
|
2015-09-07T05:58:03.000Z
|
2019-02-15T10:36:48.000Z
|
web/migrations/0011_auto_20150914_1041.py
|
codeschule/koodikoulu-site
|
dde9932564f36dce6f4dbfd31e7923f1bae83293
|
[
"MIT"
] | 6
|
2015-09-06T19:42:46.000Z
|
2019-12-29T21:31:07.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alters Event.category to a choice field with a default category."""

    dependencies = [
        ('web', '0010_auto_20150908_1426'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='category',
            # Bug fix: the default was the whole (value, label) tuple
            # ('ENSIASKELEET', 'Koodikoulun ensiaskeleet'), which is not a
            # valid stored value (its str() also exceeds max_length=30).
            # The default must be the stored choice key only.
            field=models.CharField(max_length=30, default='ENSIASKELEET', choices=[('ENSIASKELEET', 'Koodikoulun ensiaskeleet'), ('ILTIS', 'Koodikoulun iltis'), ('OTHER', 'Muu')]),
        ),
    ]
| 27.95
| 210
| 0.631485
|
f9b82bc3799fd2e7d2dee853d432ed2c29e9eb34
| 14,282
|
bzl
|
Python
|
swift/internal/swift_protoc_gen_aspect.bzl
|
Chenyang1112/rules_swift.0.9.0
|
f796267edac24be4345c73032540c5be3fee59ad
|
[
"Apache-2.0"
] | 1
|
2020-03-15T08:23:28.000Z
|
2020-03-15T08:23:28.000Z
|
swift/internal/swift_protoc_gen_aspect.bzl
|
Chenyang1112/rules_swift.0.9.0
|
f796267edac24be4345c73032540c5be3fee59ad
|
[
"Apache-2.0"
] | null | null | null |
swift/internal/swift_protoc_gen_aspect.bzl
|
Chenyang1112/rules_swift.0.9.0
|
f796267edac24be4345c73032540c5be3fee59ad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An aspect attached to `proto_library` targets to generate Swift artifacts."""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(":api.bzl", "swift_common")
load(":features.bzl", "SWIFT_FEATURE_ENABLE_TESTING", "SWIFT_FEATURE_NO_GENERATED_HEADER")
load(
":proto_gen_utils.bzl",
"declare_generated_files",
"extract_generated_dir_path",
"register_module_mapping_write_action",
)
load(":providers.bzl", "SwiftInfo", "SwiftProtoInfo", "SwiftToolchainInfo")
load(":utils.bzl", "workspace_relative_path")
# The paths of well known type protos that should not be generated by the aspect
# because they are already included in the SwiftProtobuf runtime. The plugin
# provides the mapping from these protos to the SwiftProtobuf module for us.
# TODO(b/63389580): Once we migrate to proto_lang_toolchain, this information
# can go in the blacklisted_protos list instead.
# Compared against workspace-relative paths in _filter_out_well_known_types.
_WELL_KNOWN_TYPE_PATHS = [
    "google/protobuf/any.proto",
    "google/protobuf/api.proto",
    "google/protobuf/duration.proto",
    "google/protobuf/empty.proto",
    "google/protobuf/field_mask.proto",
    "google/protobuf/source_context.proto",
    "google/protobuf/struct.proto",
    "google/protobuf/timestamp.proto",
    "google/protobuf/type.proto",
    "google/protobuf/wrappers.proto",
]
def _filter_out_well_known_types(srcs):
    """Returns the given list of files, excluding any well-known type protos.

    Args:
      srcs: A list of `.proto` files.

    Returns:
      The given list of files with any well-known type protos (those living
      under the `google.protobuf` package) removed.
    """
    kept = []
    for src in srcs:
        if workspace_relative_path(src) not in _WELL_KNOWN_TYPE_PATHS:
            kept.append(src)
    return kept
def _register_pbswift_generate_action(
        label,
        actions,
        direct_srcs,
        transitive_descriptor_sets,
        module_mapping_file,
        mkdir_and_run,
        protoc_executable,
        protoc_plugin_executable):
    """Registers the actions that generate `.pb.swift` files from `.proto` files.

    Args:
      label: The label of the target being analyzed.
      actions: The context's actions object.
      direct_srcs: The direct `.proto` sources belonging to the target being
          analyzed, which will be passed to `protoc-gen-swift`.
      transitive_descriptor_sets: The transitive `DescriptorSet`s from the
          `proto_library` being analyzed.
      module_mapping_file: The `File` containing the mapping between `.proto`
          files and Swift modules for the transitive dependencies of the target
          being analyzed. May be `None`, in which case no module mapping will be
          passed (the case for leaf nodes in the dependency graph).
      mkdir_and_run: The `File` representing the `mkdir_and_run` executable.
      protoc_executable: The `File` representing the `protoc` executable.
      protoc_plugin_executable: The `File` representing the `protoc` plugin
          executable.

    Returns:
      A list of generated `.pb.swift` files corresponding to the `.proto`
      sources.
    """
    generated_files = declare_generated_files(label.name, actions, "pb", direct_srcs)
    generated_dir_path = extract_generated_dir_path(label.name, "pb", generated_files)

    # First args group: the directory mkdir_and_run must create.
    mkdir_args = actions.args()
    mkdir_args.add(generated_dir_path)

    # Second args group: the protoc binary for mkdir_and_run to invoke.
    protoc_executable_args = actions.args()
    protoc_executable_args.add(protoc_executable)

    protoc_args = actions.args()

    # protoc takes an arg of @NAME as something to read, and expects one
    # arg per line in that file.
    protoc_args.set_param_file_format("multiline")
    protoc_args.use_param_file("@%s")

    protoc_args.add(
        protoc_plugin_executable,
        format = "--plugin=protoc-gen-swift=%s",
    )
    protoc_args.add(generated_dir_path, format = "--swift_out=%s")
    protoc_args.add("--swift_opt=FileNaming=FullPath")
    protoc_args.add("--swift_opt=Visibility=Public")
    if module_mapping_file:
        protoc_args.add(
            module_mapping_file,
            format = "--swift_opt=ProtoPathModuleMappings=%s",
        )

    # Resolve imports via descriptor sets rather than source files; the list
    # is colon-joined as protoc expects for --descriptor_set_in.
    protoc_args.add("--descriptor_set_in")
    protoc_args.add_joined(transitive_descriptor_sets, join_with = ":")

    protoc_args.add_all([workspace_relative_path(f) for f in direct_srcs])

    additional_command_inputs = []
    if module_mapping_file:
        additional_command_inputs.append(module_mapping_file)

    # TODO(b/23975430): This should be a simple `actions.run_shell`, but until
    # the cited bug is fixed, we have to use the wrapper script.
    actions.run(
        arguments = [mkdir_args, protoc_executable_args, protoc_args],
        executable = mkdir_and_run,
        inputs = depset(
            direct = additional_command_inputs,
            transitive = [transitive_descriptor_sets],
        ),
        mnemonic = "ProtocGenSwift",
        outputs = generated_files,
        progress_message = "Generating Swift sources for {}".format(label),
        tools = [
            mkdir_and_run,
            protoc_executable,
            protoc_plugin_executable,
        ],
    )

    return generated_files
def _build_swift_proto_info_provider(
        pbswift_files,
        transitive_module_mappings,
        deps):
    """Builds the `SwiftProtoInfo` provider to propagate for a proto library.

    Args:
      pbswift_files: The `.pb.swift` files that were generated for the
          propagating target. This sequence should only contain the direct
          sources.
      transitive_module_mappings: A sequence of `struct`s with `module_name`
          and `proto_file_paths` fields that denote the transitive mappings
          from `.proto` files to Swift modules.
      deps: The direct dependencies of the propagating target, from which the
          transitive sources will be computed.

    Returns:
      An instance of `SwiftProtoInfo`.
    """
    # Merge the dependencies' generated files into the transitive set.
    dep_pbswift_depsets = [dep[SwiftProtoInfo].pbswift_files for dep in deps]
    all_pbswift_files = depset(
        direct = pbswift_files,
        transitive = dep_pbswift_depsets,
    )
    return SwiftProtoInfo(
        module_mappings = transitive_module_mappings,
        pbswift_files = all_pbswift_files,
    )
def _build_module_mapping_from_srcs(target, proto_srcs):
    """Returns the module mapping `struct` for the given sources.

    Args:
      target: The `proto_library` target whose module mapping is being
          rendered.
      proto_srcs: The `.proto` files that belong to the target.

    Returns:
      A `struct` with `module_name` and `proto_file_paths` fields describing
      the target's mapping.
    """

    # TODO(allevato): The previous use of f.short_path here caused problems
    # with cross-repo references; protoc-gen-swift only processes the file
    # correctly if the workspace-relative path is used (which is the same as
    # the short_path for same-repo references, so this issue had never been
    # caught). However, this implies that if two repos have protos with the
    # same workspace-relative paths, there will be a clash. Figure out what to
    # do here; it may require an update to protoc-gen-swift?
    file_paths = []
    for src in proto_srcs:
        file_paths.append(workspace_relative_path(src))
    return struct(
        module_name = swift_common.derive_module_name(target.label),
        proto_file_paths = file_paths,
    )
def _gather_transitive_module_mappings(targets):
    """Returns the set of transitive module mappings for the given targets.

    This function eliminates duplicates among the targets so that if two or
    more targets transitively depend on the same `proto_library`, the mapping
    is only present in the sequence once.

    Args:
      targets: The targets whose module mappings should be returned.

    Returns:
      A sequence containing the transitive module mappings for the given
      targets, without duplicates.
    """
    paths_by_module = {}
    for dep in targets:
        for mapping in dep[SwiftProtoInfo].module_mappings:
            # Keep only the first mapping seen for each module name.
            if mapping.module_name not in paths_by_module:
                paths_by_module[mapping.module_name] = mapping.proto_file_paths
    return [
        struct(
            module_name = name,
            proto_file_paths = paths,
        )
        for name, paths in paths_by_module.items()
    ]
def _swift_protoc_gen_aspect_impl(target, aspect_ctx):
    """Aspect implementation: generates/compiles Swift for a `proto_library`.

    Args:
      target: The `proto_library` (or `ProtoInfo`-bearing) target visited.
      aspect_ctx: The aspect context.

    Returns:
      A list of providers: compile providers when the target has direct
      sources, otherwise merged providers from its deps; always ends with a
      `SwiftProtoInfo`.
    """
    toolchain = aspect_ctx.attr._toolchain[SwiftToolchainInfo]
    # Well-known types ship with the SwiftProtobuf runtime, so they must not
    # be regenerated here.
    direct_srcs = _filter_out_well_known_types(target[ProtoInfo].direct_sources)
    # Direct sources are passed as arguments to protoc to generate *only* the
    # files in this target, but we need to pass the transitive sources as inputs
    # to the generating action so that all the dependent files are available for
    # protoc to parse.
    # Instead of providing all those files and opening/reading them, we use
    # protoc's support for reading descriptor sets to resolve things.
    transitive_descriptor_sets = target[ProtoInfo].transitive_descriptor_sets
    deps = [dep for dep in aspect_ctx.rule.attr.deps if SwiftProtoInfo in dep]
    # The "minimal" mapping covers this target plus its (de-duplicated)
    # transitive deps; it is written to a file consumed by protoc-gen-swift.
    minimal_module_mappings = []
    if direct_srcs:
        minimal_module_mappings.append(
            _build_module_mapping_from_srcs(target, direct_srcs),
        )
    if deps:
        minimal_module_mappings.extend(_gather_transitive_module_mappings(deps))
    transitive_module_mapping_file = register_module_mapping_write_action(
        target,
        aspect_ctx.actions,
        minimal_module_mappings,
    )
    if direct_srcs:
        # Generate the Swift sources from the .proto files.
        pbswift_files = _register_pbswift_generate_action(
            target.label,
            aspect_ctx.actions,
            direct_srcs,
            transitive_descriptor_sets,
            transitive_module_mapping_file,
            aspect_ctx.executable._mkdir_and_run,
            aspect_ctx.executable._protoc,
            aspect_ctx.executable._protoc_gen_swift,
        )
        # Compile the generated Swift sources and produce a static library and a
        # .swiftmodule as outputs. In addition to the other proto deps, we also pass
        # support libraries like the SwiftProtobuf runtime as deps to the compile
        # action.
        compile_deps = deps + aspect_ctx.attr._proto_support
        feature_configuration = swift_common.configure_features(
            requested_features = aspect_ctx.features + [SWIFT_FEATURE_NO_GENERATED_HEADER],
            swift_toolchain = toolchain,
            unsupported_features = aspect_ctx.disabled_features + [SWIFT_FEATURE_ENABLE_TESTING],
        )
        compile_results = swift_common.compile_as_library(
            actions = aspect_ctx.actions,
            bin_dir = aspect_ctx.bin_dir,
            label = target.label,
            module_name = swift_common.derive_module_name(target.label),
            srcs = pbswift_files,
            toolchain = toolchain,
            deps = compile_deps,
            feature_configuration = feature_configuration,
            genfiles_dir = aspect_ctx.genfiles_dir,
            # Prevent conflicts with C++ protos in the same output directory, which
            # use the `lib{name}.a` pattern. This will produce `lib{name}.swift.a`
            # instead.
            library_name = "{}.swift".format(target.label.name),
        )
        providers = compile_results.providers
    else:
        # If there are no srcs, merge the SwiftInfo providers and propagate them. Do
        # likewise for apple_common.Objc providers if the toolchain supports
        # Objective-C interop.
        pbswift_files = []
        providers = [swift_common.merge_swift_infos(
            [dep[SwiftInfo] for dep in deps if SwiftInfo in dep],
        )]
        if toolchain.supports_objc_interop:
            objc_providers = [
                dep[apple_common.Objc]
                for dep in deps
                if apple_common.Objc in dep
            ]
            objc_provider = apple_common.new_objc_provider(providers = objc_providers)
            providers.append(objc_provider)
    providers.append(_build_swift_proto_info_provider(
        pbswift_files,
        minimal_module_mappings,
        deps,
    ))
    return providers
# Aspect applied along `deps` edges of `swift_proto_library`; see the `doc`
# string below for its contract.
swift_protoc_gen_aspect = aspect(
    attr_aspects = ["deps"],
    attrs = dicts.add(
        swift_common.toolchain_attrs(),
        {
            # Wrapper script that creates the output directory before invoking
            # protoc (protoc does not create it itself).
            "_mkdir_and_run": attr.label(
                cfg = "host",
                default = Label(
                    "@build_bazel_rules_swift//tools/mkdir_and_run",
                ),
                executable = True,
            ),
            # TODO(b/63389580): Migrate to proto_lang_toolchain.
            # Runtime library linked into every generated module.
            "_proto_support": attr.label_list(
                default = [
                    Label("@com_github_apple_swift_protobuf//:SwiftProtobuf"),
                ],
            ),
            "_protoc": attr.label(
                cfg = "host",
                default = Label("@com_google_protobuf//:protoc"),
                executable = True,
            ),
            "_protoc_gen_swift": attr.label(
                cfg = "host",
                default = Label("@com_github_apple_swift_protobuf//:ProtoCompilerPlugin"),
                executable = True,
            ),
        },
    ),
    doc = """
Generates Swift artifacts for a `proto_library` target.
For each `proto_library` (more specifically, any target that propagates a
`proto` provider) to which this aspect is applied, the aspect will register
actions that generate Swift artifacts and propagate them in a `SwiftProtoInfo`
provider.
Most users should not need to use this aspect directly; it is an implementation
detail of the `swift_proto_library` rule.
""",
    implementation = _swift_protoc_gen_aspect_impl,
)
| 39.128767
| 99
| 0.680157
|
74a624ea7517acb9f3c0701df7584615d00e4b87
| 2,464
|
py
|
Python
|
manif.py
|
TheWitchers3/Flask_Server
|
b8a66e007cd2dd3fb2e045841b4460678546c304
|
[
"bzip2-1.0.6"
] | 3
|
2020-07-17T18:27:10.000Z
|
2020-10-13T03:11:02.000Z
|
manif.py
|
TheWitchers3/Flask_Server
|
b8a66e007cd2dd3fb2e045841b4460678546c304
|
[
"bzip2-1.0.6"
] | 1
|
2021-06-02T01:05:16.000Z
|
2021-06-02T01:05:16.000Z
|
manif.py
|
TheWitchers3/Flask_Server
|
b8a66e007cd2dd3fb2e045841b4460678546c304
|
[
"bzip2-1.0.6"
] | null | null | null |
import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
def clean_tweet(tweet):
    """Strip @mentions, URLs and some punctuation from *tweet*, collapsing whitespace."""
    # NOTE(review): the space before the second '|' means a punctuation char is
    # only removed when it is followed by a space -- looks unintentional, but
    # the pattern is preserved verbatim to keep behavior identical.
    stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) |(\w+:\/\/\S+)", " ", tweet)
    return ' '.join(stripped.split())
def get_tweet_sentiment(tweet):
    """Classify *tweet* as 'positive', 'neutral' or 'negative' by TextBlob polarity."""
    polarity = TextBlob(clean_tweet(tweet)).sentiment.polarity
    if polarity > 0:
        return 'positive'
    if polarity < 0:
        return 'negative'
    return 'neutral'
def get_tweets(query, count = "10", ck="", cs="", at="", ats=""):
    """Search Twitter for *query* and classify each English tweet's sentiment.

    Args:
        query: Search string passed to the Twitter search API.
        count: Maximum number of tweets to fetch (string or int).
        ck/cs/at/ats: Twitter consumer key/secret and access token/secret.

    Returns:
        A list of {'text': ..., 'sentiment': ...} dicts; an empty list when
        authentication or the search fails (never None, so callers can
        iterate the result safely).
    """
    try:
        auth = OAuthHandler(ck, cs)
        auth.set_access_token(at, ats)
        api = tweepy.API(auth, wait_on_rate_limit=True)
    except Exception:
        # Previously this fell through after printing, then crashed with a
        # NameError because ``api`` was never bound; bail out instead.
        print("Error: Authentication Failed")
        return []
    tweets = []
    try:
        fetched_tweets = api.search(q = query, count = count)
        for tweet in fetched_tweets:
            if tweet.lang != "en":
                continue
            parsed_tweet = {
                'text': clean_tweet(tweet.text),
                'sentiment': get_tweet_sentiment(tweet.text),
            }
            if tweet.retweet_count > 0:
                # Retweets surface the same text many times; keep one copy.
                if parsed_tweet not in tweets:
                    tweets.append(parsed_tweet)
            else:
                tweets.append(parsed_tweet)
        return tweets
    except tweepy.TweepError as e:
        print("Error : " + str(e))
        # Was an implicit ``return None``, which made callers crash when
        # iterating the result; return what we gathered so far instead.
        return tweets
def getAnalysis(query, count, ck, cs, at, ats):
    """Run a sentiment analysis over tweets matching *query*.

    Args:
        query: Search term; count: number of tweets to fetch.
        ck/cs/at/ats: Twitter API credentials.

    Returns:
        A dict with positive/negative tweet lists and percentage breakdowns,
        or None when any argument is empty or no tweets could be fetched.
    """
    if query == "" or count == "" or ck == "" or cs == "" or at == "" or ats == "":
        return None
    ftweets = get_tweets(query, count, ck, cs, at, ats)
    # ``or []`` guards against a None return from older get_tweets versions.
    tweets = [t for t in (ftweets or []) if t]
    if not tweets:
        # Previously divided by len(tweets) == 0 here -> ZeroDivisionError.
        return None
    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    ptpercentage = 100 * len(ptweets) / len(tweets)
    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    ntpercentage = 100 * len(ntweets) / len(tweets)
    neutpercentage = 100 * ((len(tweets) - len(ntweets) - len(ptweets)) / len(tweets))
    analysis = {}
    analysis['ptweets'] = ptweets
    analysis['ntweets'] = ntweets
    analysis['ptpercentage'] = ptpercentage
    analysis['ntpercentage'] = ntpercentage
    analysis['neutpercentage'] = neutpercentage
    return analysis
if __name__ == "__main__":
    # Manual smoke test; credentials are empty, so this exercises the
    # early-exit path of getAnalysis rather than a real API call.
    ana = getAnalysis("modi","20",ck="",cs="",at="",ats="")
    print(ana)
| 30.8
| 99
| 0.60836
|
40e5bd0c7d49821de61aa290e167439bf30de8e8
| 669
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
j-dunham/recipe-app-api
|
120be6de09ac99517b0833280fe49a327d1c4833
|
[
"MIT"
] | 1
|
2019-04-18T17:31:07.000Z
|
2019-04-18T17:31:07.000Z
|
app/core/management/commands/wait_for_db.py
|
j-dunham/recipe-app-api
|
120be6de09ac99517b0833280fe49a327d1c4833
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
j-dunham/recipe-app-api
|
120be6de09ac99517b0833280fe49a327d1c4833
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until database is available"""

    def handle(self, *args, **options):
        """Poll the default database once a second until a connection succeeds."""
        self.stdout.write("Waiting for database...")
        db_conn = None
        while not db_conn:
            try:
                # Merely reading connections["default"] returns the lazy
                # wrapper without connecting, so OperationalError was never
                # raised and the loop exited even with the DB down. Force a
                # real connection attempt.
                connections["default"].ensure_connection()
                db_conn = connections["default"]
            except OperationalError:
                self.stdout.write("Database unavailable.. waiting 1 seconds...")
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS("Database Ready!"))
| 31.857143
| 80
| 0.650224
|
0ab6cda6faf8376791f8d948f7c0e8a4ff115fb4
| 564
|
py
|
Python
|
adminapp/migrations/0007_alter_subscriber_user.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | null | null | null |
adminapp/migrations/0007_alter_subscriber_user.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | 1
|
2021-07-19T14:27:28.000Z
|
2021-07-19T14:27:28.000Z
|
adminapp/migrations/0007_alter_subscriber_user.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | 2
|
2021-07-14T21:56:46.000Z
|
2021-07-15T16:11:41.000Z
|
# Generated by Django 3.2.4 on 2021-07-08 03:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Subscriber.user to a unique FK onto the configured user model."""

    dependencies = [
        ('adminapp', '0006_alter_subscriber_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='subscriber',
            name='user',
            # NOTE(review): unique=True on a ForeignKey is what Django's W342
            # check flags (a OneToOneField is usually intended). Left as-is:
            # applied migration files must remain byte-stable.
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='subscriber_id', to=settings.AUTH_USER_MODEL, unique=True),
        ),
    ]
| 26.857143
| 156
| 0.679078
|
f29bd888a63e41afe0f312b1541b6de4b6c59c5a
| 13,439
|
py
|
Python
|
test/unit/session/test_session.py
|
DaveSawyer/box-python-sdk
|
6a5134ff9bbbbc0478bb39d7f622fe3829048cbf
|
[
"Apache-2.0"
] | null | null | null |
test/unit/session/test_session.py
|
DaveSawyer/box-python-sdk
|
6a5134ff9bbbbc0478bb39d7f622fe3829048cbf
|
[
"Apache-2.0"
] | null | null | null |
test/unit/session/test_session.py
|
DaveSawyer/box-python-sdk
|
6a5134ff9bbbbc0478bb39d7f622fe3829048cbf
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from functools import partial
from io import IOBase
from numbers import Number
from mock import MagicMock, Mock, PropertyMock, call, patch, ANY
import pytest
from boxsdk.auth.oauth2 import OAuth2
from boxsdk.config import API, Proxy
from boxsdk.exception import BoxAPIException, BoxException
from boxsdk.network.default_network import DefaultNetwork, DefaultNetworkResponse
from boxsdk.session.box_response import BoxResponse
from boxsdk.session.session import Session, Translator, AuthorizedSession
@pytest.fixture(scope='function', params=[False, True])
def translator(default_translator, request):  # pylint:disable=unused-argument
    """Parametrized fixture: a fresh child Translator, or None to use the default."""
    if not request.param:
        return None
    return Translator(extend_default_translator=True, new_child=True)
@pytest.fixture
def initial_access_token():
    """The access token the mocked OAuth2 object starts out holding."""
    token = 'fake_access_token'
    return token
@pytest.fixture
def mock_oauth(initial_access_token):
    """A MagicMock OAuth2 whose access_token starts at the initial token."""
    oauth = MagicMock(OAuth2)
    oauth.access_token = initial_access_token
    return oauth
@pytest.fixture
def mock_network_layer():
    """A Mock standing in for the DefaultNetwork transport layer."""
    network = Mock(DefaultNetwork)
    return network
@pytest.fixture
def unauthorized_session(mock_network_layer, translator):
    """A plain Session with no OAuth object attached."""
    # pylint:disable=redefined-outer-name
    return Session(
        network_layer=mock_network_layer,
        translator=translator,
    )
@pytest.fixture
def box_session(mock_oauth, mock_network_layer, translator):
    """An AuthorizedSession wired to the mocked OAuth and network layer."""
    # pylint:disable=redefined-outer-name
    return AuthorizedSession(
        oauth=mock_oauth,
        network_layer=mock_network_layer,
        translator=translator,
    )
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
])
def test_box_session_handles_unauthorized_response(
        test_method,
        box_session,
        mock_oauth,
        mock_network_layer,
        unauthorized_response,
        generic_successful_response,
        test_url,
):
    """A 401 response triggers a token refresh and a retry that succeeds."""
    # pylint:disable=redefined-outer-name
    def get_access_token_from_auth_object():
        return mock_oauth.access_token
    # First request yields 401, the retry succeeds; each mocked response
    # reports whichever token the auth object held when it was "used".
    mock_network_layer.request.side_effect = mock_responses = [unauthorized_response, generic_successful_response]
    for mock_response in mock_responses:
        type(mock_response).access_token_used = PropertyMock(side_effect=get_access_token_from_auth_object)
    def refresh(access_token_used):
        # The session must hand refresh() the token it actually used.
        assert access_token_used == mock_oauth.access_token
        mock_oauth.access_token = 'fake_new_access_token'
        return (mock_oauth.access_token, None)
    mock_oauth.refresh.side_effect = refresh
    box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
])
@pytest.mark.parametrize('initial_access_token', [None])
def test_box_session_gets_access_token_before_request(
        test_method,
        box_session,
        mock_oauth,
        mock_network_layer,
        generic_successful_response,
        test_url,
):
    """With no initial token, the session refreshes before the first request."""
    # pylint:disable=redefined-outer-name
    def get_access_token_from_auth_object():
        return mock_oauth.access_token
    mock_network_layer.request.side_effect = mock_responses = [generic_successful_response]
    for mock_response in mock_responses:
        type(mock_response).access_token_used = PropertyMock(side_effect=get_access_token_from_auth_object)
    def refresh(access_token_used):
        # access_token starts as None (parametrized above), so refresh must
        # run first and install a real token.
        assert access_token_used == mock_oauth.access_token
        mock_oauth.access_token = 'fake_new_access_token'
        return (mock_oauth.access_token, None)
    mock_oauth.refresh.side_effect = refresh
    box_response = test_method(box_session, url=test_url, auto_session_renewal=True)
    assert box_response.status_code == 200
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
    partial(Session.request, method='head'),
])
def test_box_session_retries_response_after_retry_after(
        test_method,
        box_session,
        mock_network_layer,
        retry_after_response,
        generic_successful_response,
        test_url,
):
    """A 429-style response is retried once, after a ~1s computed delay."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [retry_after_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    # Pin the jitter so the exponential-backoff delay is deterministic.
    with patch('random.uniform', return_value=0.68):
        box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
    assert len(mock_network_layer.retry_after.call_args_list) == 1
    assert isinstance(mock_network_layer.retry_after.call_args[0][0], Number)
    assert round(mock_network_layer.retry_after.call_args[0][0], 4) == 1
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
    partial(Session.request, method='head'),
])
def test_box_session_retries_request_after_server_error(
        test_method,
        box_session,
        mock_network_layer,
        server_error_response,
        generic_successful_response,
        test_url,
):
    """Two 5xx responses cause two retries with exponentially growing delays."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [server_error_response, server_error_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    # Pin the jitter so the two backoff delays (1.18s, then doubled) are exact.
    with patch('random.uniform', return_value=0.68):
        box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
    assert box_response.json() == generic_successful_response.json()
    assert box_response.ok == generic_successful_response.ok
    assert box_response.content == generic_successful_response.content
    assert len(mock_network_layer.retry_after.call_args_list) == 2
    assert isinstance(mock_network_layer.retry_after.call_args_list[0][0][0], Number)
    assert isinstance(mock_network_layer.retry_after.call_args_list[1][0][0], Number)
    assert round(mock_network_layer.retry_after.call_args_list[0][0][0], 4) == 1.18
    assert round(mock_network_layer.retry_after.call_args_list[1][0][0], 4) == 2.36
def test_box_session_seeks_file_after_retry(box_session, mock_network_layer, server_error_response, generic_successful_response, test_url):
    """Files attached to a retried upload are seek()ed back to their original offsets."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [server_error_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    mock_file_1, mock_file_2 = MagicMock(IOBase), MagicMock(IOBase)
    mock_file_1.tell.return_value = 0
    mock_file_2.tell.return_value = 3
    files = {'file': ('unused', mock_file_1), 'f2': ('unused', mock_file_2)}
    box_response = box_session.post(url=test_url, files=files)
    assert box_response.status_code == 200
    assert box_response.json() == generic_successful_response.json()
    assert box_response.ok == generic_successful_response.ok
    mock_file_1.tell.assert_called_with()
    mock_file_2.tell.assert_called_with()
    mock_file_1.seek.assert_called_with(0)
    assert mock_file_1.seek.call_count == 2
    # BUG FIX: ``assert mock.seek.has_calls(...)`` never asserted anything --
    # ``has_calls`` is not a Mock assertion method, so Mock auto-created a
    # child mock and returned it (always truthy). Use assert_has_calls with a
    # list of the expected calls instead.
    mock_file_1.seek.assert_has_calls([call(0)] * 2)
    mock_file_2.seek.assert_called_with(3)
    assert mock_file_2.seek.call_count == 2
    mock_file_2.seek.assert_has_calls([call(3)] * 2)
def test_box_session_raises_for_non_json_response(box_session, mock_network_layer, non_json_response, test_url):
    """A successful but non-JSON body on a JSON endpoint raises BoxAPIException."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [non_json_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url)
def test_box_session_raises_for_failed_response(box_session, mock_network_layer, bad_network_response, test_url):
    """A failed network response surfaces as BoxAPIException."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [bad_network_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url)
def test_box_session_raises_for_failed_response_with_error_and_error_description(box_session, mock_network_layer, bad_network_response_400, test_url):
    """The 'error'/'error_description' fields of a 400 body map onto the exception."""
    mock_network_layer.request.side_effect = [bad_network_response_400]
    try:
        box_session.get(url=test_url)
        pytest.fail('Should throw exception because of bad network response')
    except BoxAPIException as exception:
        assert exception.code == 'Example Error'
        assert exception.message == 'Example Error Description'
def test_box_session_raises_for_failed_non_json_response(box_session, mock_network_layer, failed_non_json_response, test_url):
    """A failed response raises even when JSON was not expected."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [failed_non_json_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url, expect_json_response=False)
def test_box_response_properties_pass_through_to_network_response_properties():
    """BoxResponse delegates json/content/ok/status_code to the wrapped response."""
    mock_network_response = Mock(DefaultNetworkResponse)
    box_result = BoxResponse(mock_network_response)
    assert box_result.json() == mock_network_response.json()
    assert box_result.content == mock_network_response.content
    assert box_result.ok == mock_network_response.ok
    assert box_result.status_code == mock_network_response.status_code
    assert box_result.network_response == mock_network_response
def test_translator(box_session, translator, default_translator, original_default_translator):
    """The session translator equals the default; new registrations stay local."""
    assert isinstance(box_session.translator, Translator)
    assert box_session.translator == default_translator
    if translator:
        assert box_session.translator is translator
    # Test that adding new registrations works.
    class Foo:
        pass
    item_type = u'ƒøø'
    box_session.translator.register(item_type, Foo)
    assert box_session.translator.get(item_type) is Foo
    # Test that adding new registrations does not affect global state.
    assert default_translator == original_default_translator
    assert (set(box_session.translator) - set(default_translator)) == set([item_type])
def test_session_uses_global_config(box_session, mock_network_layer, generic_successful_response, monkeypatch):
    """URLs built by the session pick up a monkeypatched global BASE_API_URL."""
    mock_network_layer.request.side_effect = generic_successful_response
    example_dot_com = 'https://example.com/'
    monkeypatch.setattr(API, 'BASE_API_URL', example_dot_com)
    assert example_dot_com in box_session.get_url('foo', 'bar')
def test_session_uses_local_config(box_session, mock_network_layer, generic_successful_response, monkeypatch):
    """A per-session api_config overrides the global BASE_API_URL."""
    mock_network_layer.request.side_effect = generic_successful_response
    example_dot_com = 'https://example.com/'
    box_session.api_config.BASE_API_URL = example_dot_com
    monkeypatch.setattr(API, 'BASE_API_URL', 'https://api.box.com')
    assert example_dot_com in box_session.get_url('foo', 'bar')
@pytest.mark.parametrize(
    'attempt_number,retry_after_header,expected_result',
    [
        (0, '', 1.18),
        (1, '', 2.36),
        (2, '', 4.72),
        (3, '', 9.44),
        (4, '', 18.88),
    ]
)
def test_get_retry_after_time(box_session, attempt_number, retry_after_header, expected_result):
    """Without a Retry-After header, the delay doubles per attempt (jitter pinned)."""
    with patch('random.uniform', return_value=0.68):
        retry_time = box_session.get_retry_after_time(attempt_number, retry_after_header)  # pylint: disable=protected-access
        retry_time = round(retry_time, 4)
        assert retry_time == expected_result
@pytest.mark.parametrize(
    'test_proxy_url,test_proxy_auth,expected_proxy_dict',
    [
        ('http://example-proxy.com', {'user': 'test_user', 'password': 'test_password', },
         {'http': 'http://test_user:test_password@example-proxy.com', 'https': 'http://test_user:test_password@example-proxy.com'}),
        ('http://example-proxy.com', None, {'http': 'http://example-proxy.com', 'https': 'http://example-proxy.com'}),
    ]
)
def test_proxy_attaches_to_request_correctly(
        box_session,
        monkeypatch,
        mock_network_layer,
        generic_successful_response,
        test_proxy_url, test_proxy_auth,
        expected_proxy_dict):
    """Proxy URL/auth config is rendered into the `proxies` dict on each request."""
    monkeypatch.setattr(Proxy, 'URL', test_proxy_url)
    monkeypatch.setattr(Proxy, 'AUTH', test_proxy_auth)
    mock_network_layer.request.side_effect = [generic_successful_response]
    box_session.request('GET', test_proxy_url)
    mock_network_layer.request.assert_called_once_with(
        'GET',
        test_proxy_url,
        access_token='fake_access_token',
        headers=ANY,
        proxies=expected_proxy_dict,
    )
def test_proxy_malformed_dict_does_not_attach(box_session, monkeypatch, mock_network_layer, generic_successful_response):
    """A proxy auth dict without user/password keys raises BoxException."""
    test_proxy_url = 'http://example.com'
    test_proxy_auth = {
        'foo': 'bar',
    }
    monkeypatch.setattr(Proxy, 'URL', test_proxy_url)
    monkeypatch.setattr(Proxy, 'AUTH', test_proxy_auth)
    mock_network_layer.request.side_effect = [generic_successful_response]
    with pytest.raises(BoxException) as exc_info:
        box_session.request('GET', test_proxy_url)
    assert isinstance(exc_info.value, BoxException)
    assert exc_info.value.args[0] == "The proxy auth dict you provided does not match pattern " \
                                     "{'user': 'example_user', 'password': 'example_password'}"
def test_proxy_network_config_property(box_session):
    """Session.proxy_config exposes the Proxy configuration object."""
    assert isinstance(box_session.proxy_config, Proxy)
| 38.287749
| 150
| 0.754
|
2f818680599014adfab1550717056ab2186b5d3a
| 15,601
|
py
|
Python
|
api_tests/wikis/views/test_wiki_detail.py
|
chennan47/osf.io
|
270608592b39a94941a3e329c0dc16d295a82472
|
[
"Apache-2.0"
] | null | null | null |
api_tests/wikis/views/test_wiki_detail.py
|
chennan47/osf.io
|
270608592b39a94941a3e329c0dc16d295a82472
|
[
"Apache-2.0"
] | 18
|
2020-03-24T16:16:14.000Z
|
2022-03-03T22:37:48.000Z
|
api_tests/wikis/views/test_wiki_detail.py
|
kounoAkihiro/SV-RDM-OSF
|
76fb0c739f4cdabf03b5bfd2bc63d83b1c2d4796
|
[
"Apache-2.0"
] | 1
|
2021-10-04T21:16:56.000Z
|
2021-10-04T21:16:56.000Z
|
import mock
import pytest
import furl
from urlparse import urlparse
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from osf.models import Guid
from addons.wiki.models import NodeWikiPage
from tests.base import ApiWikiTestCase
from osf_tests.factories import (ProjectFactory, RegistrationFactory,
PrivateLinkFactory, CommentFactory)
from addons.wiki.tests.factories import NodeWikiFactory
class TestWikiDetailView(ApiWikiTestCase):
    def _set_up_public_project_with_wiki_page(self, project_options=None):
        """Create a public project + wiki page; set self.public_* attributes."""
        project_options = project_options or {}
        self.public_project = ProjectFactory(
            is_public=True, creator=self.user, **project_options)
        self.public_wiki = self._add_project_wiki_page(
            self.public_project, self.user)
        self.public_url = '/{}wikis/{}/'.format(API_BASE, self.public_wiki._id)
    def _set_up_private_project_with_wiki_page(self):
        """Create a private project + wiki page; set self.private_* attributes."""
        self.private_project = ProjectFactory(creator=self.user)
        self.private_wiki = self._add_project_wiki_page(
            self.private_project, self.user)
        self.private_url = '/{}wikis/{}/'.format(
            API_BASE, self.private_wiki._id)
    def _set_up_public_registration_with_wiki_page(self):
        """Register the public project and point its wiki at the copied version."""
        self._set_up_public_project_with_wiki_page()
        self.public_registration = RegistrationFactory(
            project=self.public_project, user=self.user, is_public=True)
        self.public_registration_wiki_id = self.public_registration.wiki_pages_versions[
            'home'][0]
        self.public_registration.wiki_pages_current = {
            'home': self.public_registration_wiki_id}
        self.public_registration.save()
        self.public_registration_url = '/{}wikis/{}/'.format(
            API_BASE, self.public_registration_wiki_id)
    def _set_up_private_registration_with_wiki_page(self):
        """Register the private project and point its wiki at the copied version."""
        self._set_up_private_project_with_wiki_page()
        self.private_registration = RegistrationFactory(
            project=self.private_project, user=self.user)
        self.private_registration_wiki_id = self.private_registration.wiki_pages_versions[
            'home'][0]
        self.private_registration.wiki_pages_current = {
            'home': self.private_registration_wiki_id}
        self.private_registration.save()
        self.private_registration_url = '/{}wikis/{}/'.format(
            API_BASE, self.private_registration_wiki_id)
    def test_public_node_logged_out_user_can_view_wiki(self):
        """Anonymous users can read a public project's wiki."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)
    def test_public_node_logged_in_non_contributor_can_view_wiki(self):
        """Logged-in non-contributors can read a public project's wiki."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)
    def test_public_node_logged_in_contributor_can_view_wiki(self):
        """Contributors can read a public project's wiki."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)
    def test_private_node_logged_out_user_cannot_view_wiki(self):
        """Anonymous access to a private project's wiki yields 401."""
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'],
                     'Authentication credentials were not provided.')
    def test_private_node_logged_in_non_contributor_cannot_view_wiki(self):
        """Non-contributor access to a private project's wiki yields 403."""
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(
            self.private_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')
    def test_private_node_logged_in_contributor_can_view_wiki(self):
        """Contributors can read a private project's wiki."""
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)
    def test_private_node_user_with_anonymous_link_can_view_wiki(self):
        """An anonymous view-only link grants read access to a private wiki."""
        self._set_up_private_project_with_wiki_page()
        private_link = PrivateLinkFactory(anonymous=True)
        private_link.nodes.add(self.private_project)
        private_link.save()
        url = furl.furl(
            self.private_url).add(
            query_params={
                'view_only': private_link.key}).url
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)
    def test_private_node_user_with_view_only_link_can_view_wiki(self):
        """A non-anonymous view-only link grants read access to a private wiki."""
        self._set_up_private_project_with_wiki_page()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.private_project)
        private_link.save()
        url = furl.furl(
            self.private_url).add(
            query_params={
                'view_only': private_link.key}).url
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)
    def test_public_registration_logged_out_user_cannot_view_wiki(self):
        """Anonymous users CAN view a public registration's wiki (200).

        NOTE(review): the name says "cannot" but the assertions expect
        success -- name kept to avoid breaking CI test selection.
        """
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(self.public_registration_url, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
    def test_public_registration_logged_in_non_contributor_cannot_view_wiki(
            self):
        """Non-contributors CAN view a public registration's wiki (200).

        NOTE(review): name/behavior mismatch -- assertions expect success;
        name kept to avoid breaking CI test selection.
        """
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(
            self.public_registration_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
    def test_public_registration_contributor_can_view_wiki(self):
        """Contributors can read a public registration's wiki."""
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(self.public_registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
    def test_user_cannot_view_withdrawn_registration_wikis(self):
        """Once a registration is withdrawn, even contributors get 403 on its wiki."""
        self._set_up_public_registration_with_wiki_page()
        # TODO: Remove mocking when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            withdrawal = self.public_registration.retract_registration(
                user=self.user, save=True)
            token = withdrawal.approval_state.values()[0]['approval_token']
            withdrawal.approve_retraction(self.user, token)
            withdrawal.save()
        res = self.app.get(
            self.public_registration_url,
            auth=self.user.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')
    def test_private_registration_logged_out_user_cannot_view_wiki(self):
        """Anonymous access to a private registration's wiki yields 401."""
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(self.private_registration_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'],
                     'Authentication credentials were not provided.')
    def test_private_registration_logged_in_non_contributor_cannot_view_wiki(
            self):
        """Non-contributor access to a private registration's wiki yields 403."""
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(
            self.private_registration_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')
    def test_private_registration_contributor_can_view_wiki(self):
        """Contributors can read a private registration's wiki."""
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(self.private_registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_registration_wiki_id)
    def test_wiki_has_user_link(self):
        """The wiki payload links back to its author's user endpoint."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['user']['links']['related']['href']
        expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    def test_wiki_has_node_link(self):
        """The wiki payload links back to its parent node endpoint."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['node']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
    def test_wiki_has_comments_link(self):
        """The wiki's comments relationship URL resolves to its comments."""
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        url = res.json['data']['relationships']['comments']['links']['related']['href']
        # Factory call is for its side effect: create a comment targeting the
        # wiki so the related endpoint has something to return.
        comment = CommentFactory(
            node=self.public_project,
            target=Guid.load(
                self.public_wiki._id),
            user=self.user)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data'][0]['type'], 'comments')
    def test_only_project_contrib_can_comment_on_closed_project(self):
        """With comment_level='private', only contributors may comment."""
        self._set_up_public_project_with_wiki_page(
            project_options={'comment_level': 'private'})
        res = self.app.get(self.public_url, auth=self.user.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, True)
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, False)
    def test_any_loggedin_user_can_comment_on_open_project(self):
        """With comment_level='public', any logged-in user may comment."""
        self._set_up_public_project_with_wiki_page(
            project_options={'comment_level': 'public'})
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self._set_up_public_project_with_wiki_page(
project_options={'comment_level': 'public'})
res = self.app.get(self.public_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_wiki_has_download_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['links']['download']
expected_url = '/{}wikis/{}/content/'.format(
API_BASE, self.public_wiki._id)
assert_equal(res.status_code, 200)
assert_in(expected_url, url)
def test_wiki_invalid_id_not_found(self):
url = '/{}wikis/{}/'.format(API_BASE, 'abcde')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
    def test_old_wiki_versions_not_returned(self):
        # Superseded wiki versions should 404 at the detail endpoint; only
        # the current version of a page is addressable.
        self._set_up_public_project_with_wiki_page()
        # TODO: Remove mocking when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            current_wiki = NodeWikiFactory(
                node=self.public_project, user=self.user)
        # Creating a new version pushes the previous one into
        # wiki_pages_versions; [-2] is the now-superseded version.
        old_version_id = self.public_project.wiki_pages_versions[current_wiki.page_name][-2]
        old_version = NodeWikiPage.load(old_version_id)
        url = '/{}wikis/{}/'.format(API_BASE, old_version._id)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 404)
def test_public_node_wiki_relationship_links(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, self.public_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, self.public_project._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_node_wiki_relationship_links(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, self.private_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, self.private_project._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_public_registration_wiki_relationship_links(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, self.public_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, self.public_registration._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_registration_wiki_relationship_links(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, self.private_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, self.private_registration._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
| 46.84985
| 92
| 0.684123
|
5161faa99fd57d8572d2fc066b9b77b40a95cee5
| 330
|
py
|
Python
|
Section1/taximeter.py
|
koltpython/python-exercises-fall2019
|
716493c5c3c93c3c68723f099376d017ff27ac4d
|
[
"MIT"
] | 2
|
2019-11-07T10:59:51.000Z
|
2019-11-07T12:24:54.000Z
|
Section1/taximeter.py
|
koltpython/python-exercises
|
716493c5c3c93c3c68723f099376d017ff27ac4d
|
[
"MIT"
] | null | null | null |
Section1/taximeter.py
|
koltpython/python-exercises
|
716493c5c3c93c3c68723f099376d017ff27ac4d
|
[
"MIT"
] | 1
|
2020-03-09T10:49:04.000Z
|
2020-03-09T10:49:04.000Z
|
# Simple taximeter: base fare of 4 plus 1.15 per kilometre.
passenger = input('Welcome, what is your name: ')
origin = input(f'Hi, {passenger}! Where are you going from?')
destination = input('And, where are we going?')
distance_km = float(input('What is the total distance in km?'))
fare = 4 + 1.15 * distance_km
print(f'Okay, {passenger}. You are travelling from {origin} to {destination} '
      f'which is {distance_km} kms and your total is {fare}')
| 41.25
| 109
| 0.669697
|
bb99314c6c2294ad43d659308b7534b853294545
| 1,512
|
py
|
Python
|
PSME/lui/OS/rootfs/opt/intel/rackscale/include/psme_xml_structure/lui_model_managers/cpu_id_manager.py
|
opencomputeproject/HWMgmt-DeviceMgr-PSME
|
2a00188aab6f4bef3776987f0842ef8a8ea972ac
|
[
"Apache-2.0"
] | 5
|
2021-10-07T15:36:37.000Z
|
2022-03-01T07:21:49.000Z
|
PSME/lui/OS/rootfs/opt/intel/rackscale/include/psme_xml_structure/lui_model_managers/cpu_id_manager.py
|
opencomputeproject/DM-Redfish-PSME
|
912f7b6abf5b5c2aae33c75497de4753281c6a51
|
[
"Apache-2.0"
] | null | null | null |
PSME/lui/OS/rootfs/opt/intel/rackscale/include/psme_xml_structure/lui_model_managers/cpu_id_manager.py
|
opencomputeproject/DM-Redfish-PSME
|
912f7b6abf5b5c2aae33c75497de4753281c6a51
|
[
"Apache-2.0"
] | 1
|
2021-03-24T19:37:58.000Z
|
2021-03-24T19:37:58.000Z
|
"""
* @section LICENSE
*
* @copyright
* Copyright (c) 2015-2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
"""
from include.psme_xml_structure.managers.cpu_id_manager import CpuIdManager as CpuIdManager_abstract
from include.psme_xml_structure.managers.managers_list import ManagersTypes
from include.libs.cpuinfo import cpuinfo
from include.common.globals import *
class CpuIdManager(CpuIdManager_abstract):
    """LUI-side manager filling CPUID details from the local cpuinfo library."""
    @classmethod
    def set_fields(cls, cpu_id, data, context=None):
        """Populate *cpu_id* from cpuinfo when called by the processor manager.

        For any other *context* the object is returned untouched.
        """
        if context != ManagersTypes.PROCESSOR_MANAGER:
            return cpu_id
        info = cpuinfo.get_cpu_info()
        cpu_id.vendorId = info[CPUID_VENDOR_ID]
        cpu_id.numericId = 0
        cpu_id.family = info[CPUID_FAMILY]
        cpu_id.model = info[CPUID_MODEL]
        cpu_id.step = info[CPUID_STEPPING]
        cpu_id.microcodeInfo = info[CPUID_MICROCODE]
        return cpu_id
| 34.363636
| 100
| 0.725529
|
dafe6a918f761a32a82d45486b0400d9aa653315
| 6,592
|
py
|
Python
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/cameras/handlers/follow_track_offset_camera.py
|
timmattison/amazon-sagemaker-examples
|
4277a89d4279827bcdcacb30076ec18bf355b00d
|
[
"Apache-2.0"
] | 4
|
2020-03-26T14:26:13.000Z
|
2021-06-29T11:14:58.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/cameras/handlers/follow_track_offset_camera.py
|
timmattison/amazon-sagemaker-examples
|
4277a89d4279827bcdcacb30076ec18bf355b00d
|
[
"Apache-2.0"
] | 5
|
2020-09-26T00:44:52.000Z
|
2022-02-10T01:06:32.000Z
|
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/cameras/handlers/follow_track_offset_camera.py
|
timmattison/amazon-sagemaker-examples
|
4277a89d4279827bcdcacb30076ec18bf355b00d
|
[
"Apache-2.0"
] | 8
|
2020-12-14T15:49:24.000Z
|
2022-03-23T18:38:36.000Z
|
import numpy as np
import rospy
import math
from gazebo_msgs.srv import SetModelState, GetModelState
from gazebo_msgs.msg import ModelState, ModelStates
from markov.deepracer_exceptions import GenericRolloutException
from markov.rospy_wrappers import ServiceProxyWrapper
from markov.track_geom.track_data import TrackData
from markov.cameras import utils
from markov.track_geom.utils import euler_to_quaternion, apply_orientation
from markov.cameras.abs_camera import BaseCamera
from shapely.geometry import Point
class FollowTrackOffsetCamera(BaseCamera):
    """Singleton third-person camera that follows the track center line,
    trailing the car from a fixed offset behind the nearest center-line
    point while looking down at it.
    """
    _instance_ = None
    # Gazebo model/topic name for this camera.
    name = "follow_track_offset_camera"
    @staticmethod
    def get_instance():
        """Method for getting a reference to the camera object"""
        if FollowTrackOffsetCamera._instance_ is None:
            FollowTrackOffsetCamera()
        return FollowTrackOffsetCamera._instance_
    def __init__(self):
        # Enforce the singleton: constructing a second instance is an error.
        if FollowTrackOffsetCamera._instance_ is not None:
            raise GenericRolloutException("Attempting to construct multiple follow track camera")
        super(FollowTrackOffsetCamera, self).__init__(FollowTrackOffsetCamera.name)
        rospy.wait_for_service('/gazebo/set_model_state')
        self.model_state_client = ServiceProxyWrapper('/gazebo/set_model_state', SetModelState)
        self.track_data = TrackData.get_instance()
        # Camera Configuration constants
        self.look_down_angle_rad = math.pi / 6.0  # 30 degree
        self.cam_dist_offset = 1.2   # distance behind the center-line point
        self.cam_fixed_height = 1.0  # camera z is held constant
        self.damping = 1.0           # lerp rate for position/heading smoothing
        # Camera states
        self.last_yaw = 0.0
        self.last_camera_state = None
        # there should be only one video camera instance
        FollowTrackOffsetCamera._instance_ = self
    def _reset(self, car_model_state):
        """Snap the camera directly to its target pose (no smoothing)."""
        camera_model_state = ModelState()
        camera_model_state.model_name = self.topic_name
        # Calculate target Camera position based on nearest center track from the car.
        # 1. Project the car position to 1-d global distance of track
        # 2. Minus camera offset from the point of center track
        near_dist = self.track_data._center_line_.project(
            Point(car_model_state.pose.position.x, car_model_state.pose.position.y))
        near_pnt_ctr = self.track_data._center_line_.interpolate(near_dist)
        yaw = self.track_data._center_line_.interpolate_yaw(distance=near_dist, normalized=False, reverse_dir=False,
                                                            position=near_pnt_ctr)
        quaternion = np.array(euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=yaw))
        target_camera_location = np.array([near_pnt_ctr.x,
                                           near_pnt_ctr.y,
                                           0.0]) + \
                                 apply_orientation(quaternion, np.array([-self.cam_dist_offset, 0, 0]))
        # Calculate camera rotation quaternion based on lookAt yaw with respect to
        # current camera position and car position
        look_at_yaw = utils.get_angle_between_two_points_2d_rad(Point(target_camera_location[0],
                                                                      target_camera_location[1]),
                                                                car_model_state.pose.position)
        cam_quaternion = euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=look_at_yaw)
        camera_model_state.pose.position.x = target_camera_location[0]
        camera_model_state.pose.position.y = target_camera_location[1]
        camera_model_state.pose.position.z = self.cam_fixed_height
        camera_model_state.pose.orientation.x = cam_quaternion[0]
        camera_model_state.pose.orientation.y = cam_quaternion[1]
        camera_model_state.pose.orientation.z = cam_quaternion[2]
        camera_model_state.pose.orientation.w = cam_quaternion[3]
        self.model_state_client(camera_model_state)
        self.last_camera_state = camera_model_state
        self.last_yaw = yaw
    def _update(self, car_model_state, delta_time):
        """Smoothly track the car: lerp position and heading toward the target pose."""
        # Calculate target Camera position based on nearest center track from the car.
        near_dist = self.track_data._center_line_.project(
            Point(car_model_state.pose.position.x, car_model_state.pose.position.y))
        near_pnt_ctr = self.track_data._center_line_.interpolate(near_dist)
        yaw = self.track_data._center_line_.interpolate_yaw(distance=near_dist, normalized=False, reverse_dir=False,
                                                            position=near_pnt_ctr)
        # Damped angular interpolation avoids sudden heading swings.
        yaw = utils.lerp_angle_rad(self.last_yaw, yaw, delta_time * self.damping)
        quaternion = np.array(euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=yaw))
        target_camera_location = np.array([near_pnt_ctr.x,
                                           near_pnt_ctr.y,
                                           0.0]) + \
                                 apply_orientation(quaternion, np.array([-self.cam_dist_offset, 0, 0]))
        target_camera_location_2d = target_camera_location[0:2]
        # Linear interpolate Camera position to target position
        cur_camera_2d_pos = np.array([self.last_camera_state.pose.position.x,
                                      self.last_camera_state.pose.position.y])
        new_cam_pos_2d = utils.lerp(cur_camera_2d_pos, target_camera_location_2d, delta_time * self.damping)
        # Calculate camera rotation quaternion based on lookAt yaw
        look_at_yaw = utils.get_angle_between_two_points_2d_rad(self.last_camera_state.pose.position,
                                                                car_model_state.pose.position)
        cam_quaternion = euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=look_at_yaw)
        # Configure Camera Model State
        camera_model_state = ModelState()
        camera_model_state.model_name = self.topic_name
        camera_model_state.pose.position.x = new_cam_pos_2d[0]
        camera_model_state.pose.position.y = new_cam_pos_2d[1]
        camera_model_state.pose.position.z = self.cam_fixed_height
        camera_model_state.pose.orientation.x = cam_quaternion[0]
        camera_model_state.pose.orientation.y = cam_quaternion[1]
        camera_model_state.pose.orientation.z = cam_quaternion[2]
        camera_model_state.pose.orientation.w = cam_quaternion[3]
        self.model_state_client(camera_model_state)
        self.last_camera_state = camera_model_state
        self.last_yaw = yaw
| 52.31746
| 116
| 0.68037
|
08168373ee3fb5fa618c1293796a6a9042bd9b66
| 16,472
|
py
|
Python
|
statsmodels/tsa/vector_ar/tests/test_var.py
|
kevindavenport/statsmodels
|
c8e980da57cb9d5be64872ad5e13893e6ae7cd0d
|
[
"BSD-3-Clause"
] | 1
|
2016-09-02T20:31:32.000Z
|
2016-09-02T20:31:32.000Z
|
statsmodels/tsa/vector_ar/tests/test_var.py
|
kevindavenport/statsmodels
|
c8e980da57cb9d5be64872ad5e13893e6ae7cd0d
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/tsa/vector_ar/tests/test_var.py
|
kevindavenport/statsmodels
|
c8e980da57cb9d5be64872ad5e13893e6ae7cd0d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test VAR Model
"""
from __future__ import print_function
# pylint: disable=W0612,W0231
from statsmodels.compat.python import iteritems, StringIO, lrange, BytesIO, range
from nose.tools import assert_raises
import nose
import os
import sys
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.vector_ar.var_model as model
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tools.data as data_util
from statsmodels.tsa.vector_ar.var_model import VAR
from numpy.testing import assert_almost_equal, assert_equal, assert_
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
    """Mixin of assertions comparing a fitted VAR (``res1``) against
    reference results (``res2``); subclasses supply both attributes.
    """
    # just so pylint won't complain
    res1 = None
    res2 = None
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
    def test_neqs(self):
        assert_equal(self.res1.neqs, self.res2.neqs)
    def test_nobs(self):
        # reference object stores the effective sample size as ``nobs``
        assert_equal(self.res1.avobs, self.res2.nobs)
    def test_df_eq(self):
        assert_equal(self.res1.df_eq, self.res2.df_eq)
    def test_rmse(self):
        results = self.res1.results
        for i in range(len(results)):
            # getattr() replaces the old eval() of generated source: same
            # attribute lookup without executing dynamically-built code.
            assert_almost_equal(results[i].mse_resid**.5,
                    getattr(self.res2, 'rmse_%d' % (i + 1)), DECIMAL_6)
    def test_rsquared(self):
        results = self.res1.results
        for i in range(len(results)):
            assert_almost_equal(results[i].rsquared,
                    getattr(self.res2, 'rsquared_%d' % (i + 1)), DECIMAL_3)
    def test_llf(self):
        results = self.res1.results
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
        for i in range(len(results)):
            assert_almost_equal(results[i].llf,
                    getattr(self.res2, 'llf_%d' % (i + 1)), DECIMAL_2)
    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic)
    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic)
    def test_hqic(self):
        assert_almost_equal(self.res1.hqic, self.res2.hqic)
    def test_fpe(self):
        assert_almost_equal(self.res1.fpe, self.res2.fpe)
    def test_detsig(self):
        assert_almost_equal(self.res1.detomega, self.res2.detsig)
    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
    """Return log-differenced realgdp/realcons/realinv as a structured array."""
    data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]
    names = data.dtype.names  # NOTE(review): unused — kept for parity with original
    values = data.view((float, 3))
    diffed = np.diff(np.log(values), axis=0)
    return diffed.ravel().view(data.dtype)
def generate_var():
    # Regenerate the reference results by running tests/var.R through rpy2;
    # requires an R install with the "vars" package plus the legacy
    # pandas.rpy bridge.
    from rpy2.robjects import r
    import pandas.rpy.common as prp
    r.source('tests/var.R')
    return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var():
    # One-off helper: dump freshly generated R reference results to disk.
    result = generate_var()
    np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
    """
    Simple interface with results generated by "vars" package in R.

    Unpacks the reference-data module into plain attributes (params,
    stderr, irf, information criteria, ...) so test classes can compare
    against statsmodels output directly.
    """
    def __init__(self):
        #data = np.load(resultspath + 'vars_results.npz')
        from .results.results_var_data import var_results
        data = var_results.__dict__
        # Coefficients arrive as a structured array; view them as floats.
        self.names = data['coefs'].dtype.names
        self.params = data['coefs'].view((float, len(self.names)))
        self.stderr = data['stderr'].view((float, len(self.names)))
        self.irf = data['irf'].item()
        self.orth_irf = data['orthirf'].item()
        self.nirfs = int(data['nirfs'][0])
        self.nobs = int(data['obs'][0])
        self.totobs = int(data['totobs'][0])
        # Information criteria reported by R.
        crit = data['crit'].item()
        self.aic = crit['aic'][0]
        self.sic = self.bic = crit['sic'][0]
        self.hqic = crit['hqic'][0]
        self.fpe = crit['fpe'][0]
        self.detomega = data['detomega'][0]
        self.loglike = data['loglike'][0]
        self.nahead = int(data['nahead'][0])
        self.ma_rep = data['phis']
        self.causality = data['causality']
def close_plots():
    """Close all open matplotlib figures; a no-op when matplotlib is absent."""
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return
    plt.close('all')
# Saved reference to the real stdout so teardown_module can restore it.
_orig_stdout = None
def setup_module():
    # Suppress test chatter: swap the real stdout for an in-memory buffer.
    global _orig_stdout
    _orig_stdout = sys.stdout
    sys.stdout = StringIO()
def teardown_module():
    # Restore stdout and close any figures the tests left open.
    sys.stdout = _orig_stdout
    close_plots()
def have_matplotlib():
    """Report whether matplotlib is importable in this environment."""
    try:
        import matplotlib
    except ImportError:
        return False
    return True
class CheckIRF(object):
    """Mixin checking impulse-response results against the R reference.

    Subclasses provide ``ref`` (R results), ``res`` (fitted VAR results),
    ``irf`` (IRF analysis object) and ``k`` (number of equations).
    """
    ref = None
    res = None
    irf = None
    k = None
    #---------------------------------------------------------------------------
    # IRF tests
    def test_irf_coefs(self):
        # Both plain and orthogonalized IRFs must match R.
        self._check_irfs(self.irf.irfs, self.ref.irf)
        self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
    def _check_irfs(self, py_irfs, r_irfs):
        for idx, varname in enumerate(self.res.names):
            expected = r_irfs[varname].view((float, self.k))
            actual = py_irfs[:, :, idx]
            assert_almost_equal(expected, actual)
    def test_plot_irf(self):
        if not have_matplotlib():
            raise nose.SkipTest
        # Smoke-test every supported argument combination.
        for kwargs in ({}, {'plot_stderr': False},
                       {'impulse': 0, 'response': 1},
                       {'impulse': 0}, {'response': 0},
                       {'orth': True},
                       {'impulse': 0, 'response': 1, 'orth': True}):
            self.irf.plot(**kwargs)
        close_plots()
    def test_plot_cum_effects(self):
        if not have_matplotlib():
            raise nose.SkipTest
        for kwargs in ({}, {'plot_stderr': False},
                       {'impulse': 0, 'response': 1},
                       {'orth': True},
                       {'impulse': 0, 'response': 1, 'orth': True}):
            self.irf.plot_cum_effects(**kwargs)
        close_plots()
class CheckFEVD(object):
    """Mixin smoke-testing forecast-error variance decomposition output."""
    fevd = None
    #---------------------------------------------------------------------------
    # FEVD tests
    def test_fevd_plot(self):
        if not have_matplotlib():
            raise nose.SkipTest
        self.fevd.plot()
        close_plots()
    def test_fevd_repr(self):
        # Evaluating the object must not raise.
        self.fevd
    def test_fevd_summary(self):
        self.fevd.summary()
    def test_fevd_cov(self):
        # test does not crash
        # not implemented
        # covs = self.fevd.cov()
        pass
class TestVARResults(CheckIRF, CheckFEVD):
    """End-to-end checks of VAR estimation on the macro data against the
    R "vars" package reference results (RResults)."""
    @classmethod
    def setupClass(cls):
        # Fit once for the whole class: a VAR(2) on the macro data, plus the
        # derived IRF and FEVD objects that CheckIRF/CheckFEVD exercise.
        cls.p = 2
        cls.data = get_macrodata()
        cls.model = VAR(cls.data)
        cls.names = cls.model.endog_names
        cls.ref = RResults()
        cls.k = len(cls.ref.names)
        cls.res = cls.model.fit(maxlags=cls.p)
        cls.irf = cls.res.irf(cls.ref.nirfs)
        cls.nahead = cls.ref.nahead
        cls.fevd = cls.res.fevd()
    def test_constructor(self):
        # make sure this works with no names
        ndarr = self.data.view((float, 3))
        model = VAR(ndarr)
        res = model.fit(self.p)
    def test_names(self):
        assert_equal(self.model.endog_names, self.ref.names)
        model2 = VAR(self.data)
        assert_equal(model2.endog_names, self.ref.names)
    def test_get_eq_index(self):
        # Integer and name lookups must agree with each other.
        assert(type(self.res.names) is list)
        for i, name in enumerate(self.names):
            idx = self.res.get_eq_index(i)
            idx2 = self.res.get_eq_index(name)
            assert_equal(idx, i)
            assert_equal(idx, idx2)
        assert_raises(Exception, self.res.get_eq_index, 'foo')
    def test_repr(self):
        # just want this to work
        foo = str(self.res)
        bar = repr(self.res)
    def test_params(self):
        assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
    def test_cov_params(self):
        # do nothing for now
        self.res.cov_params
    def test_cov_ybar(self):
        self.res.cov_ybar()
    def test_tstat(self):
        self.res.tvalues
    def test_pvalues(self):
        self.res.pvalues
    def test_summary(self):
        summ = self.res.summary()
    def test_detsig(self):
        assert_almost_equal(self.res.detomega, self.ref.detomega)
    def test_aic(self):
        assert_almost_equal(self.res.aic, self.ref.aic)
    def test_bic(self):
        assert_almost_equal(self.res.bic, self.ref.bic)
    def test_hqic(self):
        assert_almost_equal(self.res.hqic, self.ref.hqic)
    def test_fpe(self):
        assert_almost_equal(self.res.fpe, self.ref.fpe)
    def test_lagorder_select(self):
        # Each supported criterion must run; unknown criteria must raise.
        ics = ['aic', 'fpe', 'hqic', 'bic']
        for ic in ics:
            res = self.model.fit(maxlags=10, ic=ic, verbose=True)
        assert_raises(Exception, self.model.fit, ic='foo')
    def test_nobs(self):
        assert_equal(self.res.nobs, self.ref.nobs)
    def test_stderr(self):
        assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
    def test_loglike(self):
        assert_almost_equal(self.res.llf, self.ref.loglike)
    def test_ma_rep(self):
        ma_rep = self.res.ma_rep(self.nahead)
        assert_almost_equal(ma_rep, self.ref.ma_rep)
    #--------------------------------------------------
    # Lots of tests to make sure stuff works...need to check correctness
    def test_causality(self):
        causedby = self.ref.causality['causedby']
        for i, name in enumerate(self.names):
            variables = self.names[:i] + self.names[i + 1:]
            result = self.res.test_causality(name, variables, kind='f')
            assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)
            # Integer indices must give the same answer as names.
            rng = lrange(self.k)
            rng.remove(i)
            result2 = self.res.test_causality(i, rng, kind='f')
            assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)
            # make sure works
            result = self.res.test_causality(name, variables, kind='wald')
        # corner cases
        _ = self.res.test_causality(self.names[0], self.names[1])
        _ = self.res.test_causality(0, 1)
        assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')
    def test_select_order(self):
        result = self.model.fit(10, ic='aic', verbose=True)
        result = self.model.fit(10, ic='fpe', verbose=True)
        # bug
        model = VAR(self.model.endog)
        model.select_order()
    def test_is_stable(self):
        # may not necessarily be true for other datasets
        assert(self.res.is_stable(verbose=True))
    def test_acf(self):
        # test that it works...for now
        acfs = self.res.acf(10)
        # defaults to nlags=lag_order
        acfs = self.res.acf()
        assert(len(acfs) == self.p + 1)
    def test_acorr(self):
        acorrs = self.res.acorr(10)
    def test_forecast(self):
        point = self.res.forecast(self.res.y[-5:], 5)
    def test_forecast_interval(self):
        y = self.res.y[:-self.p:]
        point, lower, upper = self.res.forecast_interval(y, 5)
    def test_plot_sim(self):
        if not have_matplotlib():
            raise nose.SkipTest
        self.res.plotsim(steps=100)
        close_plots()
    def test_plot(self):
        if not have_matplotlib():
            raise nose.SkipTest
        self.res.plot()
        close_plots()
    def test_plot_acorr(self):
        if not have_matplotlib():
            raise nose.SkipTest
        self.res.plot_acorr()
        close_plots()
    def test_plot_forecast(self):
        if not have_matplotlib():
            raise nose.SkipTest
        self.res.plot_forecast(5)
        close_plots()
    def test_reorder(self):
        #manually reorder
        data = self.data.view((float,3))
        names = self.names
        data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)
        names2 = []
        names2.append(names[2])
        names2.append(names[0])
        names2.append(names[1])
        res2 = VAR(data2).fit(maxlags=self.p)
        #use reorder function
        res3 = self.res.reorder(['realinv','realgdp', 'realcons'])
        #check if the main results match
        assert_almost_equal(res2.params, res3.params)
        assert_almost_equal(res2.sigma_u, res3.sigma_u)
        assert_almost_equal(res2.bic, res3.bic)
        assert_almost_equal(res2.stderr, res3.stderr)
    def test_pickle(self):
        fh = BytesIO()
        #test wrapped results load save pickle
        self.res.save(fh)
        fh.seek(0,0)
        res_unpickled = self.res.__class__.load(fh)
        assert_(type(res_unpickled) is type(self.res))
class E1_Results(object):
    """
    Results from Lutkepohl (2005) using E2 dataset
    """
    def __init__(self):
        # Lutkepohl p. 120 results
        # I asked the author about these results and there is probably rounding
        # error in the book, so I adjusted these test results to match what is
        # coming out of the Python (double-checked) calculations
        # Standard errors of the impulse responses, steps 1-3 (3x3 per step).
        self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],
                                     [0.032, 0.139, 0.169],
                                     [0.026, 0.112, 0.136]],
                                    [[0.129, 0.547, 0.663],
                                     [0.032, 0.134, 0.163],
                                     [0.026, 0.108, 0.131]],
                                    [[0.084, .385, .479],
                                     [.016, .079, .095],
                                     [.016, .078, .103]]])
        # Standard errors of the cumulative impulse responses.
        self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],
                                         [0.032, 0.139, 0.169],
                                         [0.026, 0.112, 0.136]],
                                        [[0.149, 0.631, 0.764],
                                         [0.044, 0.185, 0.224],
                                         [0.033, 0.140, 0.169]],
                                        [[0.099, .468, .555],
                                         [.038, .170, .205],
                                         [.033, .150, .185]]])
        # Standard errors of the long-run effects.
        self.lr_stderr = np.array([[.134, .645, .808],
                                   [.048, .230, .288],
                                   [.043, .208, .260]])
# Locations of the test data and reference results inside the installed
# statsmodels package.
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
    # Parse one of the Lutkepohl example datasets (e1..e6) shipped with
    # statsmodels; returns (structured data, dates).
    lut_data = basepath + '/tsa/vector_ar/data/'
    path = lut_data + '%s.dat' % name
    return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
    """All six Lutkepohl example files should parse without error."""
    for name in ('e1', 'e2', 'e3', 'e4', 'e5', 'e6'):
        get_lutkepohl_data(name)
class TestVARResultsLutkepohl(object):
    """
    Verify calculations using results from Lutkepohl's book
    """
    def __init__(self):
        # Fit a VAR(2) to the log-differenced e1 dataset, holding out the
        # last 16 observations, mirroring the book's setup.
        self.p = 2
        sdata, dates = get_lutkepohl_data('e1')
        data = data_util.struct_to_ndarray(sdata)
        adj_data = np.diff(np.log(data), axis=0)
        # est = VAR(adj_data, p=2, dates=dates[1:], names=names)
        self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='Q')
        self.res = self.model.fit(maxlags=self.p)
        self.irf = self.res.irf(10)
        self.lut = E1_Results()
    def test_approx_mse(self):
        # 3.5.18, p. 99
        mse2 = np.array([[25.12, .580, 1.300],
                         [.580, 1.581, .586],
                         [1.300, .586, 1.009]]) * 1e-4
        assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
                            DECIMAL_3)
    def test_irf_stderr(self):
        # Compare nonorthogonalized IRF standard errors, rounded to 3 dp,
        # against the book's tables (step 0 is skipped: no uncertainty).
        irf_stderr = self.irf.stderr(orth=False)
        for i in range(1, 1 + len(self.lut.irf_stderr)):
            assert_almost_equal(np.round(irf_stderr[i], 3),
                                self.lut.irf_stderr[i-1])
    def test_cum_irf_stderr(self):
        stderr = self.irf.cum_effect_stderr(orth=False)
        for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
            assert_almost_equal(np.round(stderr[i], 3),
                                self.lut.cum_irf_stderr[i-1])
    def test_lr_effect_stderr(self):
        stderr = self.irf.lr_effect_stderr(orth=False)
        orth_stderr = self.irf.lr_effect_stderr(orth=True)
        assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
    """util.get_trendorder maps trend specification strings to polynomial orders."""
    expected = {
        'c' : 1,
        'nc' : 0,
        'ct' : 2,
        'ctt' : 3
    }
    for trend, order in iteritems(expected):
        assert(util.get_trendorder(trend) == order)
if __name__ == '__main__':
    import nose
    # -vvs: verbose with no output capture; -x: stop on first failure;
    # drop into pdb on errors and failures for interactive debugging.
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
| 29.309609
| 100
| 0.578497
|
b9e2fb17df5b2409f1a86d712c15bff121cf6979
| 11,039
|
py
|
Python
|
tests/unit/bokeh/embed/test_standalone.py
|
dkapitan/bokeh
|
d518cecd1d9919db49e3c0033e8c1b89db9965bf
|
[
"BSD-3-Clause"
] | 1
|
2020-02-07T16:57:56.000Z
|
2020-02-07T16:57:56.000Z
|
tests/unit/bokeh/embed/test_standalone.py
|
dkapitan/bokeh
|
d518cecd1d9919db49e3c0033e8c1b89db9965bf
|
[
"BSD-3-Clause"
] | 1
|
2021-05-11T04:37:17.000Z
|
2021-05-11T04:37:17.000Z
|
tests/unit/bokeh/embed/test_standalone.py
|
dkapitan/bokeh
|
d518cecd1d9919db49e3c0033e8c1b89db9965bf
|
[
"BSD-3-Clause"
] | 1
|
2020-03-06T07:38:50.000Z
|
2020-03-06T07:38:50.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import OrderedDict
# External imports
import bs4
from jinja2 import Template
from mock import patch
# Bokeh imports
from bokeh.document import Document
from bokeh.embed.util import RenderRoot, standalone_docs_json
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.resources import CDN, CSSResources, JSResources
# Module under test
import bokeh.embed.standalone as bes # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
def stable_id():
    """Deterministic stand-in for make_globally_unique_id, used by the patches below."""
    return 'ID'
@pytest.fixture
def test_plot():
    """Fixture: a minimal figure with a single circle glyph.

    The previous ``-> None`` annotation was incorrect (the fixture returns
    the plot) and has been dropped; the redundant local import of ``figure``
    (already imported at module level) is removed as well.
    """
    plot = figure()
    plot.circle([1, 2], [2, 3])
    return plot
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_autoload_static(object):
    """Tests for bes.autoload_static (script + tag for static embedding)."""
    def test_return_type(self, test_plot) -> None:
        result = bes.autoload_static(test_plot, CDN, "some/path")
        assert len(result) == 2
    def test_script_attrs(self, test_plot) -> None:
        js, tag = bes.autoload_static(test_plot, CDN, "some/path")
        soup = bs4.BeautifulSoup(tag, "lxml")
        script_tags = soup.findAll(name='script')
        assert len(script_tags) == 1
        attrs = script_tags[0].attrs
        assert set(attrs) == set(['src', 'id'])
        assert attrs['src'] == 'some/path'
class Test_components(object):
def test_return_type(self) -> None:
plot1 = figure()
plot1.circle([], [])
plot2 = figure()
plot2.circle([], [])
# This is a testing artefact, users dont' have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = bes.components(plot1)
assert len(r) == 2
_, divs = bes.components((plot1, plot2))
assert isinstance(divs, tuple)
_, divs = bes.components([plot1, plot2])
assert isinstance(divs, tuple)
_, divs = bes.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs, dict)
assert all(isinstance(x, str) for x in divs.keys())
_, divs = bes.components(OrderedDict([("Plot 1", plot1), ("Plot 2", plot2)]))
assert isinstance(divs, OrderedDict)
assert all(isinstance(x, str) for x in divs.keys())
@patch('bokeh.embed.util.make_globally_unique_id', new_callable=lambda: stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(self, mock_make_id) -> None:
doc = Document()
plot1 = figure()
plot1.circle([], [])
doc.add_root(plot1)
plot2 = figure()
plot2.circle([], [])
doc.add_root(plot2)
expected_plotdict_1 = RenderRoot(elementid="ID", id="ID")
expected_plotdict_2 = RenderRoot(elementid="ID", id="ID")
_, plotdict = bes.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = bes.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = bes.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
def test_result_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
assert scripts[0].attrs == {'type': 'text/javascript'}
@patch('bokeh.embed.util.make_globally_unique_id', new=stable_id)
def test_div_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(div, "lxml")
divs = html.findAll(name='div')
assert len(divs) == 1
div = divs[0]
assert set(div.attrs) == set(['class', 'id', 'data-root-id'])
assert div.attrs['class'] == ['bk-root']
assert div.attrs['id'] == 'ID'
assert div.attrs['data-root-id'] == test_plot.id
assert div.text == ''
def test_script_is_utf8_encoded(self, test_plot) -> None:
script, div = bes.components(test_plot)
assert isinstance(script, str)
    def test_output_is_without_script_tag_when_wrap_script_is_false(self, test_plot) -> None:
        # NOTE(review): despite its name, this test currently only checks the
        # default (wrapped) output — the wrap_script=False comparison below is
        # commented out pending an indentation-insensitive comparison.
        script, div = bes.components(test_plot)
        html = bs4.BeautifulSoup(script, "lxml")
        scripts = html.findAll(name='script')
        assert len(scripts) == 1
        # XXX: this needs to account for indentation
        #script_content = scripts[0].getText()
        #rawscript, div = bes.components(test_plot, wrap_script=False)
        #self.maxDiff = None
        #assert rawscript.strip() == script_content.strip()
class Test_file_html(object):
    """Tests for bes.file_html: rendering a document to a complete HTML page."""
    def test_return_type(self, test_plot) -> None:
        """file_html returns a str, both with the default template and with a
        user-supplied template (optionally carrying extra template variables).
        """
        class fake_template:
            # Minimal stand-in for a Jinja template: render() verifies that
            # file_html supplied at least the expected template variables.
            def __init__(self, tester, user_template_variables=None):
                self.tester = tester
                self.template_variables = {
                    "title",
                    "bokeh_js",
                    "bokeh_css",
                    "plot_script",
                    "doc",
                    "docs",
                    "base",
                }
                if user_template_variables is not None:
                    self.template_variables.update(user_template_variables)
            def render(self, template_variables):
                assert self.template_variables.issubset(set(template_variables.keys()))
                return "template result"
        r = bes.file_html(test_plot, CDN, "title")
        assert isinstance(r, str)
        r = bes.file_html(test_plot, CDN, "title", fake_template(self))
        assert isinstance(r, str)
        r = bes.file_html(test_plot, CDN, "title",
                          fake_template(self, {"test_var"}),
                          {"test_var": "test"})
        assert isinstance(r, str)
    @patch('bokeh.embed.bundle.warn')
    def test_file_html_handles_js_only_resources(self, mock_warn, test_plot) -> None:
        """A (js, None) resources tuple renders the JS into the template."""
        js_resources = JSResources(mode="relative", components=["bokeh"])
        template = Template("<head>{{ bokeh_js }}</head><body></body>")
        output = bes.file_html(test_plot, (js_resources, None), "title", template=template)
        html = "<head>%s</head><body></body>" % js_resources.render_js()
        assert output == html
    @patch('bokeh.embed.bundle.warn')
    def test_file_html_provides_warning_if_no_css(self, mock_warn, test_plot) -> None:
        """Missing CSS resources should emit exactly one warning."""
        js_resources = JSResources()
        bes.file_html(test_plot, (js_resources, None), "title")
        mock_warn.assert_called_once_with(
            'No Bokeh CSS Resources provided to template. If required you will need to provide them manually.'
        )
    @patch('bokeh.embed.bundle.warn')
    def test_file_html_handles_css_only_resources(self, mock_warn, test_plot) -> None:
        """A (None, css) resources tuple renders the CSS into the template."""
        css_resources = CSSResources(mode="relative", components=["bokeh"])
        template = Template("<head>{{ bokeh_css }}</head><body></body>")
        output = bes.file_html(test_plot, (None, css_resources), "title", template=template)
        html = "<head>%s</head><body></body>" % css_resources.render_css()
        assert output == html
    @patch('bokeh.embed.bundle.warn')
    def test_file_html_provides_warning_if_no_js(self, mock_warn, test_plot) -> None:
        """Missing JS resources should emit exactly one warning."""
        css_resources = CSSResources()
        bes.file_html(test_plot, (None, css_resources), "title")
        mock_warn.assert_called_once_with(
            'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
        )
    def test_file_html_title_is_escaped(self, test_plot) -> None:
        """HTML-special characters in the title must be entity-escaped."""
        r = bes.file_html(test_plot, CDN, "&<")
        assert "<title>&amp;&lt;</title>" in r
    def test_entire_doc_is_not_used(self) -> None:
        """Passing a subset of roots must only embed resources those roots
        need — the Button in the same Document must not pull in widgets JS.
        """
        from bokeh.document import Document
        from bokeh.models import Button
        fig = figure()
        fig.x([0], [0])
        button = Button(label="Button")
        d = Document()
        d.add_root(fig)
        d.add_root(button)
        out = bes.file_html([fig], CDN)
        # this is a very coarse test but it will do
        assert "bokeh-widgets" not in out
class Test_json_item(object):
    """Tests for bes.json_item: serializing a model to an embeddable JSON dict."""
    def test_with_target_id(self, test_plot) -> None:
        """An explicit target is passed through as target_id."""
        out = bes.json_item(test_plot, target="foo")
        assert out['target_id'] == "foo"
    def test_without_target_id(self, test_plot) -> None:
        """With no target, target_id must be None."""
        out = bes.json_item(test_plot)
        # fixed: identity comparison with None (PEP 8 / E711), not '=='
        assert out['target_id'] is None
    def test_doc_json(self, test_plot) -> None:
        """The embedded 'doc' equals the standalone JSON for the plot."""
        out = bes.json_item(test_plot, target="foo")
        expected = list(standalone_docs_json([test_plot]).values())[0]
        assert out['doc'] == expected
    def test_doc_title(self, test_plot) -> None:
        """The synthesized document carries an empty title."""
        out = bes.json_item(test_plot, target="foo")
        assert out['doc']['title'] == ""
    def test_root_id(self, test_plot) -> None:
        """root_id matches the first root id recorded in the doc JSON."""
        out = bes.json_item(test_plot, target="foo")
        assert out['doc']['roots']['root_ids'][0] == out['root_id']
    @patch('bokeh.embed.standalone.OutputDocumentFor')
    def test_apply_theme(self, mock_OFD, test_plot) -> None:
        """json_item forwards its theme argument to OutputDocumentFor."""
        # the subsequent call inside ODF will fail since the model was never
        # added to a document. Ignoring that since we just want to make sure
        # ODF is called with the expected theme arg.
        try:
            bes.json_item(test_plot, theme="foo")
        except ValueError:
            pass
        mock_OFD.assert_called_once_with([test_plot], apply_theme="foo")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__title_from_models(object):
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 37.420339
| 110
| 0.55784
|
7cb5ebd33e19bc59b2398b62314c2c672bff6b4f
| 5,048
|
py
|
Python
|
abcnn/graph.py
|
DeepQuantitative/text_matching
|
7e75d76154613e4063193b3eda0d4ce555f685a1
|
[
"Apache-2.0"
] | 621
|
2019-05-07T06:21:06.000Z
|
2022-03-29T03:18:57.000Z
|
abcnn/graph.py
|
DeepQuantitative/text_matching
|
7e75d76154613e4063193b3eda0d4ce555f685a1
|
[
"Apache-2.0"
] | 16
|
2019-06-15T10:15:32.000Z
|
2022-02-16T06:12:57.000Z
|
abcnn/graph.py
|
DeepQuantitative/text_matching
|
7e75d76154613e4063193b3eda0d4ce555f685a1
|
[
"Apache-2.0"
] | 183
|
2019-05-31T17:02:20.000Z
|
2022-03-16T07:15:05.000Z
|
import tensorflow as tf
from abcnn import args
class Graph:
    """TensorFlow 1.x graph for an ABCNN-style text-matching model.

    Builds placeholders, an embedding lookup, two convolution stages with
    optional attention (flags ``abcnn1`` / ``abcnn2``), and a 2-class
    softmax classifier, then wires up the Adam training op.
    """
    def __init__(self, abcnn1=False, abcnn2=False):
        # p/h: token-id sequences for the two sentences being matched;
        # y: integer class labels (0/1)
        self.p = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='p')
        self.h = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='h')
        self.y = tf.placeholder(dtype=tf.int32, shape=None, name='y')
        # NOTE(review): despite the name 'drop_rate', this is fed as a KEEP
        # probability (see dropout() below) — confirm callers feed keep-prob.
        self.keep_prob = tf.placeholder(dtype=tf.float32, name='drop_rate')
        self.embedding = tf.get_variable(dtype=tf.float32, shape=(args.vocab_size, args.char_embedding_size),
                                         name='embedding')
        # Attention projection weights; +4 matches the 2+2 sequence padding
        # applied in forward()
        self.W0 = tf.get_variable(name="aW",
                                  shape=(args.seq_length + 4, args.char_embedding_size),
                                  initializer=tf.contrib.layers.xavier_initializer(),
                                  regularizer=tf.contrib.layers.l2_regularizer(scale=0.0004))
        self.abcnn1 = abcnn1
        self.abcnn2 = abcnn2
        self.forward()
    def dropout(self, x):
        # keep_prob semantics: fraction of units KEPT, not dropped
        return tf.nn.dropout(x, keep_prob=self.keep_prob)
    def cos_sim(self, v1, v2):
        """Row-wise cosine similarity between v1 and v2 (axis 1)."""
        norm1 = tf.sqrt(tf.reduce_sum(tf.square(v1), axis=1))
        norm2 = tf.sqrt(tf.reduce_sum(tf.square(v2), axis=1))
        dot_products = tf.reduce_sum(v1 * v2, axis=1, name="cos_sim")
        return dot_products / (norm1 * norm2)
    def forward(self):
        """Build the forward pass: embed, pad, (attend,) convolve twice,
        pool, concatenate both sentence representations, and classify."""
        p_embedding = tf.nn.embedding_lookup(self.embedding, self.p)
        h_embedding = tf.nn.embedding_lookup(self.embedding, self.h)
        # add a channel dimension for conv2d
        p_embedding = tf.expand_dims(p_embedding, axis=-1)
        h_embedding = tf.expand_dims(h_embedding, axis=-1)
        # pad 2 timesteps on each side of the sequence axis ("wide" convolution)
        p_embedding = tf.pad(p_embedding, paddings=[[0, 0], [2, 2], [0, 0], [0, 0]])
        h_embedding = tf.pad(h_embedding, paddings=[[0, 0], [2, 2], [0, 0], [0, 0]])
        if self.abcnn1:
            # ABCNN-1 style input attention: similarity matrix from pairwise
            # euclidean distances; 1e-6 guards the sqrt gradient at zero
            euclidean = tf.sqrt(tf.reduce_sum(
                tf.square(tf.transpose(p_embedding, perm=[0, 2, 1, 3]) - tf.transpose(h_embedding, perm=[0, 2, 3, 1])),
                axis=1) + 1e-6)
            attention_matrix = 1 / (euclidean + 1)
            # project attention rows/cols through shared W0 and stack as an
            # extra input channel for each sentence
            p_attention = tf.expand_dims(tf.einsum("ijk,kl->ijl", attention_matrix, self.W0), -1)
            h_attention = tf.expand_dims(
                tf.einsum("ijk,kl->ijl", tf.transpose(attention_matrix, perm=[0, 2, 1]), self.W0), -1)
            p_embedding = tf.concat([p_embedding, p_attention], axis=-1)
            h_embedding = tf.concat([h_embedding, h_attention], axis=-1)
        p = tf.layers.conv2d(p_embedding,
                             filters=args.cnn1_filters,
                             kernel_size=(args.filter_width, args.filter_height))
        h = tf.layers.conv2d(h_embedding,
                             filters=args.cnn1_filters,
                             kernel_size=(args.filter_width, args.filter_height))
        p = self.dropout(p)
        h = self.dropout(h)
        if self.abcnn2:
            # ABCNN-2 style attention pooling: reweight conv outputs by
            # row/column sums of the post-conv similarity matrix
            attention_pool_euclidean = tf.sqrt(
                tf.reduce_sum(tf.square(tf.transpose(p, perm=[0, 3, 1, 2]) - tf.transpose(h, perm=[0, 3, 2, 1])),
                              axis=1))
            attention_pool_matrix = 1 / (attention_pool_euclidean + 1)
            # keep_dims is the TF1 spelling of keepdims
            p_sum = tf.reduce_sum(attention_pool_matrix, axis=2, keep_dims=True)
            h_sum = tf.reduce_sum(attention_pool_matrix, axis=1, keep_dims=True)
            # flatten feature/channel dims before reweighting
            p = tf.reshape(p, shape=(-1, p.shape[1], p.shape[2] * p.shape[3]))
            h = tf.reshape(h, shape=(-1, h.shape[1], h.shape[2] * h.shape[3]))
            p = tf.multiply(p, p_sum)
            h = tf.multiply(h, tf.matrix_transpose(h_sum))
        else:
            p = tf.reshape(p, shape=(-1, p.shape[1], p.shape[2] * p.shape[3]))
            h = tf.reshape(h, shape=(-1, h.shape[1], h.shape[2] * h.shape[3]))
        # second convolution stage over the flattened features
        p = tf.expand_dims(p, axis=3)
        h = tf.expand_dims(h, axis=3)
        p = tf.layers.conv2d(p,
                             filters=args.cnn2_filters,
                             kernel_size=(args.filter_width, args.cnn1_filters))
        h = tf.layers.conv2d(h,
                             filters=args.cnn2_filters,
                             kernel_size=(args.filter_width, args.cnn1_filters))
        p = self.dropout(p)
        h = self.dropout(h)
        # average-pool over the sequence axis, then concat both sentences
        p_all = tf.reduce_mean(p, axis=1)
        h_all = tf.reduce_mean(h, axis=1)
        x = tf.concat((p_all, h_all), axis=2)
        x = tf.reshape(x, shape=(-1, x.shape[1] * x.shape[2]))
        out = tf.layers.dense(x, 50)
        logits = tf.layers.dense(out, 2)
        self.train(logits)
    def train(self, logits):
        """Attach loss, Adam train op, and accuracy to the graph."""
        y = tf.one_hot(self.y, args.class_size)
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
        self.loss = tf.reduce_mean(loss)
        self.train_op = tf.train.AdamOptimizer(args.learning_rate).minimize(self.loss)
        prediction = tf.argmax(logits, axis=1)
        correct_prediction = tf.equal(tf.cast(prediction, tf.int32), self.y)
        self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
| 43.145299
| 119
| 0.571712
|
1c531d22bf8c4576c5bb117a2c359271a0a03d5d
| 528
|
py
|
Python
|
Level1/Ex_3.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 2
|
2019-03-09T20:31:06.000Z
|
2020-06-19T12:15:13.000Z
|
Level1/Ex_3.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | null | null | null |
Level1/Ex_3.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 1
|
2018-08-11T18:36:49.000Z
|
2018-08-11T18:36:49.000Z
|
"""
With a given integral number n, write a program to generate a dictionary that contains (i, i*i) such that is an integral number between 1 and n (both included). and then the program should print the dictionary.
Suppose the following input is supplied to the program:
8
Then, the output should be:
{1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}
"""
n=int(input())
d= dict()
for i in range(1,n+1):
d[i]=i*i
print(d)
"""
Output :
10
{1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81, 10: 100}
"""
| 19.555556
| 210
| 0.623106
|
345172cdbde38532f54b5b72a9fb353df334db15
| 16,936
|
py
|
Python
|
Lib/site-packages/urllib3/contrib/pyopenssl.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | 7
|
2019-12-21T00:14:14.000Z
|
2021-03-11T14:51:37.000Z
|
Lib/site-packages/urllib3/contrib/pyopenssl.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | 29
|
2019-10-09T11:16:21.000Z
|
2020-06-23T09:32:09.000Z
|
Lib/site-packages/urllib3/contrib/pyopenssl.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | 1
|
2021-05-07T10:13:31.000Z
|
2021-05-07T10:13:31.000Z
|
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
try:
from cryptography.x509 import UnsupportedExtension
except ImportError:
# UnsupportedExtension is gone in cryptography >= 2.1.0
class UnsupportedExtension(Exception):
pass
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from ..packages import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
# Older protocol constants only exist on some builds of ssl/pyOpenSSL,
# so register each one conditionally.
if hasattr(ssl, 'PROTOCOL_SSLv3') and hasattr(OpenSSL.SSL, 'SSLv3_METHOD'):
    _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
# stdlib CERT_* verification modes -> OpenSSL VERIFY_* flags
_stdlib_to_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# inverse mapping, used by PyOpenSSLContext.verify_mode's getter
_openssl_to_stdlib_verify = dict(
    (v, k) for k, v in _stdlib_to_openssl_verify.items()
)
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
# originals saved so extract_from_urllib3() can undo the monkey-patching
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
    """Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."""
    _validate_dependencies_met()
    # util and util.ssl_ mirror the same three attributes; patch both.
    for namespace in (util, util.ssl_):
        namespace.SSLContext = PyOpenSSLContext
        namespace.HAS_SNI = HAS_SNI
        namespace.IS_PYOPENSSL = True
def extract_from_urllib3():
    """Undo monkey-patching by :func:`inject_into_urllib3`."""
    # restore the saved originals on both mirrored namespaces
    for namespace in (util, util.ssl_):
        namespace.SSLContext = orig_util_SSLContext
        namespace.HAS_SNI = orig_util_HAS_SNI
        namespace.IS_PYOPENSSL = False
def _validate_dependencies_met():
    """
    Verifies that PyOpenSSL's package-level dependencies have been met.
    Throws `ImportError` if they are not met.
    """
    # Method added in `cryptography==1.1`; not available in older versions
    from cryptography.x509.extensions import Extensions
    if getattr(Extensions, "get_extension_for_class", None) is None:
        raise ImportError("'cryptography' module missing required functionality. "
                          "Try upgrading to v1.3.4 or newer.")
    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
    # attribute is only present on those versions.
    from OpenSSL.crypto import X509
    x509 = X509()
    if getattr(x509, "_x509", None) is None:
        raise ImportError("'pyOpenSSL' module missing required functionality. "
                          "Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
    """
    Converts a dNSName SubjectAlternativeName field to the form used by the
    standard library on the given Python version.
    Cryptography produces a dNSName as a unicode string that was idna-decoded
    from ASCII bytes. We need to idna-encode that string to get it back, and
    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
    If the name cannot be idna-encoded then we return None signalling that
    the name given should be skipped.
    """
    def idna_encode(name):
        """
        Borrowed wholesale from the Python Cryptography Project. It turns out
        that we can't just safely call `idna.encode`: it can explode for
        wildcard names. This avoids that problem.
        """
        import idna
        try:
            # strip a leading wildcard/dot prefix, encode the remainder,
            # then re-attach the prefix as raw ASCII bytes
            for prefix in [u'*.', u'.']:
                if name.startswith(prefix):
                    name = name[len(prefix):]
                    return prefix.encode('ascii') + idna.encode(name)
            return idna.encode(name)
        except idna.core.IDNAError:
            return None
    # Don't send IPv6 addresses through the IDNA encoder.
    if ':' in name:
        return name
    name = idna_encode(name)
    if name is None:
        return None
    elif sys.version_info >= (3, 0):
        # Python 3 stdlib expects str, not the bytes idna produces
        name = name.decode('utf-8')
    return name
def get_subj_alt_name(peer_cert):
    """
    Given an PyOpenSSL certificate, provides all the subject alternative names.

    Returns a list of ('DNS', name) and ('IP Address', ip) tuples, matching
    the shape produced by the stdlib's getpeercert(); returns [] when the
    SAN extension is absent or unreadable.
    """
    # Pass the cert to cryptography, which has much better APIs for this.
    if hasattr(peer_cert, "to_cryptography"):
        cert = peer_cert.to_cryptography()
    else:
        # This is technically using private APIs, but should work across all
        # relevant versions before PyOpenSSL got a proper API for this.
        cert = _Certificate(openssl_backend, peer_cert._x509)
    # We want to find the SAN extension. Ask Cryptography to locate it (it's
    # faster than looping in Python)
    try:
        ext = cert.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        ).value
    except x509.ExtensionNotFound:
        # No such extension, return the empty list.
        return []
    except (x509.DuplicateExtension, UnsupportedExtension,
            x509.UnsupportedGeneralNameType, UnicodeError) as e:
        # A problem has been found with the quality of the certificate. Assume
        # no SAN field is present.
        log.warning(
            "A problem was encountered with the certificate that prevented "
            "urllib3 from finding the SubjectAlternativeName field. This can "
            "affect certificate validation. The error was %s",
            e,
        )
        return []
    # We want to return dNSName and iPAddress fields. We need to cast the IPs
    # back to strings because the match_hostname function wants them as
    # strings.
    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
    # decoded. This is pretty frustrating, but that's what the standard library
    # does with certificates, and so we need to attempt to do the same.
    # We also want to skip over names which cannot be idna encoded.
    names = [
        ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
        if name is not None
    ]
    names.extend(
        ('IP Address', str(name))
        for name in ext.get_values_for_type(x509.IPAddress)
    )
    return names
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.
    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''
    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        # connection: OpenSSL.SSL.Connection; socket: the underlying raw socket
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self._makefile_refs = 0
        self._closed = False
    def fileno(self):
        return self.socket.fileno()
    # Copy-pasted from Python 3.5 source code
    def _decref_socketios(self):
        # Called when a makefile()-created file object is closed; the real
        # close happens only once all file objects are gone.
        if self._makefile_refs > 0:
            self._makefile_refs -= 1
        if self._closed:
            self.close()
    def recv(self, *args, **kwargs):
        """Read from the TLS connection, translating OpenSSL errors into the
        stdlib socket/ssl exceptions callers expect."""
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # a "ragged" EOF (peer closed without close_notify) is treated
            # as a normal EOF when suppress_ragged_eofs is set
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # no TLS record ready yet: wait for the socket, then retry
            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        # TLS 1.3 post-handshake authentication
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError("read error: %r" % e)
        else:
            return data
    def recv_into(self, *args, **kwargs):
        """Buffer-filling variant of recv(); same error translation,
        returns a byte count (0 on EOF) instead of bytes."""
        try:
            return self.connection.recv_into(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return 0
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return 0
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                raise timeout('The read operation timed out')
            else:
                return self.recv_into(*args, **kwargs)
        # TLS 1.3 post-handshake authentication
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError("read error: %r" % e)
    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)
    def _send_until_done(self, data):
        # retry on WantWriteError until OpenSSL accepts (part of) the data
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                if not util.wait_for_write(self.socket, self.socket.gettimeout()):
                    raise timeout()
                continue
            except OpenSSL.SSL.SysCallError as e:
                raise SocketError(str(e))
    def sendall(self, data):
        # OpenSSL only writes up to SSL_WRITE_BLOCKSIZE (16K) per call,
        # so loop over the data in chunks until everything is sent
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent
    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()
    def close(self):
        # defer the real close while makefile() file objects are still alive
        if self._makefile_refs < 1:
            try:
                self._closed = True
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            self._makefile_refs -= 1
    def getpeercert(self, binary_form=False):
        """Mimic ssl.SSLSocket.getpeercert(): DER bytes when binary_form,
        otherwise a dict with 'subject' and 'subjectAltName'."""
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509
        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': get_subj_alt_name(x509)
        }
    def version(self):
        return self.connection.get_protocol_version_name()
    def _reuse(self):
        # pypy GC support: another holder of this socket
        self._makefile_refs += 1
    def _drop(self):
        # pypy GC support: release one holder, closing on the last drop
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
if _fileobject:  # Platform-specific: Python 2
    def makefile(self, mode, bufsize=-1):
        # delegate to socket._fileobject, bumping the ref count so close()
        # is deferred until every file object is gone
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else:  # Platform-specific: Python 3
    makefile = backport_makefile
# attach the version-appropriate makefile to the wrapper class
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
    """
    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
    for translating the interface of the standard library ``SSLContext`` object
    to calls into PyOpenSSL.
    """
    def __init__(self, protocol):
        # protocol: a stdlib ssl.PROTOCOL_* constant, mapped to the
        # corresponding OpenSSL *_METHOD via _openssl_versions
        self.protocol = _openssl_versions[protocol]
        self._ctx = OpenSSL.SSL.Context(self.protocol)
        self._options = 0
        self.check_hostname = False
    @property
    def options(self):
        return self._options
    @options.setter
    def options(self, value):
        self._options = value
        self._ctx.set_options(value)
    @property
    def verify_mode(self):
        # translate OpenSSL VERIFY_* flags back to stdlib CERT_* constants
        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
    @verify_mode.setter
    def verify_mode(self, value):
        self._ctx.set_verify(
            _stdlib_to_openssl_verify[value],
            _verify_callback
        )
    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()
    def set_ciphers(self, ciphers):
        # pyOpenSSL wants bytes; accept str like the stdlib does
        if isinstance(ciphers, six.text_type):
            ciphers = ciphers.encode('utf-8')
        self._ctx.set_cipher_list(ciphers)
    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            # in-memory CA data is loaded via a file-like object
            self._ctx.load_verify_locations(BytesIO(cadata))
    def load_cert_chain(self, certfile, keyfile=None, password=None):
        self._ctx.use_certificate_chain_file(certfile)
        if password is not None:
            if not isinstance(password, six.binary_type):
                password = password.encode('utf-8')
            self._ctx.set_passwd_cb(lambda *_: password)
        # like the stdlib, the key defaults to living in the cert file
        self._ctx.use_privatekey_file(keyfile or certfile)
    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap a plain socket, perform the TLS handshake (blocking, with
        retry on WantReadError), and return a WrappedSocket."""
        cnx = OpenSSL.SSL.Connection(self._ctx, sock)
        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
            server_hostname = server_hostname.encode('utf-8')
        if server_hostname is not None:
            # SNI: send the requested hostname in the ClientHello
            cnx.set_tlsext_host_name(server_hostname)
        cnx.set_connect_state()
        while True:
            try:
                cnx.do_handshake()
            except OpenSSL.SSL.WantReadError:
                if not util.wait_for_read(sock, sock.gettimeout()):
                    raise timeout('select timed out')
                continue
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('bad handshake: %r' % e)
            break
        return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
| 34.847737
| 97
| 0.642419
|
07ef7b33796d01d51455b12b14dbf8509e120a49
| 1,594
|
py
|
Python
|
src/sentry/models/authprovider.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
src/sentry/models/authprovider.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/authprovider.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function
from bitfield import BitField
from django.db import models
from django.utils import timezone
from sentry.db.models import (
BoundedPositiveIntegerField,
EncryptedJsonField,
FlexibleForeignKey,
Model,
sane_repr,
)
class AuthProvider(Model):
    """Django model holding an organization's SSO/auth provider configuration."""
    __core__ = True
    # one auth provider per organization (unique FK)
    organization = FlexibleForeignKey("sentry.Organization", unique=True)
    # provider key looked up via sentry.auth.manager in get_provider()
    provider = models.CharField(max_length=128)
    # provider-specific settings, stored encrypted
    config = EncryptedJsonField()
    date_added = models.DateTimeField(default=timezone.now)
    # sync_time/last_sync: presumably periodic member-sync bookkeeping —
    # NOTE(review): semantics not visible here; confirm against callers
    sync_time = BoundedPositiveIntegerField(null=True)
    last_sync = models.DateTimeField(null=True)
    # role granted to newly provisioned members (default 50)
    default_role = BoundedPositiveIntegerField(default=50)
    default_global_access = models.BooleanField(default=True)
    # TODO(dcramer): ManyToMany has the same issue as ForeignKey and we need
    # to either write our own which works w/ BigAuto or switch this to use
    # through.
    default_teams = models.ManyToManyField("sentry.Team", blank=True)
    flags = BitField(
        flags=(("allow_unlinked", "Grant access to members who have not linked SSO accounts."),),
        default=0,
    )
    class Meta:
        app_label = "sentry"
        db_table = "sentry_authprovider"
    __repr__ = sane_repr("organization_id", "provider")
    def __unicode__(self):
        return self.provider
    def get_provider(self):
        """Instantiate the registered provider class with this row's config."""
        from sentry.auth import manager
        return manager.get(self.provider, **self.config)
    def get_audit_log_data(self):
        """Data recorded in the audit log when this provider changes."""
        return {"provider": self.provider, "config": self.config}
| 28.981818
| 97
| 0.716437
|
2254799a08067a7daecd225bf6a3f1078157a43c
| 10,773
|
py
|
Python
|
tests/tests.py
|
benhowes/django-side-effects
|
8caaa66dfcd85c4c65f6a9d80d306b98a1eab6a0
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
benhowes/django-side-effects
|
8caaa66dfcd85c4c65f6a9d80d306b98a1eab6a0
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
benhowes/django-side-effects
|
8caaa66dfcd85c4c65f6a9d80d306b98a1eab6a0
|
[
"MIT"
] | null | null | null |
from unittest import mock
from django.test import TestCase
from side_effects import registry, decorators, settings
class RegistryFunctionTests(TestCase):
    """Test the free functions in the registry module."""

    def setUp(self):
        # each test starts from an empty global registry
        registry._registry.clear()
    def test_fname(self):
        """fname() returns the fully-qualified dotted path of a function."""
        self.assertEqual(
            # wait, what?
            registry.fname(registry.fname),
            "side_effects.registry.fname",
        )
    def test_docstring(self):
        """docstring() splits a function's docstring into stripped lines,
        or returns None when there is no docstring."""
        def test_func_no_docstring(arg1):
            pass
        def test_func_one_line(*args):
            """This is a one line docstring."""
            return sum(args)
        def test_func_one_line_2():
            """
            This is also a one line docstring.
            """
            pass
        def test_func_multi_line():
            """
            This is a multi-line docstring.

            It has multiple lines.
            """
            pass
        self.assertEqual(registry.docstring(test_func_no_docstring), None)
        self.assertEqual(
            registry.docstring(test_func_one_line), ["This is a one line docstring."]
        )
        self.assertEqual(
            registry.docstring(test_func_one_line_2),
            ["This is also a one line docstring."],
        )
        self.assertEqual(
            registry.docstring(test_func_multi_line),
            ["This is a multi-line docstring.", "", "It has multiple lines."],
        )
    def test_pass_return_value(self):
        """pass_return_value() is True when the signature can accept a
        return_value argument (named param or **kwargs)."""
        def foo(arg1):
            pass
        def bar(arg1, **kwargz):
            pass
        def baz(*args, **kwargs):
            pass
        def dave(*args, return_value):
            pass
        def dee(arg1, return_value):
            pass
        def dozy(arg1, return_value=None):
            pass
        self.assertFalse(registry.pass_return_value(foo))
        self.assertFalse(registry.pass_return_value(bar))
        self.assertTrue(registry.pass_return_value(baz))
        self.assertTrue(registry.pass_return_value(dave))
        self.assertTrue(registry.pass_return_value(dee))
        self.assertTrue(registry.pass_return_value(dozy))
    def test_register_side_effect(self):
        """register_side_effect() adds a function under a label, ignoring
        duplicate registrations."""
        def test_func1():
            pass
        def test_func2():
            pass
        registry.register_side_effect("foo", test_func1)
        self.assertTrue(registry._registry.contains("foo", test_func1))
        self.assertFalse(registry._registry.contains("foo", test_func2))
        # try adding adding a duplicate
        registry.register_side_effect("foo", test_func1)
        self.assertTrue(registry._registry.contains("foo", test_func1))
    @mock.patch("side_effects.registry._run_func")
    def test_run_side_effects(self, mock_func):
        """run_side_effects() invokes each registered function once, and is
        a no-op when settings.TEST_MODE is on."""
        def test_func1():
            pass
        def test_func2():
            pass
        registry.run_side_effects("foo")
        self.assertEqual(mock_func.call_count, 0)
        mock_func.reset_mock()
        registry.register_side_effect("foo", test_func1)
        registry.run_side_effects("foo")
        self.assertEqual(mock_func.call_count, 1)
        mock_func.reset_mock()
        registry.register_side_effect("foo", test_func2)
        registry.run_side_effects("foo")
        self.assertEqual(mock_func.call_count, 2)
        mock_func.reset_mock()
        with mock.patch("side_effects.settings.TEST_MODE", True):
            registry.run_side_effects("foo")
            self.assertEqual(mock_func.call_count, 0)
        with mock.patch("side_effects.settings.TEST_MODE", False):
            registry.run_side_effects("foo")
            self.assertEqual(mock_func.call_count, 2)
    @mock.patch("side_effects.registry.settings.TEST_MODE_FAIL", True)
    def test_run_side_effects__test_mode_fail(self):
        """TEST_MODE_FAIL makes run_side_effects raise instead of run."""
        def test_func():
            pass
        registry.register_side_effect("foo", test_func)
        self.assertRaises(
            registry.SideEffectsTestFailure, registry.run_side_effects, "foo"
        )
    def test__run_func__no_return_value(self):
        """Test the _run_func function does not pass return_value if not required."""
        def test_func():
            pass
        registry._run_func(test_func, return_value=None)
    def test__run_func__with_return_value(self):
        """Test the _run_func function passes through the return_value if required."""
        def test_func(**kwargs):
            assert "return_value" in kwargs
        # return_value not passed through, so fails
        # NOTE(review): the assertRaises below is disabled, so the bare call
        # relies on errors being swallowed (ABORT_ON_ERROR off) — confirm.
        registry._run_func(test_func)
        # self.assertRaises(KeyError, registry._run_func, test_func)
        registry._run_func(test_func, return_value=None)
    def test__run_func__aborts_on_error(self):
        """Test the _run_func function handles ABORT_ON_ERROR correctly."""
        def test_func():
            raise Exception("Pah")
        # error is logged, but not raised
        with mock.patch.object(settings, "ABORT_ON_ERROR", False):
            self.assertFalse(settings.ABORT_ON_ERROR)
            registry._run_func(test_func, return_value=None)
        # error is raised
        with mock.patch.object(settings, "ABORT_ON_ERROR", True):
            self.assertTrue(settings.ABORT_ON_ERROR)
            self.assertRaises(Exception, registry._run_func, test_func)
class RegistryTests(TestCase):
    """Tests for the registry module."""
    def test_registry_add_contains(self):
        """Check that add and contains functions work together."""
        def test_func():
            pass
        r = registry.Registry()
        self.assertFalse(r.contains("foo", test_func))
        r.add("foo", test_func)
        self.assertTrue(r.contains("foo", test_func))
    def test_by_label(self):
        """by_label() returns only exact-label matches, as a dict."""
        def test_func():
            pass
        r = registry.Registry()
        r.add("foo", test_func)
        self.assertEqual(r.by_label("foo").items(), r.items())
        self.assertEqual(r.by_label("foo"), {"foo": [test_func]})
        self.assertEqual(r.by_label("bar"), {})
    def test_by_label_contains(self):
        """by_label_contains() matches labels by prefix, not substring of
        the query: 'food' does not match the registered 'foo'."""
        def test_func():
            pass
        r = registry.Registry()
        r.add("foo", test_func)
        self.assertEqual(r.by_label_contains("foo").items(), r.items())
        self.assertEqual(r.by_label_contains("f"), {"foo": [test_func]})
        self.assertEqual(r.by_label_contains("fo"), {"foo": [test_func]})
        self.assertEqual(r.by_label_contains("foo"), {"foo": [test_func]})
        self.assertEqual(r.by_label_contains("food"), {})
    @mock.patch("side_effects.registry._run_func")
    def test__run_side_effects__no_return_value(self, mock_run):
        """Test return_value is not passed"""
        def no_return_value(*args, **kwargz):
            assert "return_value" not in kwargz
        r = registry.Registry()
        r.add("foo", no_return_value)
        r._run_side_effects("foo")
        r._run_side_effects("foo", return_value=None)
    def test__run_side_effects__with_return_value(self):
        """Test return_value is passed"""
        r = registry.Registry()
        def has_return_value(*args, **kwargs):
            assert "return_value" in kwargs
        r.add("foo", has_return_value)
        r._run_side_effects("foo", return_value=None)
class DecoratorTests(TestCase):
    """Tests for the decorators module."""

    def setUp(self):
        registry._registry.clear()

    def test_http_response_check(self):
        """Test the HTTP response check rejects 4xx, 5xx status_codes."""
        response = decorators.HttpResponse(status=200)
        # 2xx / 3xx pass, 4xx / 5xx fail, out-of-range codes pass
        for status, expected in (
            (200, True),
            (300, True),
            (400, False),
            (500, False),
            (600, True),
        ):
            response.status_code = status
            self.assertEqual(decorators.http_response_check(response), expected)

    @mock.patch("side_effects.decorators.registry")
    def test_has_side_effects(self, mock_registry):
        """Decorated functions should call run_side_effects."""
        # call the decorator directly - then call the decorated function
        # as the action takes places post-function call.
        def doubler(arg1: int):
            return arg1 * 2

        decorated = decorators.has_side_effects("foo")(doubler)
        decorated(1)
        mock_registry.run_side_effects.assert_called_with("foo", 1, return_value=2)

    @mock.patch("side_effects.decorators.registry")
    def test_has_side_effects__run_on_exit_false(self, mock_registry):
        """Decorated functions should call run_side_effects."""
        def noop(*args, **kwargs):
            pass

        decorated = decorators.has_side_effects(
            "foo", run_on_exit=lambda r: False
        )(noop)
        decorated("bar")
        mock_registry.run_side_effects.assert_not_called()

    @mock.patch("side_effects.registry.register_side_effect")
    def test_is_side_effect_of(self, mock_register):
        """Decorated functions should be added to the registry."""
        def adder(arg1, arg2):
            return arg1 + arg2

        # call the decorator directly - no need to call the decorated
        # function as the action takes place outside of that.
        decorated = decorators.is_side_effect_of("foo")(adder)
        mock_register.assert_called_with("foo", adder)
        # check the function still works!
        self.assertEqual(decorated(1, 2), 3)

    @decorators.disable_side_effects()
    def test_disable_side_effects(self, events):
        # simple func that calls the side-effect 'foo'
        def fire_foo():
            registry.run_side_effects("foo")

        registry.register_side_effect("foo", fire_foo)
        fire_foo()
        self.assertEqual(events, ["foo"])
        fire_foo()
        self.assertEqual(events, ["foo", "foo"])
class ContextManagerTests(TestCase):
    """Tests for the disable_side_effects context manager."""

    @mock.patch("side_effects.registry._run_func")
    def test_disable_side_effects(self, mock_func):
        """Side-effects can be temporarily disabled."""
        def handler():
            pass

        registry._registry.clear()
        registry.register_side_effect("foo", handler)
        registry.run_side_effects("foo")
        self.assertEqual(mock_func.call_count, 1)
        # shouldn't get another call inside the CM
        with registry.disable_side_effects() as events:
            registry.run_side_effects("foo")
            self.assertEqual(mock_func.call_count, 1)
            self.assertEqual(events, ["foo"])
        # re-enabled
        registry.run_side_effects("foo")
        self.assertEqual(mock_func.call_count, 2)
| 32.744681
| 86
| 0.640304
|
842b93c43104944832376724b0133e758e37cfc5
| 9,939
|
py
|
Python
|
dataframe_utilities.py
|
labscript-suite-temp-archive/lyse-fork--mearnshaw-lyse--forked-from--labscript_suite-lyse
|
f72266fe6e70cdfaf9a57338307e2a8379d7ba4b
|
[
"BSD-2-Clause"
] | null | null | null |
dataframe_utilities.py
|
labscript-suite-temp-archive/lyse-fork--mearnshaw-lyse--forked-from--labscript_suite-lyse
|
f72266fe6e70cdfaf9a57338307e2a8379d7ba4b
|
[
"BSD-2-Clause"
] | null | null | null |
dataframe_utilities.py
|
labscript-suite-temp-archive/lyse-fork--mearnshaw-lyse--forked-from--labscript_suite-lyse
|
f72266fe6e70cdfaf9a57338307e2a8379d7ba4b
|
[
"BSD-2-Clause"
] | null | null | null |
#####################################################################
# #
# /dataframe_utilities.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program lyse, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import h5_lock, h5py
import pandas
import os
from numpy import *
import dateutil
from timezones import localtz
import shared_drive
# asdatetime = dateutil.parser.parse
# def asdatetime(timestr):
# return localtz().localize(dateutil.parser.parse(timestr))
def asdatetime(timestr):
    """Parse *timestr* into a timezone-aware pandas.Timestamp.

    The timezone is currently hard-coded; the commented alternatives
    below show the previous localtz()-based behaviour.
    """
    # tz = localtz().zone
    # tz = None
    return pandas.Timestamp(timestr, tz='Australia/Melbourne')
class Fields(object):
    """A pickle-safe stand-in for a numpy.void structured-array row.

    numpy.void objects cannot be correctly unpickled (a bug in numpy)
    and therefore cannot be sent to other processes over the network.
    This class mimics a single row of a numpy structured array: it looks
    like a tuple but can be indexed by either field name or integer
    position.  Whenever this module encounters a numpy.void attribute
    while reading an HDF5 file, it converts it to one of these.
    """

    def __init__(self, data):
        # Keep both positional and by-name views of the row's values.
        self.dtype = data.dtype
        self.data_by_index = tuple(data)
        self.data_by_name = {name: data[name] for name in data.dtype.names}

    def __getitem__(self, key):
        if isinstance(key, int):
            return self.data_by_index[key]
        return self.data_by_name[key]

    def __repr__(self):
        return str(self.data_by_index)
def get_nested_dict_from_shot(filepath):
    """Read the metadata of one shot (an HDF5 file) into a nested dict.

    Collects the 'globals' attributes, per-group 'results' attributes and
    per-image attributes, plus bookkeeping entries ('filepath',
    'agnostic_path', 'sequence', 'labscript', 'run time', ...) taken from
    the file's top-level attributes.
    """
    with h5py.File(filepath,'r') as h5_file:
        row = dict(h5_file['globals'].attrs)
        if 'results' in h5_file:
            # One sub-dict of attributes per analysis results group.
            for groupname in h5_file['results']:
                resultsgroup = h5_file['results'][groupname]
                row[groupname] = dict(resultsgroup.attrs)
        if 'images' in h5_file:
            # Nested dicts: orientation -> label -> image -> attributes.
            for orientation in h5_file['images'].keys():
                row[orientation] = dict(h5_file['images'][orientation].attrs)
                for label in h5_file['images'][orientation]:
                    row[orientation][label] = {}
                    group = h5_file['images'][orientation][label]
                    for image in group:
                        row[orientation][label][image] = dict(
                            group[image].attrs)
        row['filepath'] = filepath
        row['agnostic_path'] = shared_drive.path_to_local(filepath)
        # assumes sequence_id is '<timestamp>_<rest>' — the timestamp part
        # is parsed into a Timestamp.
        row['sequence'] = asdatetime(h5_file.attrs['sequence_id'].split('_')[0])
        if 'script' in h5_file:
            row['labscript'] = h5_file['script'].attrs['name']
        try:
            row['run time'] = asdatetime(h5_file.attrs['run time'])
        except KeyError:
            # Missing 'run time' attribute is recorded as NaN.
            row['run time'] = float('nan')
        try:
            row['run number'] = h5_file.attrs['run number']
        except KeyError:
            # ignore:
            pass
        try:
            # Optimisation metadata; presumably both attrs appear together.
            row['individual id'] = h5_file.attrs['individual id']
            row['generation'] = h5_file.attrs['generation']
        except KeyError:
            pass
        return row
def flatten_dict(dictionary, keys=tuple()):
    """Takes a nested dictionary whose keys are strings, and returns a
    flat dictionary whose keys are tuples of strings, each element of
    which is the key for one level of the hierarchy."""
    result = {}
    for name, value in dictionary.items():
        path = keys + (str(name),)
        if isinstance(value, dict):
            # Recurse, extending the key path by one level.
            result.update(flatten_dict(value, keys=path))
        else:
            result[path] = value
    return result
def flat_dict_to_hierarchical_dataframe(dictionary):
    """Return a single-row DataFrame with hierarchical (MultiIndex) columns.

    All keys are padded with empty strings to a common tuple length — at
    least two, the minimum required to build a MultiIndex.
    """
    depth = max([2] + [len(key) for key in dictionary])
    padded = {key + ('',) * (depth - len(key)): value
              for key, value in dictionary.items()}
    index = pandas.MultiIndex.from_tuples(sorted(padded.keys()))
    return pandas.DataFrame([padded], columns=index)
def workaround_empty_string_bug(dictionary):
    # It doesn't look like this function does anything, but it does: it
    # replaces numpy empty strings with native python empty strings.
    # h5py returns empty strings as a numpy datatype which numpy itself
    # can't actually handle — numpy only ever uses strings of length one
    # or greater.  Replacing them with ordinary python strings lets numpy
    # convert them (when it needs to) to a datatype it can handle.
    for key in dictionary:
        value = dictionary[key]
        if isinstance(value, str) and not value:
            dictionary[key] = ''
def workaround_numpy_void_bug(dictionary):
    # numpy.void objects undergo data corruption when pickled and
    # unpickled.  h5py returns numpy.void objects for attributes of its
    # 'compound' datatype, so any we find are converted to our
    # home-cooked Fields class (defined above), which provides mostly the
    # same functionality.  This can be removed if and when numpy fix
    # their bug.
    for key in dictionary:
        if isinstance(dictionary[key], void):
            dictionary[key] = Fields(dictionary[key])
def do_workarounds(dictionary):
    """Apply in-place workarounds for h5py/numpy quirks to *dictionary*."""
    workaround_empty_string_bug(dictionary)
    # NOTE(review): the numpy.void workaround is deliberately disabled here.
    #workaround_numpy_void_bug(dictionary)
def flat_dict_to_flat_series(dictionary):
    """Convert a flat {tuple-of-strings: value} dict to a pandas Series.

    Keys of length one are collapsed to their single string element;
    longer keys are kept as tuples.  The index is sorted shortest-first,
    then lexically.

    Fixes: the original called ``dict.keys().sort()``, which raises
    AttributeError on Python 3 (dict views have no sort method); the
    unused ``max_tuple_length`` local has been dropped.
    """
    result = {}
    for key in dictionary:
        if len(key) > 1:
            result[key] = dictionary[key]
        else:
            result[key[0]] = dictionary[key]
    # Sort with single (plain string) keys first, then by length/content.
    keys = sorted(result.keys(), key=lambda item:
                  (len(item),) + item if isinstance(item, tuple) else (1, item))
    return pandas.Series(result, index=keys)
def get_dataframe_from_shot(filepath):
    """Return a single-row hierarchical DataFrame for the shot at *filepath*."""
    flat_dict = flatten_dict(get_nested_dict_from_shot(filepath))
    do_workarounds(flat_dict)
    return flat_dict_to_hierarchical_dataframe(flat_dict)
def get_series_from_shot(filepath):
    """Return a flat pandas Series of metadata for the shot at *filepath*."""
    flat_dict = flatten_dict(get_nested_dict_from_shot(filepath))
    do_workarounds(flat_dict)
    return flat_dict_to_flat_series(flat_dict)
def pad_columns(df, n):
    """Add depth to hierarchical column labels with empty strings.

    Returns *df* unchanged when its columns already have *n* levels.
    """
    if df.columns.nlevels == n:
        return df
    data = {}
    padded_labels = []
    for column in df.columns:
        label = column + ('',) * (n - len(column))
        padded_labels.append(label)
        data[label] = df[column]
    index = pandas.MultiIndex.from_tuples(padded_labels)
    return pandas.DataFrame(data, columns=index)
def concat_with_padding(df1, df2):
    """Concatenates two dataframes with MultiIndex column labels,
    padding the shallower hierarchy such that the two MultiIndexes have
    the same nlevels.

    Fixed for modern pandas: ``DataFrame.append`` was deprecated in
    pandas 1.4 and removed in 2.0, so ``pandas.concat`` is used instead
    (identical result with ignore_index=True).
    """
    if df1.columns.nlevels < df2.columns.nlevels:
        df1 = pad_columns(df1, df2.columns.nlevels)
    elif df1.columns.nlevels > df2.columns.nlevels:
        df2 = pad_columns(df2, df1.columns.nlevels)
    return pandas.concat([df1, df2], ignore_index=True)
def replace_with_padding(df, row, index):
    """Replace the row of *df* at label *index* with the single-row frame *row*.

    Column hierarchies are padded to matching depth first; the result is
    returned sorted by index.

    Fixed for modern pandas: ``.ix`` (removed in 1.0), ``DataFrame.sort``
    (removed in 0.20, equivalent to sort_index) and ``DataFrame.append``
    (removed in 2.0) are replaced with ``.iloc``, ``sort_index()`` and
    ``pandas.concat``.
    """
    if df.columns.nlevels < row.columns.nlevels:
        df = pad_columns(df, row.columns.nlevels)
    elif df.columns.nlevels > row.columns.nlevels:
        row = pad_columns(row, df.columns.nlevels)
    # Relabel the (single) incoming row with the target index:
    row = row.iloc[[0]].copy()
    row.index = [index]
    # Replace the row at *index* and restore index order:
    df = df.drop([index])
    df = pandas.concat([df, row])
    return df.sort_index()
def dict_diff(dict1, dict2):
    """Return the difference between two dictionaries as a dictionary of
    key: [val1, val2] pairs.  Keys unique to either dictionary are
    included as key: [val1, '-'] or key: ['-', val2].

    Fixed for Python 3: ``numpy.intersect1d`` cannot digest dict key
    views (it produced a 0-d object array), so the common keys are found
    with a set intersection instead — the resulting diff is identical.
    """
    common_keys = set(dict1) & set(dict2)
    diff_keys = []
    for key in common_keys:
        # `iterable` and `any` come from the module's `from numpy import *`:
        # numpy.any accepts both scalars and the element-wise comparison
        # arrays produced by numpy values.
        if iterable(dict1[key]):
            if any(dict1[key] != dict2[key]):
                diff_keys.append(key)
        elif dict1[key] != dict2[key]:
            diff_keys.append(key)
    diff = {key: [dict1[key], dict2[key]] for key in diff_keys}
    for key in dict1:
        if key not in common_keys:
            diff[key] = [dict1[key], '-']
    for key in dict2:
        if key not in common_keys:
            diff[key] = ['-', dict2[key]]
    return diff
| 40.901235
| 98
| 0.591709
|
34a106db6f86c512a164b86f587cb7a8c6a4af2e
| 1,743
|
py
|
Python
|
classifiers/binary/dtclassifier.py
|
marinakiseleva/thex_model
|
9d498b697b7c4b03e1db31cae4d7a469311229f7
|
[
"MIT"
] | 4
|
2020-07-20T21:15:19.000Z
|
2021-10-01T19:45:47.000Z
|
classifiers/binary/dtclassifier.py
|
marinakiseleva/thex_model
|
9d498b697b7c4b03e1db31cae4d7a469311229f7
|
[
"MIT"
] | 1
|
2019-12-18T17:45:10.000Z
|
2019-12-18T17:45:10.000Z
|
classifiers/binary/dtclassifier.py
|
marinakiseleva/thex_model
|
9d498b697b7c4b03e1db31cae4d7a469311229f7
|
[
"MIT"
] | 1
|
2021-10-06T23:11:32.000Z
|
2021-10-06T23:11:32.000Z
|
import sys
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import brier_score_loss
from sklearn.tree import DecisionTreeClassifier
from thex_data.data_consts import TARGET_LABEL, CPU_COUNT, LOSS_FUNCTION
class DTClassifier():
    """
    Decision tree classifier, tuned via grid search over tree
    hyperparameters using the project loss function.
    """

    def __init__(self, X, y, sample_weights, class_weights):
        """
        Init classifier through training.

        :param X: training features (pandas DataFrame)
        :param y: training labels; the positive class is encoded as 1
        :param sample_weights: per-sample weights (currently unused)
        :param class_weights: per-class weights (currently unused — the
            tree is built with class_weight='balanced'; see the
            commented-out grid entry below)
        """
        self.name = "Decision Tree"
        self.clf = self.train(X, y, sample_weights, class_weights)

    def train(self, X, y, sample_weights, class_weights):
        """
        Grid-search a DecisionTreeClassifier and return the best estimator.
        """
        grid = {'criterion': ['entropy', 'gini'],
                'splitter': ['best', 'random'],
                # 'class_weight': [None, 'balanced', class_weights]
                'max_depth': [20, 50, None],
                'min_samples_split': [2, 4, 8, 0.05],
                'min_samples_leaf': [1, 2, 4, 8],
                'min_weight_fraction_leaf': [0, 0.001, 0.01],
                'max_features': [0.3, 0.5, None],
                }
        # NOTE: the deprecated `iid` keyword has been dropped — it was
        # removed from GridSearchCV in scikit-learn 0.24 and passing it
        # raises TypeError on modern versions.
        clf_optimize = GridSearchCV(
            estimator=DecisionTreeClassifier(class_weight='balanced'),
            param_grid=grid,
            scoring=LOSS_FUNCTION,
            cv=3,
            n_jobs=CPU_COUNT)
        # Fit the grid search model
        clf_optimize.fit(X.values, y.values)
        clf = clf_optimize.best_estimator_
        print(self.name + " optimal parameters:\n" + str(clf_optimize.best_params_))
        sys.stdout.flush()  # Print to output file
        return clf

    def get_class_probability(self, x):
        """Return P(class == 1) for a single sample *x* (a pandas Series)."""
        # Return probability of class at index 1 (==1, positive class)
        return self.clf.predict_proba([x.values])[0][1]
| 34.176471
| 84
| 0.601836
|
25985a64ce671533c476ec68a14129f00afc5cb5
| 5,928
|
py
|
Python
|
tests/test_utils.py
|
arnavkapoor/dateparser
|
f884c353863aafca48d35a38ed9a23ddab90bd6a
|
[
"BSD-3-Clause"
] | 1
|
2021-04-07T09:00:57.000Z
|
2021-04-07T09:00:57.000Z
|
tests/test_utils.py
|
arnavkapoor/dateparser
|
f884c353863aafca48d35a38ed9a23ddab90bd6a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_utils.py
|
arnavkapoor/dateparser
|
f884c353863aafca48d35a38ed9a23ddab90bd6a
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
from datetime import datetime
import pytest
from tests import BaseTestCase
from parameterized import parameterized, param
from dateparser.utils import (
find_date_separator, localize_timezone, apply_timezone,
apply_timezone_from_settings, registry,
get_last_day_of_month, get_previous_leap_year, get_next_leap_year)
from pytz import UnknownTimeZoneError, utc
from dateparser.conf import settings
class TestUtils(BaseTestCase):
    """Unit tests for the dateparser.utils helper functions."""

    def setUp(self):
        super(TestUtils, self).setUp()
        self.date_format = None
        self.result = None

    def given_date_format(self, date_format):
        # BDD-style helper: record the format under test.
        self.date_format = date_format

    def when_date_separator_is_parsed(self):
        self.result = find_date_separator(self.date_format)

    def then_date_separator_is(self, sep):
        self.assertEqual(self.result, sep)

    @staticmethod
    def make_class_without_get_keys():
        # Builds a class lacking get_keys(), used to verify that
        # registry() rejects it with NotImplementedError.
        class SomeClass:
            pass
        some_class = SomeClass
        return some_class

    @parameterized.expand([
        param(date_format=fmt.format(sep=sep), expected_sep=sep)
        for (fmt, sep) in itertools.product(
            ['%d{sep}%m{sep}%Y', '%d{sep}%m{sep}%Y %H:%M'],
            ['/', '.', '-', ':'])
    ])
    def test_separator_extraction(self, date_format, expected_sep):
        self.given_date_format(date_format)
        self.when_date_separator_is_parsed()
        self.then_date_separator_is(expected_sep)

    @parameterized.expand([
        param(datetime(2015, 12, 12), timezone='UTC', zone='UTC'),
        param(datetime(2015, 12, 12), timezone='Asia/Karachi', zone='Asia/Karachi'),
        param(datetime(2015, 12, 12, tzinfo=utc), timezone='UTC', zone='UTC'),
    ])
    def test_localize_timezone_function(self, date, timezone, zone):
        tzaware_dt = localize_timezone(date, timezone)
        self.assertEqual(tzaware_dt.tzinfo.zone, zone)

    @parameterized.expand([
        param(datetime(2015, 12, 12), timezone='UTB'),
        param(datetime(2015, 12, 12), timezone='Asia/Karach'),
    ])
    def test_localize_timezone_function_raise_error(self, date, timezone):
        # Misspelled zone names must raise UnknownTimeZoneError.
        self.assertRaises(UnknownTimeZoneError, localize_timezone, date, timezone)

    @parameterized.expand([
        param(datetime(2015, 12, 12), timezone='UTC+3', zone=r'UTC\+03:00'),
    ])
    def test_localize_timezone_function_exception(self, date, timezone, zone):
        # Offset-style zones fall back to a StaticTzInfo; inspect its
        # name-mangled private attribute to confirm.
        tzaware_dt = localize_timezone(date, timezone)
        self.assertEqual(tzaware_dt.tzinfo._StaticTzInfo__name, zone)

    @parameterized.expand([
        param(datetime(2015, 12, 12, 10, 12), timezone='Asia/Karachi', expected=datetime(2015, 12, 12, 15, 12)),
        param(datetime(2015, 12, 12, 10, 12), timezone='-0500', expected=datetime(2015, 12, 12, 5, 12)),
    ])
    def test_apply_timezone_function(self, date, timezone, expected):
        result = apply_timezone(date, timezone)
        # Strip tzinfo so only the shifted wall-clock time is compared.
        result = result.replace(tzinfo=None)
        self.assertEqual(expected, result)

    @parameterized.expand([
        param(datetime(2015, 12, 12, 10, 12), timezone='Asia/Karachi', expected=datetime(2015, 12, 12, 15, 12)),
        param(datetime(2015, 12, 12, 10, 12), timezone='-0500', expected=datetime(2015, 12, 12, 5, 12)),
    ])
    def test_apply_timezone_from_settings_function(self, date, timezone, expected):
        result = apply_timezone_from_settings(date, settings.replace(**{'TO_TIMEZONE': timezone, 'TIMEZONE': 'UTC'}))
        self.assertEqual(expected, result)

    @parameterized.expand([
        param(datetime(2015, 12, 12, 10, 12),
              expected=datetime(2015, 12, 12, 10, 12)),
    ])
    def test_apply_timezone_from_settings_function_none_settings(self, date, expected):
        # With no settings the datetime must come back unchanged.
        result = apply_timezone_from_settings(date, None)
        self.assertEqual(expected, result)

    @parameterized.expand([
        param(datetime(2015, 12, 12, 10, 12),),
        param(datetime(2015, 12, 12, 10, 12),),
    ])
    def test_apply_timezone_from_settings_function_should_return_tz(self, date):
        result = apply_timezone_from_settings(date, settings.replace(**{'RETURN_AS_TIMEZONE_AWARE': True}))
        self.assertTrue(bool(result.tzinfo))

    def test_registry_when_get_keys_not_implemented(self):
        cl = self.make_class_without_get_keys()
        self.assertRaises(NotImplementedError, registry, cl)

    @parameterized.expand([
        param(2111, 1, 31),
        param(1999, 2, 28),  # normal year
        param(1996, 2, 29),  # leap and not centurial year
        param(2000, 2, 29),  # leap and centurial year
        param(1700, 2, 28),  # no leap and centurial year (exception)
        param(2020, 3, 31),
        param(1987, 4, 30),
        param(1000, 5, 31),
        param(1534, 6, 30),
        param(1777, 7, 31),
        param(1234, 8, 31),
        param(1678, 9, 30),
        param(1947, 10, 31),
        param(2015, 11, 30),
        param(2300, 12, 31),
    ])
    def test_get_last_day_of_month(self, year, month, expected_last_day):
        assert get_last_day_of_month(year, month) == expected_last_day
@pytest.mark.parametrize(
    "year,expected_previous_leap_year", [
        (2020, 2016),
        (2000, 1996),  # leap and centurial year
        (2104, 2096),  # missing no leap centurial year
        (1704, 1696),
        (2396, 2392),
        (0, -4),  # even if this is not a valid year, it is the expected result
    ])
def test_get_previous_leap_year(year, expected_previous_leap_year):
    """get_previous_leap_year returns the closest strictly-earlier leap year."""
    assert get_previous_leap_year(year) == expected_previous_leap_year
@pytest.mark.parametrize(
    "year,expected_next_leap_year", [
        (2020, 2024),
        (1996, 2000),  # leap and centurial year
        (2096, 2104),  # missing no leap centurial year
        (1696, 1704),
        (2396, 2400),
        (0, 4)
    ])
def test_get_next_leap_year(year, expected_next_leap_year):
    """get_next_leap_year returns the closest strictly-later leap year."""
    assert get_next_leap_year(year) == expected_next_leap_year
| 38
| 117
| 0.662787
|
734ddfa418ce1b2a2cf34dbb3389b737e0e97dad
| 10,975
|
py
|
Python
|
pytools/src/IndexEval/evalcontinously.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
pytools/src/IndexEval/evalcontinously.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
pytools/src/IndexEval/evalcontinously.py
|
selentd/pythontools
|
ab3158dca1c3f6ef0f6d6678070da4a6551fa334
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 17.03.2016
@author: selen00r
'''
import datetime
import evalbase
import evalrunner
import fetchdata
import indexdata
import transactionchecker
class EvalContinously(evalbase.EvalBase):
    '''
    Base class for continuous index evaluations.

    Subclasses configure start/end transaction checkers;
    _calculateResult walks the index history, opening a transaction
    whenever the start checker fires and closing it when the end
    checker fires (or when the history ends).
    '''

    # Keys for optional run parameters.
    maxDaysKey = "maxDays"
    maxWinKey = "maxWin"
    maxLossKey = "maxLoss"
    maxJumpKey = "maxJump"
    maxHighJumpKey = "maxHighJump"

    def __init__(self, dbName, idxName, runParameters = None):
        evalbase.EvalBase.__init__(self, dbName, idxName, runParameters)

    def _loadIndexHistory(self, startDate, endDate = datetime.datetime.now()):
        """Fetch the index history between startDate and endDate.

        NOTE(review): the default endDate is evaluated once at class
        definition time, so "now" is actually module load time — confirm
        this is intended.
        """
        self.startDate = startDate
        self.endDate = endDate
        self.indexHistory = fetchdata.FetchData( self.indexName ).fetchDataByDate( self.startDate, self.endDate )

    def _calculateResult(self):
        """Run the start/end checkers over the history and collect results."""
        idxBuy = indexdata.IndexData()
        idxHistory = indexdata.IndexHistory()
        transactionList = indexdata.TransactionResultHistory()
        isInTransaction = False
        for idxData in self.indexHistory.indexHistory:
            # --- check transaction
            if isInTransaction:
                idxHistory.addIndexData( idxData )
                if self._checkEndTransaction( idxData, idxHistory.len() ):
                    transactionList.addTransactionResult( self._endTransaction( idxBuy, idxData, idxHistory ) )
                    isInTransaction = False
            if not isInTransaction:
                if self._checkStartTransaction( idxData ):
                    self._startTransaction( idxData )
                    isInTransaction = True
                    idxBuy = idxData
                    # Start a fresh per-transaction history with this entry.
                    idxHistory = indexdata.IndexHistory()
                    idxHistory.addIndexData( idxData )
        # Close any transaction still open at the end of the data, using
        # the last idxData seen in the loop above.
        if isInTransaction:
            transactionList.addTransactionResult(self._endTransaction(idxBuy, idxData, idxHistory))
        return transactionList
class EvalContinouslyMean(EvalContinously):
    """
    Continuous evaluation that opens/closes transactions on moving-average
    (mean) crossings, optionally filtered by gradient checkers.

    All configuration is read from ``runParameters`` via the key
    constants below; missing keys fall back to documented defaults.

    Bug fixes relative to the original implementation:
    - ``self.mean`` was only read when the *endMean* key was present
      (the wrong key was tested before the lookup, raising KeyError when
      only ``endMean`` was supplied and ignoring a supplied ``mean``);
    - the *endOffset2* and *endOffset3* branches assigned
      ``self.endOffset`` instead of ``self.endOffset2`` /
      ``self.endOffset3``, clobbering the primary end offset.
    """

    isCallKey = "isCall"
    meanKey = "mean"
    mean2Key = "mean2"
    mean3Key = "mean3"
    mean4Key = "mean4"
    mean5Key = "mean5"
    endMeanKey = "endMean"
    endMeanKey2 = "endMean2"
    endMeanKey3 = "endMean3"
    endMeanKey4 = "endMean4"
    endMeanKey5 = "endMean5"
    gradKey = "grad"
    grad2Key = "grad2"
    grad3Key = "grad3"
    minGradKey = "minGrad"
    minGrad2Key = "minGrad2"
    minGrad3Key = "minGrad3"
    startOffsetKey = "startOffset"
    endOffsetKey = "endOffset"
    startOffset2Key = "startOffset2"
    endOffset2Key = "endOffset2"
    startOffset3Key = "startOffset3"
    endOffset3Key = "endOffset3"

    def __init__(self, dbName, idxName, runParameters = None):
        EvalContinously.__init__(self, dbName, idxName, runParameters)
        # assumes self.runParameters is a dict (set by EvalBase) — each
        # parameter falls back to its default when the key is absent.
        params = self.runParameters if self.runParameters is not None else {}
        # Direction of the strategy: True for call (long), False for put.
        self.isCall = params.get(EvalContinouslyMean.isCallKey, True)
        # Moving-average window lengths used to open a transaction
        # (0 disables the corresponding extra checker).
        self.mean = params.get(EvalContinouslyMean.meanKey, 200)
        self.mean2 = params.get(EvalContinouslyMean.mean2Key, 0)
        self.mean3 = params.get(EvalContinouslyMean.mean3Key, 0)
        # Moving-average window lengths used to close a transaction;
        # endMean defaults to the opening mean.
        self.endMean = params.get(EvalContinouslyMean.endMeanKey, self.mean)
        self.endMean2 = params.get(EvalContinouslyMean.endMeanKey2, 0)
        self.endMean3 = params.get(EvalContinouslyMean.endMeanKey3, 0)
        # Gradient thresholds (0.0 disables the corresponding checker).
        self.grad = params.get(EvalContinouslyMean.gradKey, 0.0)
        self.minGrad = params.get(EvalContinouslyMean.minGradKey, 0.0)
        self.grad2 = params.get(EvalContinouslyMean.grad2Key, 0.0)
        self.minGrad2 = params.get(EvalContinouslyMean.minGrad2Key, 0.0)
        self.grad3 = params.get(EvalContinouslyMean.grad3Key, 0.0)
        self.minGrad3 = params.get(EvalContinouslyMean.minGrad3Key, 0.0)
        # Offsets applied to the means in the start/end checks.
        self.startOffset = params.get(EvalContinouslyMean.startOffsetKey, 0.0)
        self.endOffset = params.get(EvalContinouslyMean.endOffsetKey, 0.0)
        self.startOffset2 = params.get(EvalContinouslyMean.startOffset2Key, 0.0)
        self.endOffset2 = params.get(EvalContinouslyMean.endOffset2Key, 0.0)
        self.startOffset3 = params.get(EvalContinouslyMean.startOffset3Key, 0.0)
        self.endOffset3 = params.get(EvalContinouslyMean.endOffset3Key, 0.0)

    def _setupStartGradTransactionCheckers(self):
        """Add a gradient-based start checker for each non-zero grad.

        Must be called after _setupTransactionCheckers, which creates
        self.startTransactionChecker.
        """
        if self.grad != 0.0:
            self.startTransactionChecker.addTransactionChecker(transactionchecker.StartTransactionCheckerGrad(self.grad, self.minGrad, self.isCall))
        if self.grad2 != 0.0:
            self.startTransactionChecker.addTransactionChecker(transactionchecker.StartTransactionCheckerGrad(self.grad2, self.minGrad2, self.isCall))
        if self.grad3 != 0.0:
            self.startTransactionChecker.addTransactionChecker(transactionchecker.StartTransactionCheckerGrad(self.grad3, self.minGrad3, self.isCall))

    def _setupTransactionCheckers(self):
        """Build the start/end checker strategies from the configured means."""
        self.startTransactionChecker = transactionchecker.StartTransactionCheckerStrategie(
            [transactionchecker.StartTransactionCheckerMean(self.mean, self.startOffset, self.isCall)] )
        if self.mean2 != 0:
            self.startTransactionChecker.addTransactionChecker(transactionchecker.StartTransactionCheckerMean(self.mean2, self.startOffset2, self.isCall))
        if self.mean3 != 0:
            self.startTransactionChecker.addTransactionChecker(transactionchecker.StartTransactionCheckerMean(self.mean3, self.startOffset3, self.isCall))
        self.endTransactionChecker = transactionchecker.EndTransactionCheckerStrategie(
            [transactionchecker.EndTransactionCheckerMean( self.endMean, self.endOffset, self.isCall )] )
        if self.endMean2 != 0:
            self.endTransactionChecker.addTransactionChecker(transactionchecker.EndTransactionCheckerMean( self.endMean2, self.endOffset2, self.isCall))
        if self.endMean3 != 0:
            self.endTransactionChecker.addTransactionChecker(transactionchecker.EndTransactionCheckerMean( self.endMean3, self.endOffset3, self.isCall))
class EvalContinouslyGrad(EvalContinouslyMean):
    """Continuous evaluation driven purely by gradient checkers."""

    def __init__(self, dbName, idxName, runParameters = None):
        EvalContinouslyMean.__init__(self, dbName, idxName, runParameters)

    def _setupTransactionCheckers(self):
        # Start on the configured gradient; end on the inverted gradient.
        start_checker = transactionchecker.StartTransactionCheckerGrad(
            self.grad, self.minGrad, self.isCall)
        self.startTransactionChecker = transactionchecker.StartTransactionCheckerStrategie(
            [start_checker])
        self.endTransactionChecker = transactionchecker.EndTransactionCheckerGrad(
            self.grad, -(self.minGrad), self.isCall)
class EvalContinouslyMeanRunner(evalrunner.EvalRunner):
    """Runner that evaluates each index with EvalContinouslyMean."""

    def __init__(self, runParameters):
        evalrunner.EvalRunner.__init__(self, runParameters)

    def _createIndexEvaluation(self, indexName):
        return EvalContinouslyMean(self.dbName, indexName, self.runParameters)
class EvalContinouslyGradRunner(evalrunner.EvalRunner):
    """Runner that evaluates each index with EvalContinouslyGrad."""

    def __init__(self, runParameters):
        evalrunner.EvalRunner.__init__(self, runParameters)

    def _createIndexEvaluation(self, indexName):
        return EvalContinouslyGrad(self.dbName, indexName, self.runParameters)
| 41.259398
| 157
| 0.682733
|
e81547f5a9ff424d20f5829ae95876d8d09cd811
| 1,106
|
py
|
Python
|
sponge-app/sponge-app-demo-service/sponge/sponge_demo_binary_result.py
|
mnpas/sponge
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
[
"Apache-2.0"
] | 9
|
2017-12-16T21:48:57.000Z
|
2022-01-06T12:22:24.000Z
|
sponge-app/sponge-app-demo-service/sponge/sponge_demo_binary_result.py
|
mnpas/sponge
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
[
"Apache-2.0"
] | 3
|
2020-12-18T11:56:46.000Z
|
2022-03-31T18:37:10.000Z
|
sponge-app/sponge-app-demo-service/sponge/sponge_demo_binary_result.py
|
mnpas/sponge
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
[
"Apache-2.0"
] | 2
|
2019-12-29T16:08:32.000Z
|
2020-06-15T14:05:34.000Z
|
"""
Sponge Knowledge Base
Demo
"""
class HtmlFileOutput(Action):
    """Sponge action returning a small static HTML document as binary data."""
    def onConfigure(self):
        # Declare action metadata: no arguments, a binary result tagged
        # as text/html so clients can render it.
        self.withLabel("HTML file output").withDescription("Returns the HTML file.")
        self.withNoArgs().withResult(BinaryType().withMimeType("text/html").withLabel("HTML file"))
        self.withFeatures({"icon":"web"})
    def onCall(self):
        # The HTML payload is returned UTF-8 encoded.
        return String("""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head>
<title>HTML page</title>
</head>
<body>
<!-- Main content -->
<h1>Header</h1>
<p>Some text
</body>
</html>
""").getBytes("UTF-8")
class PdfFileOutput(Action):
    """Sponge action that downloads and returns a sample PDF file."""
    def onConfigure(self):
        # Declare action metadata: no arguments, binary application/pdf result.
        self.withLabel("PDF file output").withDescription("Returns the PDF file.")
        self.withNoArgs().withResult(BinaryType().withMimeType("application/pdf").withLabel("PDF file").withFeatures({"icon":"file-pdf"}))
        self.withFeatures({"icon":"file-pdf"})
    def onCall(self):
        # Fetches a sample PDF over HTTP via curl and returns the raw bytes.
        return sponge.process("curl", "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf").outputAsBinary().run().outputBinary
| 33.515152
| 148
| 0.641049
|
377473683665d62266d90c2f7aa4e13b632dc6e6
| 16,327
|
py
|
Python
|
telnetserver/telnetserver.py
|
OliverLSanz/python-telnetserver
|
c85a7786322b41af5aeb4d7da6f59c4e55c7a168
|
[
"MIT"
] | 1
|
2020-08-04T14:31:48.000Z
|
2020-08-04T14:31:48.000Z
|
telnetserver/telnetserver.py
|
OliverLSanz/Simple-Python-Telnet-Server
|
c85a7786322b41af5aeb4d7da6f59c4e55c7a168
|
[
"MIT"
] | 3
|
2020-08-04T14:33:09.000Z
|
2021-04-14T11:48:25.000Z
|
telnetserver/telnetserver.py
|
OliverLSanz/python-telnetserver
|
c85a7786322b41af5aeb4d7da6f59c4e55c7a168
|
[
"MIT"
] | null | null | null |
"""Basic Telnet server module.
Contains one class, TelnetServer, which can be instantiated to start a
server running then used to send and receive messages from clients.
It is a generalization made by Oliver L. Sanz from Mark Frimston's
mud-py server.
"""
import socket
import select
import time
import sys
class TelnetServer(object):
    """A basic Telnet server.

    Once created, the server will listen for clients connecting using
    Telnet. Messages can then be sent to and from multiple connected
    clients.

    The 'update' method should be called in a loop to keep the server
    running.
    """

    # An inner class which is instantiated for each connected client to store
    # info about them
    class _Client(object):
        """Holds information about a connected client"""

        # the socket object used to communicate with this client
        socket = None
        # the ip address of this client
        address = ""
        # holds data send from the client until a full message is received
        buffer = ""
        # the last time we checked if the client was still connected
        lastcheck = 0

        def __init__(self, socket, address, buffer, lastcheck):
            self.socket = socket
            self.address = address
            self.buffer = buffer
            self.lastcheck = lastcheck

    # Used to store different types of occurences
    _EVENT_NEW_client = 1
    _EVENT_client_LEFT = 2
    _EVENT_MESSAGE = 3

    # Different states we can be in while reading data from client
    # See _process_sent_data function
    _READ_STATE_NORMAL = 1
    _READ_STATE_MESSAGE = 2
    _READ_STATE_SUBNEG = 3

    # Command codes used by Telnet protocol
    # See _process_sent_data function
    _TN_INTERPRET_AS_MESSAGE = 255
    _TN_ARE_YOU_THERE = 246
    _TN_WILL = 251
    _TN_WONT = 252
    _TN_DO = 253
    _TN_DONT = 254
    _TN_SUBNEGOTIATION_START = 250
    _TN_SUBNEGOTIATION_END = 240

    # Class-level defaults; per-instance values are assigned in __init__.
    # socket used to listen for new clients
    _listen_socket = None
    # holds info on clients. Maps client id to _Client object
    _clients = {}
    # counter for assigning each client a new id
    _nextid = 0
    # list of occurences waiting to be handled by the code
    _events = []
    # list of newly-added occurences
    _new_events = []

    def __init__(self, encoding="utf-8", error_policy='replace', port=1234):
        """Constructs the TelnetServer object and starts listening for
        new clients.

        Args:
            encoding (str, optional): Encoding of the data to be
                processed. Valid values are specified here:
                https://docs.python.org/3/howto/unicode.html.
                Defaults to "utf-8".
            error_policy (str, optional): What to do when a character
                cannot be decoded. Valid values are specified here:
                https://docs.python.org/3/howto/unicode.html.
                Defaults to 'replace'.
            port (int, optional): Port for the server. Defaults to 1234.
        """
        self.error_policy = error_policy
        self.encoding = encoding
        self._clients = {}
        self._nextid = 0
        self._events = []
        self._new_events = []
        # create a new tcp socket which will be used to listen for new clients
        self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # set a special option on the socket which allows the port to be
        # reused immediately without having to wait
        self._listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                       1)
        # bind the socket to an ip address and port. Port 23 is the standard
        # telnet port which telnet clients will use, however on some platforms
        # this requires root permissions, so we use a higher arbitrary port
        # number instead: 1234. Address 0.0.0.0 means that we will bind to all
        # of the available network interfaces
        self._listen_socket.bind(("0.0.0.0", port))
        # set to non-blocking mode. This means that when we call 'accept', it
        # will return immediately without waiting for a connection
        self._listen_socket.setblocking(False)
        # start listening for connections on the socket
        self._listen_socket.listen(1)

    def update(self):
        """Checks for new clients, disconnected clients, and new
        messages sent from clients. This method must be called before
        up-to-date info can be obtained from the 'get_new_clients',
        'get_disconnected_clients' and 'get_messages' methods.
        It should be called in a loop to keep the server running.
        """
        # check for new stuff
        self._check_for_new_connections()
        self._check_for_disconnected()
        self._check_for_messages()
        # move the new events into the main events list so that they can be
        # obtained with 'get_new_clients', 'get_disconnected_clients' and
        # 'get_messages'. The previous events are discarded
        self._events = list(self._new_events)
        self._new_events = []

    def get_new_clients(self):
        """Returns a list containing info on any new clients that have
        entered the server since the last call to 'update'. Each item in
        the list is a client id number.
        """
        retval = []
        # go through all the events in the main list
        for ev in self._events:
            # if the event is a new client occurence, add the info to the list
            if ev[0] == self._EVENT_NEW_client:
                retval.append(ev[1])
        # return the info list
        return retval

    def get_disconnected_clients(self):
        """Returns a list containing info on any clients that have left
        the server since the last call to 'update'. Each item in the list
        is a client id number.
        """
        retval = []
        # go through all the events in the main list
        for ev in self._events:
            # if the event is a client disconnect occurence, add the info to
            # the list
            if ev[0] == self._EVENT_client_LEFT:
                retval.append(ev[1])
        # return the info list
        return retval

    def get_messages(self):
        """Returns a list containing any messages sent from clients
        since the last call to 'update'. Each item in the list is a
        2-tuple containing the id number of the sending client, and
        a string containing the message sent by the user.
        """
        retval = []
        # go through all the events in the main list
        for ev in self._events:
            # if the event is a message occurence, add the info to the list
            if ev[0] == self._EVENT_MESSAGE:
                retval.append((ev[1], ev[2]))
        # return the info list
        return retval

    def send_message(self, to, message):
        """Sends the text in the 'message' parameter to the client with
        the id number given in the 'to' parameter. The text will be
        printed out in the client's terminal.
        """
        # we make sure to put a newline on the end so the client receives the
        # message on its own line
        self._attempt_send(to, message+"\n\r")

    def shutdown(self):
        """Closes down the server, disconnecting all clients and
        closing the listen socket.
        """
        # for each client
        for cl in self._clients.values():
            # close the socket, disconnecting the client
            cl.socket.shutdown(socket.SHUT_RDWR)
            cl.socket.close()
        # stop listening for new clients
        self._listen_socket.close()

    def _attempt_send(self, clid, data):
        """Sends 'data' to the client with id 'clid'.

        Unknown ids are silently ignored; a socket error drops the
        client via _handle_disconnect.
        """
        # python 2/3 compatibility fix - convert non-unicode string to
        # unicode. BUG FIX: the interpreter version is tested with
        # sys.version_info instead of a lexicographic comparison of the
        # version string, which would misclassify major versions >= 10.
        if sys.version_info[0] < 3 and type(data) != unicode:
            data = unicode(data, self.encoding)
        try:
            # look up the client in the client map and use 'sendall' to send
            # the message string on the socket. 'sendall' ensures that all of
            # the data is sent in one go
            self._clients[clid].socket.sendall(bytearray(data, self.encoding))
        # KeyError will be raised if there is no client with the given id in
        # the map
        except KeyError:
            pass
        # If there is a connection problem with the client (e.g. they have
        # disconnected) a socket error will be raised
        except socket.error:
            self._handle_disconnect(clid)

    def _check_for_new_connections(self):
        """Accepts a pending connection, if any, and registers the new
        client under a fresh id number.
        """
        # 'select' is used to check whether there is data waiting to be read
        # from the socket. We pass in 3 lists of sockets, the first being those
        # to check for readability. It returns 3 lists, the first being
        # the sockets that are readable. The last parameter is how long to wait
        # - we pass in 0 so that it returns immediately without waiting
        rlist, wlist, xlist = select.select([self._listen_socket], [], [], 0)
        # if the socket wasn't in the readable list, there's no data available,
        # meaning no clients waiting to connect, and so we can exit the method
        # here
        if self._listen_socket not in rlist:
            return
        # 'accept' returns a new socket and address info which can be used to
        # communicate with the new client
        joined_socket, addr = self._listen_socket.accept()
        # set non-blocking mode on the new socket. This means that 'send' and
        # 'recv' will return immediately without waiting
        joined_socket.setblocking(False)
        # construct a new _Client object to hold info about the newly connected
        # client. Use 'nextid' as the new client's id number
        self._clients[self._nextid] = TelnetServer._Client(joined_socket,
                                                           addr[0], "",
                                                           time.time())
        # add a new client occurence to the new events list with the client's
        # id number
        self._new_events.append((self._EVENT_NEW_client, self._nextid))
        # add 1 to 'nextid' so that the next client to connect will get a
        # unique id number
        self._nextid += 1

    def _check_for_disconnected(self):
        """Probes idle clients so that dead connections get detected."""
        # go through all the clients
        for id, cl in list(self._clients.items()):
            # if we last checked the client less than 5 seconds ago, skip this
            # client and move on to the next one
            if time.time() - cl.lastcheck < 5.0:
                continue
            # send the client an invisible character. It doesn't actually
            # matter what we send, we're really just checking that data can
            # still be written to the socket. If it can't, an error will be
            # raised and we'll know that the client has disconnected.
            self._attempt_send(id, "\x00")
            # update the last check time
            cl.lastcheck = time.time()

    def _check_for_messages(self):
        """Reads pending data from each client and queues message events."""
        # go through all the clients
        for id, cl in list(self._clients.items()):
            # we use 'select' to test whether there is data waiting to be read
            # from the client socket. The function takes 3 lists of sockets,
            # the first being those to test for readability. It returns 3 list
            # of sockets, the first being those that are actually readable.
            rlist, wlist, xlist = select.select([cl.socket], [], [], 0)
            # if the client socket wasn't in the readable list, there is no
            # new data from the client - we can skip it and move on to the next
            # one
            if cl.socket not in rlist:
                continue
            try:
                # read data from the socket, using a max length of 4096
                data = cl.socket.recv(4096).decode(self.encoding,
                                                   self.error_policy)
                # process the data, stripping out any special Telnet messages
                message = self._process_sent_data(cl, data)
                # if there was a message in the data
                if message:
                    # remove any spaces, tabs etc from the start and end of
                    # the message
                    message = message.strip()
                    # add a message occurence to the new events list with the
                    # client's id number, and the message
                    self._new_events.append((self._EVENT_MESSAGE, id,
                                             message))
            # if there is a problem reading from the socket (e.g. the client
            # has disconnected) a socket error will be raised
            except socket.error:
                self._handle_disconnect(id)

    def _handle_disconnect(self, clid):
        """Removes client 'clid' and queues a 'client left' event."""
        # remove the client from the clients map
        client = self._clients.pop(clid)
        # BUG FIX: close the socket so its file descriptor is released
        # instead of leaking. The connection may already be dead (or a
        # test may supply no socket at all), so failures are ignored.
        try:
            client.socket.close()
        except (AttributeError, socket.error):
            pass
        # add a 'client left' occurence to the new events list, with the
        # client's id number
        self._new_events.append((self._EVENT_client_LEFT, clid))

    def _process_sent_data(self, client, data):
        """Strips Telnet command sequences out of 'data' and returns the
        accumulated text from the client's buffer.

        NOTE(review): the buffer is flushed on every call, even when no
        newline has arrived, so character-mode clients can produce
        partial messages -- confirm this generalization is intended.
        """
        # the Telnet protocol allows special message codes to be inserted into
        # messages. For our very simple server we don't need to response to
        # any of these codes, but we must at least detect and skip over them
        # so that we don't interpret them as text data.
        # More info on the Telnet protocol can be found here:
        # http://pcmicro.com/netfoss/telnet.html
        # start with no message and in the normal state
        message = None
        state = self._READ_STATE_NORMAL
        # go through the data a character at a time
        for c in data:
            # handle the character differently depending on the state we're in:
            # normal state
            if state == self._READ_STATE_NORMAL:
                # if we received the special 'interpret as message' code,
                # switch to 'message' state so that we handle the next
                # character as a message code and not as regular text data
                if ord(c) == self._TN_INTERPRET_AS_MESSAGE:
                    state = self._READ_STATE_MESSAGE
                # some telnet clients send the characters as soon as the user
                # types them. So if we get a backspace character, this is where
                # the user has deleted a character and we should delete the
                # last character from the buffer.
                elif c == "\x08":
                    client.buffer = client.buffer[:-1]
                # otherwise it's just a regular character - add it to the
                # buffer where we're building up the received message
                else:
                    client.buffer += c
            # message state
            elif state == self._READ_STATE_MESSAGE:
                # the special 'start of subnegotiation' message code indicates
                # that the following characters are a list of options until
                # we're told otherwise. We switch into 'subnegotiation' state
                # to handle this
                if ord(c) == self._TN_SUBNEGOTIATION_START:
                    state = self._READ_STATE_SUBNEG
                # if the message code is one of the 'will', 'wont', 'do' or
                # 'dont' messages, the following character will be an option
                # code so we must remain in the 'message' state
                elif ord(c) in (self._TN_WILL, self._TN_WONT, self._TN_DO,
                                self._TN_DONT):
                    state = self._READ_STATE_MESSAGE
                # for all other message codes, there is no accompanying data so
                # we can return to 'normal' state.
                else:
                    state = self._READ_STATE_NORMAL
            # subnegotiation state
            elif state == self._READ_STATE_SUBNEG:
                # if we reach an 'end of subnegotiation' message, this ends the
                # list of options and we can return to 'normal' state.
                # Otherwise we must remain in this state
                if ord(c) == self._TN_SUBNEGOTIATION_END:
                    state = self._READ_STATE_NORMAL
        # hand back everything buffered so far (possibly an empty string,
        # which callers treat as "no message") and reset the buffer
        message = client.buffer
        client.buffer = ""
        return message
| 40.715711
| 190
| 0.614565
|
40ae38c9ad3a9a0b56474e1c28aaa4ea1e9451af
| 68,921
|
py
|
Python
|
sleekxmpp/xmlstream/xmlstream.py
|
dashdash-chat/SleekXMPP
|
00d5de83536e9ca22ea4a27460e42097651ce60f
|
[
"BSD-3-Clause"
] | null | null | null |
sleekxmpp/xmlstream/xmlstream.py
|
dashdash-chat/SleekXMPP
|
00d5de83536e9ca22ea4a27460e42097651ce60f
|
[
"BSD-3-Clause"
] | null | null | null |
sleekxmpp/xmlstream/xmlstream.py
|
dashdash-chat/SleekXMPP
|
00d5de83536e9ca22ea4a27460e42097651ce60f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sleekxmpp.xmlstream.xmlstream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides the module for creating and
interacting with generic XML streams, along with
the necessary eventing infrastructure.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from __future__ import with_statement, unicode_literals
import base64
import copy
import logging
import signal
import socket as Socket
import ssl
import sys
import threading
import time
import random
import weakref
import uuid
try:
import queue
except ImportError:
import Queue as queue
from xml.parsers.expat import ExpatError
import sleekxmpp
from sleekxmpp.thirdparty.statemachine import StateMachine
from sleekxmpp.xmlstream import Scheduler, tostring, cert
from sleekxmpp.xmlstream.stanzabase import StanzaBase, ET, ElementBase
from sleekxmpp.xmlstream.handler import Waiter, XMLCallback
from sleekxmpp.xmlstream.matcher import MatchXMLMask
from sleekxmpp.xmlstream.resolver import resolve, default_resolver
# In Python 2.x, file socket objects are broken. A patched socket
# wrapper is provided for this case in filesocket.py.
if sys.version_info < (3, 0):
from sleekxmpp.xmlstream.filesocket import FileSocket, Socket26
#: The time in seconds to wait before timing out waiting for response stanzas.
RESPONSE_TIMEOUT = 30
#: The time in seconds to wait for events from the event queue, and also the
#: time between checks for the process stop signal.
WAIT_TIMEOUT = 0.1
#: The number of threads to use to handle XML stream events. This is not the
#: same as the number of custom event handling threads.
#: :data:`HANDLER_THREADS` must be at least 1. For Python implementations
#: with a GIL, this should be left at 1, but for implementations without
#: a GIL increasing this value can provide better performance.
HANDLER_THREADS = 1
#: Flag indicating if the SSL library is available for use.
SSL_SUPPORT = True
#: The time in seconds to delay between attempts to resend data
#: after an SSL error.
SSL_RETRY_DELAY = 0.5
#: The maximum number of times to attempt resending data due to
#: an SSL error.
SSL_RETRY_MAX = 10
#: Maximum time in seconds to delay between connection attempts
#: (600 seconds, i.e. ten minutes).
RECONNECT_MAX_DELAY = 600
#: Maximum number of attempts to connect to the server before quitting
#: and raising a 'connect_failed' event. Setting this to ``None`` will
#: allow infinite reconnection attempts, and using ``0`` will disable
#: reconnections. Defaults to ``None``.
RECONNECT_MAX_ATTEMPTS = None
# Module-level logger for this stream implementation.
log = logging.getLogger(__name__)
class RestartStream(Exception):
    """Signal that stream processing should start over.

    Raising this exception causes the stream to be reprocessed from the
    beginning, including resending the stream header.
    """
class XMLStream(object):
"""
An XML stream connection manager and event dispatcher.
The XMLStream class abstracts away the issues of establishing a
connection with a server and sending and receiving XML "stanzas".
A stanza is a complete XML element that is a direct child of a root
document element. Two streams are used, one for each communication
direction, over the same socket. Once the connection is closed, both
streams should be complete and valid XML documents.
Three types of events are provided to manage the stream:
:Stream: Triggered based on received stanzas, similar in concept
to events in a SAX XML parser.
:Custom: Triggered manually.
:Scheduled: Triggered based on time delays.
Typically, stanzas are first processed by a stream event handler which
will then trigger custom events to continue further processing,
especially since custom event handlers may run in individual threads.
:param socket: Use an existing socket for the stream. Defaults to
``None`` to generate a new socket.
:param string host: The name of the target server.
:param int port: The port to use for the connection. Defaults to 0.
"""
    def __init__(self, socket=None, host='', port=0):
        """Initialize stream state and default configuration.

        :param socket: Use an existing socket for the stream. Defaults
                       to ``None`` to generate a new socket.
        :param string host: The name of the target server.
        :param int port: The port to use for the connection.
                         Defaults to 0.
        """
        #: Flag indicating if the SSL library is available for use.
        self.ssl_support = SSL_SUPPORT
        #: Most XMPP servers support TLSv1, but OpenFire in particular
        #: does not work well with it. For OpenFire, set
        #: :attr:`ssl_version` to use ``SSLv23``::
        #:
        #:     import ssl
        #:     xmpp.ssl_version = ssl.PROTOCOL_SSLv23
        self.ssl_version = ssl.PROTOCOL_TLSv1
        #: Path to a file containing certificates for verifying the
        #: server SSL certificate. A non-``None`` value will trigger
        #: certificate checking.
        #:
        #: .. note::
        #:
        #:     On Mac OS X, certificates in the system keyring will
        #:     be consulted, even if they are not in the provided file.
        self.ca_certs = None
        #: The time in seconds to wait for events from the event queue,
        #: and also the time between checks for the process stop signal.
        self.wait_timeout = WAIT_TIMEOUT
        #: The time in seconds to wait before timing out waiting
        #: for response stanzas.
        self.response_timeout = RESPONSE_TIMEOUT
        #: The current amount of time to delay attempting to reconnect.
        #: This value doubles (with some jitter) with each failed
        #: connection attempt up to :attr:`reconnect_max_delay` seconds.
        self.reconnect_delay = None
        #: Maximum time in seconds to delay between connection attempts.
        self.reconnect_max_delay = RECONNECT_MAX_DELAY
        #: Maximum number of attempts to connect to the server before
        #: quitting and raising a 'connect_failed' event. Setting to
        #: ``None`` allows infinite reattempts, while setting it to ``0``
        #: will disable reconnection attempts. Defaults to ``None``.
        self.reconnect_max_attempts = RECONNECT_MAX_ATTEMPTS
        #: The maximum number of times to attempt resending data due to
        #: an SSL error.
        self.ssl_retry_max = SSL_RETRY_MAX
        #: The time in seconds to delay between attempts to resend data
        #: after an SSL error.
        self.ssl_retry_delay = SSL_RETRY_DELAY
        #: The connection state machine tracks if the stream is
        #: ``'connected'`` or ``'disconnected'``.
        self.state = StateMachine(('disconnected', 'connected'))
        self.state._set_state('disconnected')
        #: The default port to return when querying DNS records.
        self.default_port = int(port)
        #: The domain to try when querying DNS records.
        self.default_domain = ''
        #: The expected name of the server, for validation.
        self._expected_server_name = ''
        #: The desired, or actual, address of the connected server.
        self.address = (host, int(port))
        #: A file-like wrapper for the socket for use with the
        #: :mod:`~xml.etree.ElementTree` module.
        self.filesocket = None
        self.set_socket(socket)
        if sys.version_info < (3, 0):
            self.socket_class = Socket26
        else:
            self.socket_class = Socket.socket
        #: Enable connecting to the server directly over SSL, in
        #: particular when the service provides two ports: one for
        #: non-SSL traffic and another for SSL traffic.
        self.use_ssl = False
        #: Enable connecting to the service without using SSL
        #: immediately, but allow upgrading the connection later
        #: to use SSL.
        self.use_tls = False
        #: If set to ``True``, attempt to connect through an HTTP
        #: proxy based on the settings in :attr:`proxy_config`.
        self.use_proxy = False
        #: An optional dictionary of proxy settings. It may provide:
        #: :host: The host offering proxy services.
        #: :port: The port for the proxy service.
        #: :username: Optional username for accessing the proxy.
        #: :password: Optional password for accessing the proxy.
        self.proxy_config = {}
        #: The default namespace of the stream content, not of the
        #: stream wrapper itself.
        self.default_ns = ''
        #: The namespace of the enveloping stream element.
        self.stream_ns = ''
        #: The default opening tag for the stream element.
        self.stream_header = "<stream>"
        #: The default closing tag for the stream element.
        self.stream_footer = "</stream>"
        #: If ``True``, periodically send a whitespace character over the
        #: wire to keep the connection alive. Mainly useful for connections
        #: traversing NAT.
        self.whitespace_keepalive = True
        #: The default interval between keepalive signals when
        #: :attr:`whitespace_keepalive` is enabled.
        self.whitespace_keepalive_interval = 300
        #: An :class:`~threading.Event` to signal that the application
        #: is stopping, and that all threads should shutdown.
        self.stop = threading.Event()
        #: An :class:`~threading.Event` to signal receiving a closing
        #: stream tag from the server.
        self.stream_end_event = threading.Event()
        self.stream_end_event.set()
        #: An :class:`~threading.Event` to signal the start of a stream
        #: session. Until this event fires, the send queue is not used
        #: and data is sent immediately over the wire.
        self.session_started_event = threading.Event()
        #: The default time in seconds to wait for a session to start
        #: after connecting before reconnecting and trying again.
        self.session_timeout = 45
        #: Flag for controlling if the session can be considered ended
        #: if the connection is terminated.
        self.end_session_on_disconnect = True
        #: A queue of stream, custom, and scheduled events to be processed.
        self.event_queue = queue.Queue()
        #: A queue of string data to be sent over the stream.
        self.send_queue = queue.Queue()
        self.send_queue_lock = threading.Lock()
        self.send_lock = threading.RLock()
        #: A :class:`~sleekxmpp.xmlstream.scheduler.Scheduler` instance for
        #: executing callbacks in the future based on time delays.
        self.scheduler = Scheduler(self.stop)
        self.__failed_send_stanza = None
        #: A mapping of XML namespaces to well-known prefixes.
        self.namespace_map = {StanzaBase.xml_ns: 'xml'}
        self.__thread = {}
        self.__root_stanza = []
        self.__handlers = []
        self.__event_handlers = {}
        self.__event_handlers_lock = threading.Lock()
        self.__filters = {'in': [], 'out': [], 'out_sync': []}
        self.__thread_count = 0
        self.__thread_cond = threading.Condition()
        self.__active_threads = set()
        self._use_daemons = False
        self._disconnect_wait_for_threads = True
        self._id = 0
        self._id_lock = threading.Lock()
        #: We use an ID prefix to ensure that all ID values are unique.
        self._id_prefix = '%s-' % uuid.uuid4()
        #: The :attr:`auto_reconnect` setting controls whether or not
        #: the stream will be restarted in the event of an error.
        self.auto_reconnect = True
        #: The :attr:`disconnect_wait` setting is the default value
        #: for controlling if the system waits for the send queue to
        #: empty before ending the stream. This may be overridden by
        #: passing ``wait=True`` or ``wait=False`` to :meth:`disconnect`.
        #: The default :attr:`disconnect_wait` value is ``False``.
        self.disconnect_wait = False
        #: A list of DNS results that have not yet been tried.
        self.dns_answers = []
        #: The service name to check with DNS SRV records. For
        #: example, setting this to ``'xmpp-client'`` would query the
        #: ``_xmpp-client._tcp`` service.
        self.dns_service = None
        # Wire up the internal lifecycle handlers (session watchdog,
        # schedule cleanup, keepalive, certificate expiration check).
        self.add_event_handler('connected', self._handle_connected)
        self.add_event_handler('disconnected', self._remove_schedules)
        self.add_event_handler('session_start', self._start_keepalive)
        self.add_event_handler('session_start', self._cert_expiration)
def use_signals(self, signals=None):
"""Register signal handlers for ``SIGHUP`` and ``SIGTERM``.
By using signals, a ``'killed'`` event will be raised when the
application is terminated.
If a signal handler already existed, it will be executed first,
before the ``'killed'`` event is raised.
:param list signals: A list of signal names to be monitored.
Defaults to ``['SIGHUP', 'SIGTERM']``.
"""
if signals is None:
signals = ['SIGHUP', 'SIGTERM']
existing_handlers = {}
for sig_name in signals:
if hasattr(signal, sig_name):
sig = getattr(signal, sig_name)
handler = signal.getsignal(sig)
if handler:
existing_handlers[sig] = handler
def handle_kill(signum, frame):
"""
Capture kill event and disconnect cleanly after first
spawning the ``'killed'`` event.
"""
if signum in existing_handlers and \
existing_handlers[signum] != handle_kill:
existing_handlers[signum](signum, frame)
self.event("killed", direct=True)
self.disconnect()
try:
for sig_name in signals:
if hasattr(signal, sig_name):
sig = getattr(signal, sig_name)
signal.signal(sig, handle_kill)
self.__signals_installed = True
except:
log.debug("Can not set interrupt signal handlers. " + \
"SleekXMPP is not running from a main thread.")
def new_id(self):
"""Generate and return a new stream ID in hexadecimal form.
Many stanzas, handlers, or matchers may require unique
ID values. Using this method ensures that all new ID values
are unique in this stream.
"""
with self._id_lock:
self._id += 1
return self.get_id()
def get_id(self):
"""Return the current unique stream ID in hexadecimal form."""
return "%s%X" % (self._id_prefix, self._id)
    def connect(self, host='', port=0, use_ssl=False,
                use_tls=True, reattempt=True):
        """Create a new socket and connect to the server.

        Setting ``reattempt`` to ``True`` will cause connection
        attempts to be made with an exponential backoff delay (max of
        :attr:`reconnect_max_delay` which defaults to 10 minutes) until
        a successful connection is established.

        :param host: The name of the desired server for the connection.
        :param port: Port to connect to on the server.
        :param use_ssl: Flag indicating if SSL should be used by connecting
                        directly to a port using SSL.
        :param use_tls: Flag indicating if TLS should be used, allowing for
                        connecting to a port without using SSL immediately and
                        later upgrading the connection.
        :param reattempt: Flag indicating if the socket should reconnect
                          after disconnections.
        :returns: ``True`` once connected, ``False`` if the attempt
                  limit is exhausted or the stream is stopping.
        """
        if host and port:
            self.address = (host, int(port))
        # If the host is not a literal IP address, remember it as the
        # domain for DNS (SRV) resolution during _connect.
        try:
            Socket.inet_aton(self.address[0])
        except (Socket.error, ssl.SSLError):
            self.default_domain = self.address[0]
        # Respect previous SSL and TLS usage directives.
        # (Passing None explicitly keeps the current setting.)
        if use_ssl is not None:
            self.use_ssl = use_ssl
        if use_tls is not None:
            self.use_tls = use_tls
        # Repeatedly attempt to connect until a successful connection
        # is established. The actual work happens in _connect, run
        # inside the disconnected -> connected state transition.
        attempts = self.reconnect_max_attempts
        connected = self.state.transition('disconnected', 'connected',
                                          func=self._connect,
                                          args=(reattempt,))
        while reattempt and not connected and not self.stop.is_set():
            connected = self.state.transition('disconnected', 'connected',
                                              func=self._connect)
            if not connected:
                if attempts is not None:
                    attempts -= 1
                    if attempts <= 0:
                        self.event('connection_failed', direct=True)
                        return False
        return connected
    def _connect(self, reattempt=True):
        """Perform one connection attempt (with backoff delay).

        Runs inside the state machine's 'disconnected' -> 'connected'
        transition. Handles the reconnect backoff sleep, DNS answer
        selection, optional HTTP proxy tunneling, direct-SSL wrapping,
        and certificate validation.

        :param reattempt: When ``True``, a failure records the current
                          backoff delay so the next attempt waits longer.
        :returns: ``True`` on success, ``False`` on failure.
        """
        self.scheduler.remove('Session timeout check')
        self.stop.clear()
        # Exponential backoff with jitter, capped at reconnect_max_delay.
        if self.reconnect_delay is None or not reattempt:
            delay = 1.0
        else:
            delay = min(self.reconnect_delay * 2, self.reconnect_max_delay)
            delay = random.normalvariate(delay, delay * 0.1)
            log.debug('Waiting %s seconds before connecting.', delay)
        # Sleep in small slices so a stop request interrupts the wait.
        elapsed = 0
        try:
            while elapsed < delay and not self.stop.is_set():
                time.sleep(0.1)
                elapsed += 0.1
        except KeyboardInterrupt:
            self.stop.set()
            return False
        except SystemExit:
            self.stop.set()
            return False
        # Pick the next DNS answer for the target domain, if resolving.
        if self.default_domain:
            try:
                self.address = self.pick_dns_answer(self.default_domain,
                                                    self.address[1])
            except StopIteration:
                log.debug("No remaining DNS records to try.")
                self.dns_answers = None
                if reattempt:
                    self.reconnect_delay = delay
                return False
        # Choose the address family based on the literal address form.
        af = Socket.AF_INET
        proto = 'IPv4'
        if ':' in self.address[0]:
            af = Socket.AF_INET6
            proto = 'IPv6'
        try:
            self.socket = self.socket_class(af, Socket.SOCK_STREAM)
        except Socket.error:
            log.debug("Could not connect using %s", proto)
            return False
        self.configure_socket()
        # Establish the HTTP proxy tunnel first, if one is configured.
        if self.use_proxy:
            connected = self._connect_proxy()
            if not connected:
                if reattempt:
                    self.reconnect_delay = delay
                return False
        # Wrap the socket for direct SSL (as opposed to STARTTLS).
        if self.use_ssl and self.ssl_support:
            log.debug("Socket Wrapped for SSL")
            if self.ca_certs is None:
                cert_policy = ssl.CERT_NONE
            else:
                cert_policy = ssl.CERT_REQUIRED
            ssl_socket = ssl.wrap_socket(self.socket,
                                         ca_certs=self.ca_certs,
                                         cert_reqs=cert_policy,
                                         do_handshake_on_connect=False)
            if hasattr(self.socket, 'socket'):
                # We are using a testing socket, so preserve the top
                # layer of wrapping.
                self.socket.socket = ssl_socket
            else:
                self.socket = ssl_socket
        try:
            if not self.use_proxy:
                domain = self.address[0]
                if ':' in domain:
                    domain = '[%s]' % domain
                log.debug("Connecting to %s:%s", domain, self.address[1])
                self.socket.connect(self.address)
            # For direct SSL, complete the handshake and validate the
            # presented certificate chain and hostname.
            if self.use_ssl and self.ssl_support:
                try:
                    self.socket.do_handshake()
                except (Socket.error, ssl.SSLError):
                    log.error('CERT: Invalid certificate trust chain.')
                    if not self.event_handled('ssl_invalid_chain'):
                        self.disconnect(self.auto_reconnect,
                                        send_close=False)
                    else:
                        self.event('ssl_invalid_chain', direct=True)
                    return False
                self._der_cert = self.socket.getpeercert(binary_form=True)
                pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
                log.debug('CERT: %s', pem_cert)
                self.event('ssl_cert', pem_cert, direct=True)
                try:
                    cert.verify(self._expected_server_name, self._der_cert)
                except cert.CertificateError as err:
                    log.error(err.message)
                    if not self.event_handled('ssl_invalid_cert'):
                        self.disconnect(send_close=False)
                    else:
                        self.event('ssl_invalid_cert', pem_cert,
                                   direct=True)
            self.set_socket(self.socket, ignore=True)
            #this event is where you should set your application state
            self.event("connected", direct=True)
            # Success resets the backoff delay for future reconnects.
            self.reconnect_delay = 1.0
            return True
        except (Socket.error, ssl.SSLError) as serr:
            error_msg = "Could not connect to %s:%s. Socket Error #%s: %s"
            self.event('socket_error', serr, direct=True)
            domain = self.address[0]
            if ':' in domain:
                domain = '[%s]' % domain
            log.error(error_msg, domain, self.address[1],
                      serr.errno, serr.strerror)
            return False
def _connect_proxy(self):
"""Attempt to connect using an HTTP Proxy."""
# Extract the proxy address, and optional credentials
address = (self.proxy_config['host'], int(self.proxy_config['port']))
cred = None
if self.proxy_config['username']:
username = self.proxy_config['username']
password = self.proxy_config['password']
cred = '%s:%s' % (username, password)
if sys.version_info < (3, 0):
cred = bytes(cred)
else:
cred = bytes(cred, 'utf-8')
cred = base64.b64encode(cred).decode('utf-8')
# Build the HTTP headers for connecting to the XMPP server
headers = ['CONNECT %s:%s HTTP/1.0' % self.address,
'Host: %s:%s' % self.address,
'Proxy-Connection: Keep-Alive',
'Pragma: no-cache',
'User-Agent: SleekXMPP/%s' % sleekxmpp.__version__]
if cred:
headers.append('Proxy-Authorization: Basic %s' % cred)
headers = '\r\n'.join(headers) + '\r\n\r\n'
try:
log.debug("Connecting to proxy: %s:%s", address)
self.socket.connect(address)
self.send_raw(headers, now=True)
resp = ''
while '\r\n\r\n' not in resp and not self.stop.is_set():
resp += self.socket.recv(1024).decode('utf-8')
log.debug('RECV: %s', resp)
lines = resp.split('\r\n')
if '200' not in lines[0]:
self.event('proxy_error', resp)
log.error('Proxy Error: %s', lines[0])
return False
# Proxy connection established, continue connecting
# with the XMPP server.
return True
except (Socket.error, ssl.SSLError) as serr:
error_msg = "Could not connect to %s:%s. Socket Error #%s: %s"
self.event('socket_error', serr, direct=True)
log.error(error_msg, self.address[0], self.address[1],
serr.errno, serr.strerror)
return False
    def _handle_connected(self, event=None):
        """
        Add check to ensure that a session is established within
        a reasonable amount of time.

        :param event: Ignored; present so this method can be registered
                      directly as an event handler callback.
        """
        def _handle_session_timeout():
            # Only force a disconnect if the session never started
            # within self.session_timeout seconds.
            if not self.session_started_event.is_set():
                log.debug("Session start has taken more " + \
                          "than %d seconds", self.session_timeout)
                self.disconnect(reconnect=self.auto_reconnect)
        self.schedule("Session timeout check",
                      self.session_timeout,
                      _handle_session_timeout)
    def disconnect(self, reconnect=False, wait=None, send_close=True):
        """Terminate processing and close the XML streams.

        Optionally, the connection may be reconnected and
        resume processing afterwards.

        If the disconnect should take place after all items
        in the send queue have been sent, use ``wait=True``.

        .. warning::
            If you are constantly adding items to the queue
            such that it is never empty, then the disconnect will
            not occur and the call will continue to block.

        :param reconnect: Flag indicating if the connection
                          and processing should be restarted.
                          Defaults to ``False``.
        :param wait: Flag indicating if the send queue should
                     be emptied before disconnecting, overriding
                     :attr:`disconnect_wait`.
        :param send_close: Flag indicating if the stream footer
                           should be sent before terminating the
                           connection. Setting this to ``False``
                           prevents error loops when trying to
                           disconnect after a socket error.
        """
        # Route through the state machine so the connected->disconnected
        # transition is serialized; wait up to 2 seconds for the lock.
        self.state.transition('connected', 'disconnected',
                              wait=2.0,
                              func=self._disconnect,
                              args=(reconnect, wait, send_close))
    def _disconnect(self, reconnect=False, wait=None, send_close=True):
        """Perform the actual stream teardown (invoked via the state machine).

        :param reconnect: If ``True``, leave :attr:`auto_reconnect` set so
                          processing restarts after the disconnect.
        :param wait: If ``True``, drain the send queue first; if ``None``,
                     fall back to :attr:`disconnect_wait`.
        :param send_close: If ``True``, send the stream footer and wait
                           briefly for the server to close its side too.
        """
        if self.end_session_on_disconnect or send_close:
            self.event('session_end', direct=True)

        # Wait for the send queue to empty.
        if wait is not None:
            if wait:
                self.send_queue.join()
        elif self.disconnect_wait:
            self.send_queue.join()

        # Clearing this event will pause the send loop.
        self.session_started_event.clear()

        self.__failed_send_stanza = None

        # Send the end of stream marker.
        if send_close:
            self.send_raw(self.stream_footer, now=True)

        # Wait for confirmation that the stream was
        # closed in the other direction. If we didn't
        # send a stream footer we don't need to wait
        # since the server won't know to respond.
        self.auto_reconnect = reconnect
        if send_close:
            log.info('Waiting for %s from server', self.stream_footer)
            self.stream_end_event.wait(4)
        else:
            self.stream_end_event.set()

        if not self.auto_reconnect:
            self.stop.set()
            if self._disconnect_wait_for_threads:
                self._wait_for_threads()

        try:
            self.socket.shutdown(Socket.SHUT_RDWR)
            self.socket.close()
            self.filesocket.close()
        except (Socket.error, ssl.SSLError) as serr:
            self.event('socket_error', serr, direct=True)
        finally:
            #clear your application state
            self.event("disconnected", direct=True)
            return True
    def reconnect(self, reattempt=True, wait=False, send_close=True):
        """Reset the stream's state and reconnect to the server.

        :param reattempt: If ``True``, keep retrying until connected,
                          processing stops, or the maximum number of
                          attempts is exhausted.
        :param wait: Drain the send queue before disconnecting.
        :param send_close: Send the stream footer when disconnecting.
        """
        log.debug("reconnecting...")
        if self.state.ensure('connected'):
            self.state.transition('connected', 'disconnected',
                                  wait=2.0,
                                  func=self._disconnect,
                                  args=(True, wait, send_close))

        attempts = self.reconnect_max_attempts

        log.debug("connecting...")
        connected = self.state.transition('disconnected', 'connected',
                                          wait=2.0, func=self._connect, args=(reattempt,))
        # Keep retrying until connected (or stopped); each failed pass
        # consumes one attempt when a maximum is configured.
        while reattempt and not connected and not self.stop.is_set():
            connected = self.state.transition('disconnected', 'connected',
                                              wait=2.0, func=self._connect)
            connected = connected or self.state.ensure('connected')
            if not connected:
                if attempts is not None:
                    attempts -= 1
                    if attempts <= 0:
                        self.event('connection_failed', direct=True)
                        return False
        return connected
    def set_socket(self, socket, ignore=False):
        """Set the socket to use for the stream.

        The filesocket will be recreated as well.

        NOTE: the ``socket`` parameter shadows the ``socket`` module
        inside this method.

        :param socket: The new socket object to use.
        :param bool ignore: If ``True``, don't set the connection
                            state to ``'connected'``.
        """
        self.socket = socket
        if socket is not None:
            # ElementTree.iterparse requires a file.
            # 0 buffer files have to be binary.

            # Use the correct fileobject type based on the Python
            # version to work around a broken implementation in
            # Python 2.x.
            if sys.version_info < (3, 0):
                self.filesocket = FileSocket(self.socket)
            else:
                self.filesocket = self.socket.makefile('rb', 0)

            if not ignore:
                self.state._set_state('connected')
    def configure_socket(self):
        """Set timeout and other options for self.socket.

        Meant to be overridden.
        """
        # Blocking mode with no timeout; interrupting reads/writes is
        # handled elsewhere via the stop event.
        self.socket.settimeout(None)
    def configure_dns(self, resolver, domain=None, port=None):
        """
        Configure and set options for a :class:`~dns.resolver.Resolver`
        instance, and other DNS related tasks. For example, you
        can also check :meth:`~socket.socket.getaddrinfo` to see
        if you need to call out to ``libresolv.so.2`` to
        run ``res_init()``.

        Meant to be overridden; the default implementation does nothing.

        :param resolver: A :class:`~dns.resolver.Resolver` instance
                         or ``None`` if ``dnspython`` is not installed.
        :param domain: The initial domain under consideration.
        :param port: The initial port under consideration.
        """
        pass
    def start_tls(self):
        """Perform handshakes for TLS.

        If the handshake is successful, the XML stream will need
        to be restarted.

        :returns: ``True`` on a successful handshake (certificate problems
                  may still have been delegated to event handlers),
                  ``False`` if TLS is unavailable or the handshake failed.
        """
        if self.ssl_support:
            log.info("Negotiating TLS")
            log.info("Using SSL version: %s", str(self.ssl_version))
            # Without a CA bundle the peer certificate cannot be verified,
            # so fall back to an unverified connection.
            if self.ca_certs is None:
                cert_policy = ssl.CERT_NONE
            else:
                cert_policy = ssl.CERT_REQUIRED

            ssl_socket = ssl.wrap_socket(self.socket,
                                         ssl_version=self.ssl_version,
                                         do_handshake_on_connect=False,
                                         ca_certs=self.ca_certs,
                                         cert_reqs=cert_policy)

            if hasattr(self.socket, 'socket'):
                # We are using a testing socket, so preserve the top
                # layer of wrapping.
                self.socket.socket = ssl_socket
            else:
                self.socket = ssl_socket

            try:
                self.socket.do_handshake()
            except (Socket.error, ssl.SSLError):
                log.error('CERT: Invalid certificate trust chain.')
                if not self.event_handled('ssl_invalid_chain'):
                    self.disconnect(self.auto_reconnect, send_close=False)
                else:
                    self.event('ssl_invalid_chain', direct=True)
                return False

            # Publish the peer certificate (PEM form) for handlers.
            self._der_cert = self.socket.getpeercert(binary_form=True)
            pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
            log.debug('CERT: %s', pem_cert)
            self.event('ssl_cert', pem_cert, direct=True)

            try:
                # Check that the certificate matches the expected
                # server name.
                cert.verify(self._expected_server_name, self._der_cert)
            except cert.CertificateError as err:
                log.error(err.message)
                if not self.event_handled('ssl_invalid_cert'):
                    self.disconnect(self.auto_reconnect, send_close=False)
                else:
                    self.event('ssl_invalid_cert', pem_cert, direct=True)

            self.set_socket(self.socket)
            return True
        else:
            log.warning("Tried to enable TLS, but ssl module not found.")
            return False
def _cert_expiration(self, event):
"""Schedule an event for when the TLS certificate expires."""
if not self.use_tls and not self.use_ssl:
return
if not self._der_cert:
log.warn("TLS or SSL was enabled, but no certificate was found.")
return
def restart():
if not self.event_handled('ssl_expired_cert'):
log.warn("The server certificate has expired. Restarting.")
self.reconnect()
else:
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
self.event('ssl_expired_cert', pem_cert)
cert_ttl = cert.get_ttl(self._der_cert)
if cert_ttl is None:
return
if cert_ttl.days < 0:
log.warn('CERT: Certificate has expired.')
restart()
log.info('CERT: Time until certificate expiration: %s' % cert_ttl)
self.schedule('Certificate Expiration',
cert_ttl.seconds,
restart)
def _start_keepalive(self, event):
"""Begin sending whitespace periodically to keep the connection alive.
May be disabled by setting::
self.whitespace_keepalive = False
The keepalive interval can be set using::
self.whitespace_keepalive_interval = 300
"""
self.schedule('Whitespace Keepalive',
self.whitespace_keepalive_interval,
self.send_raw,
args = (' ',),
kwargs = {'now': True},
repeat=True)
def _remove_schedules(self, event):
"""Remove whitespace keepalive and certificate expiration schedules."""
self.scheduler.remove('Whitespace Keepalive')
self.scheduler.remove('Certificate Expiration')
    def start_stream_handler(self, xml):
        """Perform any initialization actions, such as handshakes,
        once the stream header has been sent.

        Meant to be overridden; the default implementation does nothing.

        :param xml: The stream's root element as received from the server.
        """
        pass
    def register_stanza(self, stanza_class):
        """Add a stanza object class as a known root stanza.

        A root stanza is one that appears as a direct child of the stream's
        root element.

        Stanzas that appear as substanzas of a root stanza do not need to
        be registered here. That is done using register_stanza_plugin() from
        sleekxmpp.xmlstream.stanzabase.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.

        :param stanza_class: The top-level stanza object's class.
        """
        self.__root_stanza.append(stanza_class)
    def remove_stanza(self, stanza_class):
        """Remove a stanza from being a known root stanza.

        A root stanza is one that appears as a direct child of the stream's
        root element.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.

        :param stanza_class: The stanza class previously passed to
                             :meth:`register_stanza`.
        """
        self.__root_stanza.remove(stanza_class)
def add_filter(self, mode, handler, order=None):
"""Add a filter for incoming or outgoing stanzas.
These filters are applied before incoming stanzas are
passed to any handlers, and before outgoing stanzas
are put in the send queue.
Each filter must accept a single stanza, and return
either a stanza or ``None``. If the filter returns
``None``, then the stanza will be dropped from being
processed for events or from being sent.
:param mode: One of ``'in'`` or ``'out'``.
:param handler: The filter function.
:param int order: The position to insert the filter in
the list of active filters.
"""
if order:
self.__filters[mode].insert(order, handler)
else:
self.__filters[mode].append(handler)
    def add_handler(self, mask, pointer, name=None, disposable=False,
                    threaded=False, filter=False, instream=False):
        """A shortcut method for registering a handler using XML masks.

        The use of :meth:`register_handler()` is preferred.

        :param mask: An XML snippet matching the structure of the
                     stanzas that will be passed to this handler.
        :param pointer: The handler function itself.
        :param name: A unique name for the handler. A name will
                     be generated if one is not provided.
        :param disposable: Indicates if the handler should be discarded
                           after one use.
        :param threaded: **DEPRECATED**.
                         Remains for backwards compatibility.
        :param filter: **DEPRECATED**.
                       Remains for backwards compatibility.
        :param instream: Indicates if the handler should execute during
                         stream processing and not during normal event
                         processing.
        """
        # To prevent circular dependencies, we must load the matcher
        # and handler classes here.

        if name is None:
            name = 'add_handler_%s' % self.getNewId()
        self.registerHandler(XMLCallback(name, MatchXMLMask(mask), pointer,
                                         once=disposable, instream=instream))
    def register_handler(self, handler, before=None, after=None):
        """Add a stream event handler that will be executed when a matching
        stanza is received.

        :param handler: The :class:`~sleekxmpp.xmlstream.handler.base.BaseHandler`
                        derived object to execute.
        :param before: Currently unused; accepted for API compatibility.
        :param after: Currently unused; accepted for API compatibility.
        """
        # Only register handlers that are not already bound to a stream.
        if handler.stream is None:
            self.__handlers.append(handler)
            # Store a weak reference so handlers do not keep the stream
            # object alive after it is discarded.
            handler.stream = weakref.ref(self)
def remove_handler(self, name):
"""Remove any stream event handlers with the given name.
:param name: The name of the handler.
"""
idx = 0
for handler in self.__handlers:
if handler.name == name:
self.__handlers.pop(idx)
return True
idx += 1
return False
    def get_dns_records(self, domain, port=None):
        """Get the DNS records for a domain.

        :param domain: The domain in question.
        :param port: If the results don't include a port, use this one.
        :returns: The result of the project's ``resolve()`` helper for
                  the domain/port and :attr:`dns_service`.
        """
        if port is None:
            port = self.default_port

        resolver = default_resolver()
        # Give subclasses/applications a chance to tweak resolver options.
        self.configure_dns(resolver, domain=domain, port=port)

        return resolve(domain, port, service=self.dns_service, resolver=resolver)
    def pick_dns_answer(self, domain, port=None):
        """Pick a server and port from DNS answers.

        Gets DNS answers if none available.
        Removes used answer from available answers.

        :param domain: The domain in question.
        :param port: If the results don't include a port, use this one.
        """
        if not self.dns_answers:
            self.dns_answers = self.get_dns_records(domain, port)

        # Python 2 generators use .next(); Python 3 uses the next() builtin.
        if sys.version_info < (3, 0):
            return self.dns_answers.next()
        else:
            return next(self.dns_answers)
def add_event_handler(self, name, pointer,
threaded=False, disposable=False):
"""Add a custom event handler that will be executed whenever
its event is manually triggered.
:param name: The name of the event that will trigger
this handler.
:param pointer: The function to execute.
:param threaded: If set to ``True``, the handler will execute
in its own thread. Defaults to ``False``.
:param disposable: If set to ``True``, the handler will be
discarded after one use. Defaults to ``False``.
"""
if not name in self.__event_handlers:
self.__event_handlers[name] = []
self.__event_handlers[name].append((pointer, threaded, disposable))
def del_event_handler(self, name, pointer):
"""Remove a function as a handler for an event.
:param name: The name of the event.
:param pointer: The function to remove as a handler.
"""
if not name in self.__event_handlers:
return
# Need to keep handlers that do not use
# the given function pointer
def filter_pointers(handler):
return handler[0] != pointer
self.__event_handlers[name] = list(filter(
filter_pointers,
self.__event_handlers[name]))
    def event_handled(self, name):
        """Returns the number of registered handlers for an event.

        :param name: The name of the event to check.
        :returns: ``int`` count of handlers; 0 for unknown events.
        """
        # Unknown event names simply have zero handlers.
        return len(self.__event_handlers.get(name, []))
def event(self, name, data={}, direct=False):
"""Manually trigger a custom event.
:param name: The name of the event to trigger.
:param data: Data that will be passed to each event handler.
Defaults to an empty dictionary, but is usually
a stanza object.
:param direct: Runs the event directly if True, skipping the
event queue. All event handlers will run in the
same thread.
"""
handlers = self.__event_handlers.get(name, [])
for handler in handlers:
#TODO: Data should not be copied, but should be read only,
# but this might break current code so it's left for future.
out_data = copy.copy(data) if len(handlers) > 1 else data
old_exception = getattr(data, 'exception', None)
if direct:
try:
handler[0](out_data)
except Exception as e:
error_msg = 'Error processing event handler: %s'
log.exception(error_msg, str(handler[0]))
if old_exception:
old_exception(e)
else:
self.exception(e)
else:
self.event_queue.put(('event', handler, out_data))
if handler[2]:
# If the handler is disposable, we will go ahead and
# remove it now instead of waiting for it to be
# processed in the queue.
with self.__event_handlers_lock:
try:
h_index = self.__event_handlers[name].index(handler)
self.__event_handlers[name].pop(h_index)
except:
pass
    def schedule(self, name, seconds, callback, args=None,
                 kwargs=None, repeat=False):
        """Schedule a callback function to execute after a given delay.

        :param name: A unique name for the scheduled callback.
        :param seconds: The time in seconds to wait before executing.
        :param callback: A pointer to the function to execute.
        :param args: A tuple of arguments to pass to the function.
        :param kwargs: A dictionary of keyword arguments to pass to
                       the function.
        :param repeat: Flag indicating if the scheduled event should
                       be reset and repeat after executing.
        """
        # Completed jobs are funnelled through the event queue so their
        # callbacks run in an event runner thread.
        self.scheduler.add(name, seconds, callback, args, kwargs,
                           repeat, qpointer=self.event_queue)
    def incoming_filter(self, xml):
        """Filter incoming XML objects before they are processed.

        Possible uses include remapping namespaces, or correcting elements
        from sources with incorrect behavior.

        Meant to be overridden; the default is the identity function.

        :param xml: The raw incoming XML element.
        :returns: The (possibly modified) XML element.
        """
        return xml
def send(self, data, mask=None, timeout=None, now=False, use_filters=True):
"""A wrapper for :meth:`send_raw()` for sending stanza objects.
May optionally block until an expected response is received.
:param data: The :class:`~sleekxmpp.xmlstream.stanzabase.ElementBase`
stanza to send on the stream.
:param mask: **DEPRECATED**
An XML string snippet matching the structure
of the expected response. Execution will block
in this thread until the response is received
or a timeout occurs.
:param int timeout: Time in seconds to wait for a response before
continuing. Defaults to :attr:`response_timeout`.
:param bool now: Indicates if the send queue should be skipped,
sending the stanza immediately. Useful mainly
for stream initialization stanzas.
Defaults to ``False``.
:param bool use_filters: Indicates if outgoing filters should be
applied to the given stanza data. Disabling
filters is useful when resending stanzas.
Defaults to ``True``.
"""
if timeout is None:
timeout = self.response_timeout
if hasattr(mask, 'xml'):
mask = mask.xml
if isinstance(data, ElementBase):
if use_filters:
for filter in self.__filters['out']:
data = filter(data)
if data is None:
return
if mask is not None:
log.warning("Use of send mask waiters is deprecated.")
wait_for = Waiter("SendWait_%s" % self.new_id(),
MatchXMLMask(mask))
self.register_handler(wait_for)
if isinstance(data, ElementBase):
with self.send_queue_lock:
if use_filters:
for filter in self.__filters['out_sync']:
data = filter(data)
if data is None:
return
str_data = str(data)
self.send_raw(str_data, now)
else:
self.send_raw(data, now)
if mask is not None:
return wait_for.wait(timeout)
    def send_xml(self, data, mask=None, timeout=None, now=False):
        """Send an XML object on the stream, and optionally wait
        for a response.

        :param data: The :class:`~xml.etree.ElementTree.Element` XML object
                     to send on the stream.
        :param mask: **DEPRECATED**
                     An XML string snippet matching the structure
                     of the expected response. Execution will block
                     in this thread until the response is received
                     or a timeout occurs.
        :param int timeout: Time in seconds to wait for a response before
                            continuing. Defaults to :attr:`response_timeout`.
        :param bool now: Indicates if the send queue should be skipped,
                         sending the stanza immediately. Useful mainly
                         for stream initialization stanzas.
                         Defaults to ``False``.
        """
        if timeout is None:
            timeout = self.response_timeout
        # Serialize the XML and reuse the normal stanza send path.
        return self.send(tostring(data), mask, timeout, now)
    def send_raw(self, data, now=False, reconnect=None):
        """Send raw data across the stream.

        :param string data: Any string value.
        :param bool now: If ``True``, bypass the send queue and write
                         directly to the socket from this thread;
                         otherwise queue the data for the send thread.
        :param bool reconnect: Indicates if the stream should be
                               restarted if there is an error sending
                               the stanza. Used mainly for testing.
                               Defaults to :attr:`auto_reconnect`.
        """
        if now:
            log.debug("SEND (IMMED): %s", data)
            try:
                data = data.encode('utf-8')
                total = len(data)
                sent = 0
                count = 0
                tries = 0
                with self.send_lock:
                    # socket.send may perform partial writes, so loop
                    # until every byte has been written.
                    while sent < total and not self.stop.is_set():
                        try:
                            sent += self.socket.send(data[sent:])
                            count += 1
                        except ssl.SSLError as serr:
                            if tries >= self.ssl_retry_max:
                                log.debug('SSL error - max retries reached')
                                self.exception(serr)
                                log.warning("Failed to send %s", data)
                                if reconnect is None:
                                    reconnect = self.auto_reconnect
                                if not self.stop.is_set():
                                    self.disconnect(reconnect, send_close=False)
                            log.warning('SSL write error - reattempting')
                            if not self.stop.is_set():
                                time.sleep(self.ssl_retry_delay)
                            tries += 1
                if count > 1:
                    log.debug('SENT: %d chunks', count)
            except (Socket.error, ssl.SSLError) as serr:
                self.event('socket_error', serr, direct=True)
                log.warning("Failed to send %s", data)
                if reconnect is None:
                    reconnect = self.auto_reconnect
                if not self.stop.is_set():
                    self.disconnect(reconnect, send_close=False)
        else:
            self.send_queue.put(data)
        return True
    def _start_thread(self, name, target, track=True):
        """Start and register a named worker thread.

        :param name: Unique name for the thread.
        :param target: The callable the thread will run.
        :param track: If ``True``, include the thread in the count
                      consulted by :meth:`_wait_for_threads` at shutdown.
        """
        self.__active_threads.add(name)
        self.__thread[name] = threading.Thread(name=name, target=target)
        # Daemon status follows the stream-wide _use_daemons setting.
        self.__thread[name].daemon = self._use_daemons
        self.__thread[name].start()

        if track:
            with self.__thread_cond:
                self.__thread_count += 1
    def _end_thread(self, name, early=False):
        """Mark a worker thread as finished and update the thread count.

        :param name: Display name of the thread being ended.
        :param early: ``True`` when the thread is being marked as ended
                      ahead of its actual exit, to avoid deadlocking in
                      :meth:`_wait_for_threads` during disconnect().
        """
        with self.__thread_cond:
            curr_thread = threading.current_thread().name
            if curr_thread in self.__active_threads:
                self.__thread_count -= 1
                self.__active_threads.remove(curr_thread)

                if early:
                    log.debug('Threading deadlock prevention!')
                    log.debug(("Marked %s thread as ended due to " + \
                               "disconnect() call. %s threads remain.") % (
                                   name, self.__thread_count))
                else:
                    log.debug("Stopped %s thread. %s threads remain." % (
                        name, self.__thread_count))

            else:
                log.debug(("Finished exiting %s thread after early " + \
                           "termination from disconnect() call. " + \
                           "%s threads remain.") % (
                               name, self.__thread_count))

            if self.__thread_count == 0:
                # Wake up _wait_for_threads() once everything has exited.
                self.__thread_cond.notify()
    def _wait_for_threads(self):
        """Block (up to 4 seconds) for all tracked worker threads to exit.

        If called from one of the tracked threads itself, that thread is
        marked as ended first so it does not deadlock waiting on its own
        exit.
        """
        with self.__thread_cond:
            if self.__thread_count != 0:
                log.debug("Waiting for %s threads to exit." %
                          self.__thread_count)
                name = threading.current_thread().name
                if name in self.__thread:
                    self._end_thread(name, early=True)
                self.__thread_cond.wait(4)
                if self.__thread_count != 0:
                    log.error("Hanged threads: %s" % threading.enumerate())
                    log.error("This may be due to calling disconnect() " + \
                              "from a non-threaded event handler. Be " + \
                              "sure that event handlers that call " + \
                              "disconnect() are registered using: " + \
                              "add_event_handler(..., threaded=True)")
    def process(self, **kwargs):
        """Initialize the XML streams and begin processing events.

        The number of threads used for processing stream events is determined
        by :data:`HANDLER_THREADS`.

        :param bool block: If ``False``, then event dispatcher will run
                    in a separate thread, allowing for the stream to be
                    used in the background for another application.
                    Otherwise, ``process(block=True)`` blocks the current
                    thread. Defaults to ``False``.
        :param bool threaded: **DEPRECATED**
                    If ``True``, then event dispatcher will run
                    in a separate thread, allowing for the stream to be
                    used in the background for another application.
                    Defaults to ``True``. This does **not** mean that no
                    threads are used at all if ``threaded=False``.

        Regardless of these threading options, these threads will
        always exist:

        - The event queue processor
        - The send queue processor
        - The scheduler

        :raises ValueError: If both ``block`` and ``threaded`` are given.
        """
        if 'threaded' in kwargs and 'block' in kwargs:
            raise ValueError("process() called with both " + \
                             "block and threaded arguments")
        elif 'block' in kwargs:
            threaded = not(kwargs.get('block', False))
        else:
            threaded = kwargs.get('threaded', True)

        # Start the handler, send, and scheduler worker threads.
        for t in range(0, HANDLER_THREADS):
            log.debug("Starting HANDLER THREAD")
            self._start_thread('event_thread_%s' % t, self._event_runner)

        self._start_thread('send_thread', self._send_thread)
        self._start_thread('scheduler_thread', self._scheduler_thread)

        if threaded:
            # Run the XML stream in the background for another application.
            self._start_thread('read_thread', self._process, track=False)
        else:
            self._process()
    def _process(self):
        """Start processing the XML streams.

        Processing will continue after any recoverable errors
        if reconnections are allowed.
        """
        # The body of this loop will only execute once per connection.
        # Additional passes will be made only if an error occurs and
        # reconnecting is permitted.
        while True:
            shutdown = False
            try:
                # The call to self.__read_xml will block and prevent
                # the body of the loop from running until a disconnect
                # occurs. After any reconnection, the stream header will
                # be resent and processing will resume.
                while not self.stop.is_set():
                    # Only process the stream while connected to the server
                    if not self.state.ensure('connected', wait=0.1):
                        break
                    # Ensure the stream header is sent for any
                    # new connections.
                    if not self.session_started_event.is_set():
                        self.send_raw(self.stream_header, now=True)
                    if not self.__read_xml():
                        # If the server terminated the stream, end processing
                        break
            except KeyboardInterrupt:
                log.debug("Keyboard Escape Detected in _process")
                self.event('killed', direct=True)
                shutdown = True
            except SystemExit:
                log.debug("SystemExit in _process")
                shutdown = True
            except (SyntaxError, ExpatError) as e:
                # Malformed XML arrived from the server.
                log.error("Error reading from XML stream.")
                self.exception(e)
            except (Socket.error, ssl.SSLError) as serr:
                self.event('socket_error', serr, direct=True)
                log.error('Socket Error #%s: %s', serr.errno, serr.strerror)
            except ValueError as e:
                # Python 2 exceptions expose .message; Python 3 uses args.
                msg = e.message if hasattr(e, 'message') else e.args[0]
                if 'I/O operation on closed file' in msg:
                    log.error('Can not read from closed socket.')
                else:
                    self.exception(e)
            except Exception as e:
                if not self.stop.is_set():
                    log.error('Connection error.')
                self.exception(e)
            if not shutdown and not self.stop.is_set() \
                    and self.auto_reconnect:
                self.reconnect()
            else:
                self.disconnect()
                break
    def __read_xml(self):
        """Parse the incoming XML stream

        Stream events are raised for each received stanza.

        :returns: ``False`` when the stream's root element closed (the
                  server ended the stream); ``True`` when a stream restart
                  was requested via :class:`RestartStream`.
        """
        depth = 0
        root = None
        # iterparse yields byte-string event names, hence b'end'/b'start'.
        for event, xml in ET.iterparse(self.filesocket, (b'end', b'start')):
            if event == b'start':
                if depth == 0:
                    # We have received the start of the root element.
                    root = xml
                    # Perform any stream initialization actions, such
                    # as handshakes.
                    self.stream_end_event.clear()
                    self.start_stream_handler(root)
                depth += 1
            if event == b'end':
                depth -= 1
                if depth == 0:
                    # The stream's root element has closed,
                    # terminating the stream.
                    log.debug("End of stream recieved")
                    self.stream_end_event.set()
                    return False
                elif depth == 1:
                    # We only raise events for stanzas that are direct
                    # children of the root element.
                    try:
                        self.__spawn_event(xml)
                    except RestartStream:
                        return True
                    if root is not None:
                        # Keep the root element empty of children to
                        # save on memory use.
                        root.clear()
        log.debug("Ending read XML loop")
def _build_stanza(self, xml, default_ns=None):
"""Create a stanza object from a given XML object.
If a specialized stanza type is not found for the XML, then
a generic :class:`~sleekxmpp.xmlstream.stanzabase.StanzaBase`
stanza will be returned.
:param xml: The :class:`~xml.etree.ElementTree.Element` XML object
to convert into a stanza object.
:param default_ns: Optional default namespace to use instead of the
stream's current default namespace.
"""
if default_ns is None:
default_ns = self.default_ns
stanza_type = StanzaBase
for stanza_class in self.__root_stanza:
if xml.tag == "{%s}%s" % (default_ns, stanza_class.name) or \
xml.tag == stanza_class.tag_name():
stanza_type = stanza_class
break
stanza = stanza_type(self, xml)
return stanza
def __spawn_event(self, xml):
"""
Analyze incoming XML stanzas and convert them into stanza
objects if applicable and queue stream events to be processed
by matching handlers.
:param xml: The :class:`~sleekxmpp.xmlstream.stanzabase.ElementBase`
stanza to analyze.
"""
# Apply any preprocessing filters.
xml = self.incoming_filter(xml)
# Convert the raw XML object into a stanza object. If no registered
# stanza type applies, a generic StanzaBase stanza will be used.
stanza = self._build_stanza(xml)
for filter in self.__filters['in']:
if stanza is not None:
stanza = filter(stanza)
if stanza is None:
return
log.debug("RECV: %s", stanza)
# Match the stanza against registered handlers. Handlers marked
# to run "in stream" will be executed immediately; the rest will
# be queued.
unhandled = True
matched_handlers = [h for h in self.__handlers if h.match(stanza)]
for handler in matched_handlers:
if len(matched_handlers) > 1:
stanza_copy = copy.copy(stanza)
else:
stanza_copy = stanza
handler.prerun(stanza_copy)
self.event_queue.put(('stanza', handler, stanza_copy))
try:
if handler.check_delete():
self.__handlers.remove(handler)
except:
pass # not thread safe
unhandled = False
# Some stanzas require responses, such as Iq queries. A default
# handler will be executed immediately for this case.
if unhandled:
stanza.unhandled()
def _threaded_event_wrapper(self, func, args):
"""Capture exceptions for event handlers that run
in individual threads.
:param func: The event handler to execute.
:param args: Arguments to the event handler.
"""
# this is always already copied before this is invoked
orig = args[0]
try:
func(*args)
except Exception as e:
error_msg = 'Error processing event handler: %s'
log.exception(error_msg, str(func))
if hasattr(orig, 'exception'):
orig.exception(e)
else:
self.exception(e)
    def _event_runner(self):
        """Process the event queue and execute handlers.

        The number of event runner threads is controlled by HANDLER_THREADS.

        Stream event handlers will all execute in this thread. Custom event
        handlers may be spawned in individual threads.
        """
        log.debug("Loading event runner")
        try:
            while not self.stop.is_set():
                try:
                    wait = self.wait_timeout
                    event = self.event_queue.get(True, timeout=wait)
                except queue.Empty:
                    event = None
                if event is None:
                    continue

                etype, handler = event[0:2]
                args = event[2:]
                # Keep a pre-handler copy so the original data's exception
                # callback can still be reached if the handler fails.
                orig = copy.copy(args[0])

                if etype == 'stanza':
                    try:
                        handler.run(args[0])
                    except Exception as e:
                        error_msg = 'Error processing stream handler: %s'
                        log.exception(error_msg, handler.name)
                        orig.exception(e)
                elif etype == 'schedule':
                    name = args[1]
                    try:
                        log.debug('Scheduled event: %s: %s', name, args[0])
                        handler(*args[0])
                    except Exception as e:
                        log.exception('Error processing scheduled task')
                        self.exception(e)
                elif etype == 'event':
                    func, threaded, disposable = handler
                    try:
                        if threaded:
                            # Threaded handlers run inside a wrapper so
                            # their exceptions are still captured.
                            x = threading.Thread(
                                    name="Event_%s" % str(func),
                                    target=self._threaded_event_wrapper,
                                    args=(func, args))
                            x.daemon = self._use_daemons
                            x.start()
                        else:
                            func(*args)
                    except Exception as e:
                        error_msg = 'Error processing event handler: %s'
                        log.exception(error_msg, str(func))
                        if hasattr(orig, 'exception'):
                            orig.exception(e)
                        else:
                            self.exception(e)
                elif etype == 'quit':
                    log.debug("Quitting event runner thread")
                    break
        except KeyboardInterrupt:
            log.debug("Keyboard Escape Detected in _event_runner")
            self.event('killed', direct=True)
            self.disconnect()
        except SystemExit:
            self.disconnect()
            self.event_queue.put(('quit', None, None))
        self._end_thread('event runner')
    def _send_thread(self):
        """Extract stanzas from the send queue and send them on the stream."""
        try:
            while not self.stop.is_set():
                # Pause sending until the session is (re)established.
                while not self.stop.is_set() and \
                      not self.session_started_event.is_set():
                    self.session_started_event.wait(timeout=0.1)

                # Resend a stanza that failed on a previous attempt before
                # pulling new data off the queue.
                if self.__failed_send_stanza is not None:
                    data = self.__failed_send_stanza
                    self.__failed_send_stanza = None
                else:
                    try:
                        data = self.send_queue.get(True, 1)
                    except queue.Empty:
                        continue
                log.debug("SEND: %s", data)
                enc_data = data.encode('utf-8')
                total = len(enc_data)
                sent = 0
                count = 0
                tries = 0
                try:
                    with self.send_lock:
                        # socket.send may perform partial writes, so loop
                        # until every byte has been written.
                        while sent < total and not self.stop.is_set() and \
                              self.session_started_event.is_set():
                            try:
                                sent += self.socket.send(enc_data[sent:])
                                count += 1
                            except ssl.SSLError as serr:
                                if tries >= self.ssl_retry_max:
                                    log.debug('SSL error - max retries reached')
                                    self.exception(serr)
                                    log.warning("Failed to send %s", data)
                                    if not self.stop.is_set():
                                        self.disconnect(self.auto_reconnect, send_close=False)
                                log.warning('SSL write error - reattempting')
                                if not self.stop.is_set():
                                    time.sleep(self.ssl_retry_delay)
                                tries += 1
                    if count > 1:
                        log.debug('SENT: %d chunks', count)
                    self.send_queue.task_done()
                except (Socket.error, ssl.SSLError) as serr:
                    self.event('socket_error', serr, direct=True)
                    log.warning("Failed to send %s", data)
                    if not self.stop.is_set():
                        # Remember the stanza so it is resent after any
                        # reconnect attempt.
                        self.__failed_send_stanza = data
                        self._end_thread('send')
                        self.disconnect(self.auto_reconnect, send_close=False)
                        return
        except Exception as ex:
            log.exception('Unexpected error in send thread: %s', ex)
            self.exception(ex)
            if not self.stop.is_set():
                self._end_thread('send')
                self.disconnect(self.auto_reconnect)
                return

        self._end_thread('send')
    def _scheduler_thread(self):
        """Run the scheduler in this thread until processing stops."""
        self.scheduler.process(threaded=False)
        self._end_thread('scheduler')
    def exception(self, exception):
        """Process an unknown exception.

        Meant to be overridden; the default implementation ignores the
        exception.

        :param exception: An unhandled exception object.
        """
        pass
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility:
# each camelCase attribute below is simply an alias for the snake_case
# method on the right. New code should use the snake_case names.
XMLStream.startTLS = XMLStream.start_tls
XMLStream.registerStanza = XMLStream.register_stanza
XMLStream.removeStanza = XMLStream.remove_stanza
XMLStream.registerHandler = XMLStream.register_handler
XMLStream.removeHandler = XMLStream.remove_handler
XMLStream.setSocket = XMLStream.set_socket
XMLStream.sendRaw = XMLStream.send_raw
XMLStream.getId = XMLStream.get_id
XMLStream.getNewId = XMLStream.new_id
XMLStream.sendXML = XMLStream.send_xml
| 40.399179
| 94
| 0.561643
|
c558e9f7af28a1053e87043c62baf90c1c9e114e
| 4,708
|
py
|
Python
|
utils.py
|
OakLake/GANs
|
12131b7ba64849986cf31991ef1ed39691f26e51
|
[
"MIT"
] | null | null | null |
utils.py
|
OakLake/GANs
|
12131b7ba64849986cf31991ef1ed39691f26e51
|
[
"MIT"
] | null | null | null |
utils.py
|
OakLake/GANs
|
12131b7ba64849986cf31991ef1ed39691f26e51
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import errno
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from IPython import display
from matplotlib import pyplot as plt
import torch
'''
TensorBoard Data will be stored in './runs' path
'''
class Logger:
    """Training logger for GAN experiments.

    Writes discriminator/generator losses and sample image grids to
    TensorBoard (via tensorboardX), saves rendered image grids and model
    checkpoints under ``./data/``, and prints progress to stdout.
    """
    def __init__(self, model_name, data_name):
        # The model/data pair is combined into a tag used both as the
        # TensorBoard run comment and as the on-disk output subdirectory.
        self.model_name = model_name
        self.data_name = data_name
        self.comment = '{}_{}'.format(model_name, data_name)
        self.data_subdir = '{}/{}'.format(model_name, data_name)
        # TensorBoard
        self.writer = SummaryWriter(comment=self.comment)
    def log(self, d_error, g_error, epoch, n_batch, num_batches):
        """Record discriminator and generator errors as TensorBoard scalars."""
        # Unwrap legacy autograd Variables into plain numpy values before
        # handing them to the scalar writer.
        var_class = torch.autograd.variable.Variable
        if type(d_error)==var_class:
            d_error = d_error.data.cpu().numpy()
        if type(g_error)==var_class:
            g_error = g_error.data.cpu().numpy()
        # Global step is computed from epoch/batch so runs are comparable.
        step = Logger._step(epoch, n_batch, num_batches)
        self.writer.add_scalar(
            '{}/D_error'.format(self.comment), d_error, step)
        self.writer.add_scalar(
            '{}/G_error'.format(self.comment), g_error, step)
    def log_images(self, images, num_images, epoch, n_batch, num_batches, format='NCHW', normalize=True):
        '''
        input images are expected in format (NCHW)
        '''
        # Accept numpy input and NHWC layout; normalize both to torch NCHW.
        if type(images) == np.ndarray:
            images = torch.from_numpy(images)
        if format=='NHWC':
            images = images.transpose(1,3)
        step = Logger._step(epoch, n_batch, num_batches)
        img_name = '{}/images{}'.format(self.comment, '')
        # Make horizontal grid from image tensor (single row of samples)
        horizontal_grid = vutils.make_grid(
            images, normalize=normalize, scale_each=True)
        # Make vertical grid from image tensor (~square: nrow = sqrt(n))
        nrows = int(np.sqrt(num_images))
        grid = vutils.make_grid(
            images, nrow=nrows, normalize=True, scale_each=True)
        # Add horizontal images to tensorboard
        self.writer.add_image(img_name, horizontal_grid, step)
        # Save plots
        self.save_torch_images(horizontal_grid, grid, epoch, n_batch)
    def save_torch_images(self, horizontal_grid, grid, epoch, n_batch, plot_horizontal=True):
        """Render both grids with matplotlib and save them as PNG files.

        NOTE(review): assumes the grids are CPU tensors -- ``.numpy()``
        would raise on CUDA tensors; confirm with callers.
        """
        out_dir = './data/images/{}'.format(self.data_subdir)
        Logger._make_dir(out_dir)
        # Plot and save horizontal; moveaxis converts CHW -> HWC for imshow.
        fig = plt.figure(figsize=(16, 16))
        plt.imshow(np.moveaxis(horizontal_grid.numpy(), 0, -1))
        plt.axis('off')
        if plot_horizontal:
            # Show inline when running inside IPython/Jupyter.
            display.display(plt.gcf())
        self._save_images(fig, epoch, n_batch, 'hori')
        plt.close()
        # Save squared
        fig = plt.figure()
        plt.imshow(np.moveaxis(grid.numpy(), 0, -1))
        plt.axis('off')
        self._save_images(fig, epoch, n_batch)
        plt.close()
    def _save_images(self, fig, epoch, n_batch, comment=''):
        # Write a single matplotlib figure into the experiment image dir.
        out_dir = './data/images/{}'.format(self.data_subdir)
        Logger._make_dir(out_dir)
        fig.savefig('{}/{}_epoch_{}_batch_{}.png'.format(out_dir,
                    comment, epoch, n_batch))
    def display_status(self, epoch, num_epochs, n_batch, num_batches, d_error, g_error, d_pred_real, d_pred_fake):
        """Print a human-readable progress line with losses and D outputs."""
        # Unwrap legacy Variables; [0] extracts the scalar loss value.
        var_class = torch.autograd.variable.Variable
        if type(d_error)==var_class:
            d_error = d_error.data.cpu().numpy()[0]
        if type(g_error)==var_class:
            g_error = g_error.data.cpu().numpy()[0]
        if type(d_pred_real)==var_class:
            d_pred_real = d_pred_real.data
        if type(d_pred_fake)==var_class:
            d_pred_fake = d_pred_fake.data
        print('Epoch: [{}/{}], Batch Num: [{}/{}]'.format(
            epoch,num_epochs, n_batch, num_batches)
        )
        print('Discriminator Loss: {:.4f}, Generator Loss: {:.4f}'.format(d_error, g_error))
        print('D(x): {:.4f}, D(G(z)): {:.4f}'.format(d_pred_real.mean(), d_pred_fake.mean()))
    def save_models(self, generator, discriminator, epoch):
        """Checkpoint both model state dicts under ./data/models/."""
        out_dir = './data/models/{}'.format(self.data_subdir)
        Logger._make_dir(out_dir)
        torch.save(generator.state_dict(),
                   '{}/G_epoch_{}'.format(out_dir, epoch))
        torch.save(discriminator.state_dict(),
                   '{}/D_epoch_{}'.format(out_dir, epoch))
    def close(self):
        # Flush and release the TensorBoard writer.
        self.writer.close()
    # Private Functionality
    @staticmethod
    def _step(epoch, n_batch, num_batches):
        # Flatten (epoch, batch) into a single monotonically increasing step.
        return epoch * num_batches + n_batch
    @staticmethod
    def _make_dir(directory):
        # mkdir -p semantics: ignore "already exists", re-raise anything else.
        try:
            os.makedirs(directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
| 34.115942
| 114
| 0.609601
|
50d429d6c8bc593fcaab595f4a36cd7140d6c1fb
| 6,702
|
py
|
Python
|
ingest/qtl/eqtlgen_2018/process.py
|
opentargets/sumstat_data
|
9fb0aafd763def5d0a274e5175def21db1e41de2
|
[
"Apache-2.0"
] | 1
|
2018-11-30T10:19:08.000Z
|
2018-11-30T10:19:08.000Z
|
ingest/qtl/eqtlgen_2018/process.py
|
opentargets/sumstat_data
|
9fb0aafd763def5d0a274e5175def21db1e41de2
|
[
"Apache-2.0"
] | 1
|
2018-11-02T10:50:57.000Z
|
2018-11-02T10:50:57.000Z
|
ingest/qtl/eqtlgen_2018/process.py
|
opentargets/sumstat_data
|
9fb0aafd763def5d0a274e5175def21db1e41de2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
'''
# Set SPARK_HOME and PYTHONPATH to use 2.4.0
export PYSPARK_SUBMIT_ARGS="--driver-memory 8g pyspark-shell"
export SPARK_HOME=/Users/em21/software/spark-2.4.0-bin-hadoop2.7
export PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-2.4.0-src.zip:$PYTHONPATH
'''
import sys
import os
from time import time
import pyspark.sql
from pyspark.sql.types import *
from pyspark.sql import DataFrame
from pyspark.sql.functions import *
def main():
    """Ingest eQTLGen 2018 cis-eQTL summary statistics into parquet.

    Pipeline:
      1. Load the raw tab-separated sumstats and the gnomAD-annotated
         variant index.
      2. Inner-join on chrom/pos and either allele orientation, then
         harmonise effect/other alleles to ref/alt (flipping the z-score
         and effect-allele frequency where needed).
      3. Estimate beta and SE from z, allele frequency and sample size.
      4. Annotate per-phenotype test counts, filter on minor allele
         count, and write a partitioned, position-sorted parquet dataset.

    Returns:
        0 on success.
    """
    # Args
    min_mac = 5  # minimum minor allele count retained in the output
    study_id = 'eQTLGen'
    bio_feature = 'UBERON_0000178'  # NOTE(review): presumably whole blood for eQTLGen -- confirm
    data_type = 'eqtl'
    # (Removed unused locals `pheno_var` and `start_time` -- neither was
    # referenced anywhere in this function.)
    # # File args (local)
    # in_sumstats = 'example_data/cis-eQTLs_full_20180905.head.txt'
    # in_varindex = 'example_data/variant-annotation.parquet'
    # out_parquet = 'output/eQTLGen'
    # File args (server)
    in_sumstats = 'gs://genetics-portal-raw/eqtlgen_20180905/cis-eQTLs_full_20180905.txt'
    in_varindex = 'gs://genetics-portal-data/variant-annotation/190129/variant-annotation.parquet'
    out_parquet = 'gs://genetics-portal-sumstats-b38/unfiltered/molecular_trait/eQTLGen.parquet'
    # Make spark session
    global spark
    spark = (
        pyspark.sql.SparkSession.builder
        .config("parquet.enable.summary-metadata", "true")
        .getOrCreate()
    )
    print('Spark version: ', spark.version)
    #
    # Load --------------------------------------------------------------------
    #
    # Load sumstats with an explicit schema (file has a header row).
    import_schema = (
        StructType()
        .add('pval', DoubleType())
        .add('rsid', StringType())
        .add('chrom', StringType())
        .add('pos', IntegerType())
        .add('z', DoubleType())
        .add('effect_allele', StringType())
        .add('other_allele', StringType())
        .add('gene_id', StringType())
        .add('gene_name', StringType())
        .add('gene_chrom', StringType())
        .add('gene_pos', IntegerType())
        .add('n_cohorts', IntegerType())
        .add('n_total', IntegerType())
    )
    sumstats = (
        spark.read.csv(
            path=in_sumstats,
            sep='\t',
            schema=import_schema,
            enforceSchema=True,
            header=True,
            comment='#')
        # Columns not needed downstream
        .drop('rsid', 'gene_name', 'gene_chrom', 'gene_pos', 'n_cohorts')
    )
    # Load variant index; GRCh37 coordinates are renamed to chrom/pos so
    # they line up with the sumstats join keys.
    varindex = (
        spark.read.parquet(in_varindex)
        .select(
            'chrom_b37',
            'pos_b37',
            'chrom_b38',
            'pos_b38',
            'ref',
            'alt',
            'af.gnomad_nfe'
        )
        .withColumnRenamed('chrom_b37', 'chrom')
        .withColumnRenamed('pos_b37', 'pos')
    )
    #
    # Harmonise other and effect alleles to be ref and alt, respectively -----
    #
    # Join sumstats with the variant index on position and either allele
    # orientation. NOTE: this is an inner join (Spark's default), so
    # sumstat rows without a matching variant-index entry are dropped.
    merged = (
        sumstats.join(varindex,
            (
                (varindex.chrom == sumstats.chrom) &
                (varindex.pos == sumstats.pos) &
                (
                    ((varindex.ref == sumstats.other_allele) & (varindex.alt == sumstats.effect_allele)) |
                    ((varindex.ref == sumstats.effect_allele) & (varindex.alt == sumstats.other_allele))
                )
            ))
    )
    # If effect_allele == ref, flip the z-score and take 1 - af so that
    # `eaf` is always the frequency of the alt (effect) allele.
    merged = (
        merged
        .withColumn('z', when(col('effect_allele') == col('ref'), -1 * col('z')).otherwise(col('z')))
        .withColumn('eaf', when(col('effect_allele') == col('ref'), 1 - col('gnomad_nfe')).otherwise(col('gnomad_nfe')))
        .drop('effect_allele', 'other_allele', 'gnomad_nfe')
    )
    #
    # Estimate beta and SE ----------------------------------------------------
    #
    # Equation from the eQTLGen paper:
    #   beta     = z / sqrt(2 * p * (1 - p) * (n + z^2))
    #   SE(beta) = 1 / sqrt(2 * p * (1 - p) * (n + z^2))
    # where p is the effect-allele frequency and n the sample size.
    merged = (
        merged
        .withColumn('beta', col('z')/((2*col('eaf')*(1-col('eaf'))*(col('n_total')+col('z')**2))**0.5))
        .withColumn('se', 1/((2*col('eaf')*(1-col('eaf'))*(col('n_total')+col('z')**2))**0.5))
        .drop('z')
    )
    #
    # Calc number of tests per phenotype_id -----------------------------------
    #
    # Persist: `merged` is reused both to count tests and as the join base.
    merged = (
        merged
        .withColumn('phenotype_id', col('gene_id'))
        .persist()
    )
    # Count number of tests
    num_tests = (
        merged
        .groupby('phenotype_id')
        .agg(count(col('pval')).alias('num_tests'))
    )
    # Merge result back onto merged
    merged = merged.join(num_tests, on='phenotype_id')
    #
    # Tidy up and write -------------------------------------------------------
    #
    df = merged
    # Format columns
    df = (
        df
        # Use build 38 chrom and positions. ('effect_allele'/'other_allele'
        # were already dropped above, so dropping them again is a no-op.)
        .drop('chrom', 'pos', 'effect_allele', 'other_allele')
        .withColumnRenamed('chrom_b38', 'chrom')
        .withColumnRenamed('pos_b38', 'pos')
        # Add new columns
        .withColumn('study_id', lit(study_id))
        .withColumn('type', lit(data_type))
        .withColumn('bio_feature', lit(bio_feature))
        .withColumn('n_cases', lit(None).cast('int'))
        .withColumn('is_cc', lit(False))
        .withColumn('maf', when(col('eaf') > 0.5, 1 - col('eaf')).otherwise(col('eaf')))
        .withColumn('mac', col('n_total') * 2 * col('maf'))  # TODO - if reingesting this should be converted to integer type
        .withColumn('mac_cases', lit(None).cast('int'))
        .withColumn('info', lit(None).cast('double'))
        # Filter based on mac
        .filter(col('mac') >= min_mac)
    )
    # Order and select columns
    df = (
        df.select(
            'type',
            'study_id',
            'phenotype_id',
            'bio_feature',
            'gene_id',
            'chrom',
            'pos',
            'ref',
            'alt',
            'beta',
            'se',
            'pval',
            'n_total',
            'n_cases',
            'eaf',
            'mac',
            'mac_cases',
            'num_tests',
            'info',
            'is_cc'
        )
    )
    # Drop rows missing any field required by the output schema.
    required_cols = ['type', 'study_id', 'phenotype_id', 'bio_feature',
                     'gene_id', 'chrom', 'pos', 'ref', 'alt', 'beta', 'se', 'pval']
    df = df.dropna(subset=required_cols)
    # Repartition and sort by genomic position for efficient range reads.
    df = (
        df.repartitionByRange('chrom', 'pos')
        .sortWithinPartitions('chrom', 'pos')
    )
    # Write output
    (
        df
        .write
        .partitionBy('bio_feature', 'chrom')
        .parquet(
            out_parquet,
            mode='overwrite',
            compression='snappy'
        )
    )
    return 0
if __name__ == '__main__':
    main()
| 28.041841
| 124
| 0.522232
|
2d5d1cb6373939e8d707a7bba1b3d4615b1dc075
| 19,444
|
py
|
Python
|
torch/distributed/_shard/sharded_tensor/__init__.py
|
YifanShenSZ/pytorch
|
b4232f7cbe407909f9d95b91304c73fdc4c66a50
|
[
"Intel"
] | null | null | null |
torch/distributed/_shard/sharded_tensor/__init__.py
|
YifanShenSZ/pytorch
|
b4232f7cbe407909f9d95b91304c73fdc4c66a50
|
[
"Intel"
] | null | null | null |
torch/distributed/_shard/sharded_tensor/__init__.py
|
YifanShenSZ/pytorch
|
b4232f7cbe407909f9d95b91304c73fdc4c66a50
|
[
"Intel"
] | null | null | null |
# coding=utf-8
import copy
import functools
from typing import List
import torch
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed._shard.partial_tensor import _PartialTensor
from .api import (
_CUSTOM_SHARDED_OPS,
_SHARDED_OPS,
Shard,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from .metadata import ShardMetadata # noqa: F401
from torch.distributed._shard.op_registry_utils import _decorator_func
def empty(sharding_spec: shard_spec.ShardingSpec,
          *size,
          dtype=None,
          layout=torch.strided,
          requires_grad=False,
          pin_memory=False,
          memory_format=torch.contiguous_format,
          process_group=None,
          init_rrefs=False) -> ShardedTensor:
    """Create a :class:`ShardedTensor` whose shards hold uninitialized data.

    Must be invoked collectively (SPMD) on every rank.

    Args:
        sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
            Describes how the tensor is partitioned across ranks.
        size (int...): dimensions of the overall tensor, given either as
            separate integer arguments or as a single list/tuple.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): element type; the global
            default is used when ``None``
            (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): desired layout.
            Default: ``torch.strided``.
        requires_grad (bool, optional): whether autograd records operations
            on the result. Default: ``False``.
        pin_memory (bool, optional): allocate in pinned memory (CPU tensors
            only). Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): desired
            memory format. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object on each rank.
    """
    # Collect the tensor-construction options once, then forward them all.
    factory_kwargs = dict(
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    return ShardedTensor(sharding_spec, *size, **factory_kwargs)
def ones(sharding_spec: shard_spec.ShardingSpec,
         *size,
         dtype=None,
         layout=torch.strided,
         requires_grad=False,
         pin_memory=False,
         memory_format=torch.contiguous_format,
         process_group=None,
         init_rrefs=False) -> ShardedTensor:
    """Create a :class:`ShardedTensor` with every element set to 1.

    Must be invoked collectively (SPMD) on every rank.

    Args:
        sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
            Describes how the tensor is partitioned across ranks.
        size (int...): dimensions of the overall tensor, given either as
            separate integer arguments or as a single list/tuple.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): element type; the global
            default is used when ``None``
            (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): desired layout.
            Default: ``torch.strided``.
        requires_grad (bool, optional): whether autograd records operations
            on the result. Default: ``False``.
        pin_memory (bool, optional): allocate in pinned memory (CPU tensors
            only). Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): desired
            memory format. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object on each rank.
    """
    # Thin wrapper: delegate to ``full`` with a fill value of one.
    factory_kwargs = dict(
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    return full(sharding_spec, size, fill_value=1, **factory_kwargs)
def zeros(sharding_spec: shard_spec.ShardingSpec,
          *size,
          dtype=None,
          layout=torch.strided,
          requires_grad=False,
          pin_memory=False,
          memory_format=torch.contiguous_format,
          process_group=None,
          init_rrefs=False) -> ShardedTensor:
    """Create a :class:`ShardedTensor` with every element set to 0.

    Must be invoked collectively (SPMD) on every rank.

    Args:
        sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
            Describes how the tensor is partitioned across ranks.
        size (int...): dimensions of the overall tensor, given either as
            separate integer arguments or as a single list/tuple.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): element type; the global
            default is used when ``None``
            (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): desired layout.
            Default: ``torch.strided``.
        requires_grad (bool, optional): whether autograd records operations
            on the result. Default: ``False``.
        pin_memory (bool, optional): allocate in pinned memory (CPU tensors
            only). Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): desired
            memory format. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object on each rank.
    """
    # Thin wrapper: delegate to ``full`` with a fill value of zero.
    factory_kwargs = dict(
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    return full(sharding_spec, size, fill_value=0, **factory_kwargs)
def full(sharding_spec: shard_spec.ShardingSpec,
         size,
         fill_value,
         *,
         dtype=None,
         layout=torch.strided,
         requires_grad=False,
         pin_memory=False,
         memory_format=torch.contiguous_format,
         process_group=None,
         init_rrefs=False) -> ShardedTensor:
    """Create a :class:`ShardedTensor` with every element set to ``fill_value``.

    The tensor's dtype is inferred from ``fill_value``; an explicit
    ``dtype`` overrides the inferred one. Must be invoked collectively
    (SPMD) on every rank.

    Args:
        sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`):
            Describes how the tensor is partitioned across ranks.
        size (int...): a list, tuple, or `torch.Size` of integers defining
            the shape of the output tensor.
        fill_value (Scalar): the value to fill the output tensor with.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): element type; the global
            default is used when ``None``
            (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): desired layout.
            Default: ``torch.strided``.
        requires_grad (bool, optional): whether autograd records operations
            on the result. Default: ``False``.
        pin_memory (bool, optional): allocate in pinned memory (CPU tensors
            only). Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): desired
            memory format. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object on each rank.
    """
    # Allocate the sharded tensor first, then fill every local shard
    # in place with the requested constant.
    factory_kwargs = dict(
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    result = ShardedTensor(sharding_spec, *size, **factory_kwargs)
    torch.nn.init.constant_(result, fill_value)  # type: ignore[arg-type]
    return result
def rand(sharding_spec: shard_spec.ShardingSpec,
         *size,
         dtype=None,
         layout=torch.strided,
         requires_grad=False,
         pin_memory=False,
         memory_format=torch.contiguous_format,
         process_group=None,
         init_rrefs=False) -> ShardedTensor:
    """Create a :class:`ShardedTensor` of uniform random numbers in [0, 1).

    The shape is defined by the variable argument ``size``. Must be
    invoked collectively (SPMD) on every rank.

    Args:
        sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
            Describes how the tensor is partitioned across ranks.
        size (int...): a list, tuple, or `torch.Size` of integers defining
            the shape of the output tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): element type; the global
            default is used when ``None``
            (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): desired layout.
            Default: ``torch.strided``.
        requires_grad (bool, optional): whether autograd records operations
            on the result. Default: ``False``.
        pin_memory (bool, optional): allocate in pinned memory (CPU tensors
            only). Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): desired
            memory format. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object on each rank.
    """
    # Allocate the sharded tensor, then draw U(0, 1) samples into each
    # local shard in place.
    factory_kwargs = dict(
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    result = ShardedTensor(sharding_spec, *size, **factory_kwargs)
    torch.nn.init.uniform_(result, 0, 1)  # type: ignore[arg-type]
    return result
def randn(sharding_spec: shard_spec.ShardingSpec,
          *size,
          dtype=None,
          layout=torch.strided,
          requires_grad=False,
          pin_memory=False,
          memory_format=torch.contiguous_format,
          process_group=None,
          init_rrefs=False) -> ShardedTensor:
    """
    Creates a :class:`ShardedTensor` filled with random numbers from a normal
    distribution with mean `0` and variance `1` (also called the standard
    normal distribution). The shape of the tensor is defined by the variable
    argument `size`. Needs to be called on all ranks in an SPMD fashion.

    (Fixed docstring: the previous version said "uniform distribution",
    but ``torch.nn.init.normal_`` samples a normal distribution.)

    Args:
        sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
            describing how to shard the Tensor.
        size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
            output tensor.
    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.contiguous_format``.
        process_group (ProcessGroup, optional): The process group to work on. If None,
            the default process group will be used.
        init_rrefs (bool, optional): Whether or not to initialize
            :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
            Need to initialize the RPC Framework if specified as ``True``.
            Default: ``False``.
    Returns:
        A :class:`ShardedTensor` object on each rank
    """
    sharded_tensor = ShardedTensor(
        sharding_spec,
        *size,
        dtype=dtype,
        layout=layout,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        memory_format=memory_format,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    # Fill every local shard in place with N(0, 1) samples.
    torch.nn.init.normal_(sharded_tensor, 0, 1)  # type: ignore[arg-type]
    return sharded_tensor
def init_from_local_shards(
        local_shards: List[Shard],
        *global_size,
        process_group=None,
        init_rrefs=False) -> ShardedTensor:
    """Build a :class:`ShardedTensor` from this rank's local shards.

    Must be invoked collectively (SPMD) on every rank; the global metadata
    is assembled from the per-rank shard metadata.

    Args:
        local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]):
            The shards that live on this rank.
        global_size (int...): a list, tuple, or `torch.Size` of integers
            defining the shape of the overall sharded tensor.

    Keyword args:
        process_group (ProcessGroup, optional): process group to work on;
            the default group is used when ``None``.
        init_rrefs (bool, optional): also create
            :class:`torch.distributed.rpc.RRef` handles to remote shards
            (requires the RPC framework to be initialized).
            Default: ``False``.

    Returns:
        A :class:`ShardedTensor` object handle on this rank.

    Examples:
        Constructing a (10, 5) sharded tensor on two ranks, each holding
        one (5, 5) local shard:

        on rank 0:
        >>> local_shard_metadata = ShardMetadata(
        >>>     shard_offsets=[0, 0]
        >>>     shard_lengths=[5, 5]
        >>>     placement="rank:0/cuda:0"
        >>> )
        >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
        >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])

        on rank 1:
        >>> local_shard_metadata = ShardMetadata(
        >>>     shard_offsets=[5, 0]
        >>>     shard_lengths=[5, 5]
        >>>     placement="rank:1/cuda:1"
        >>> )
        >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
        >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
    """
    # All of the collective construction work lives in the classmethod.
    result = ShardedTensor._init_from_local_shards(
        local_shards,
        *global_size,
        process_group=process_group,
        init_rrefs=init_rrefs,
    )
    return result
def state_dict_hook(module, destination, prefix, local_metadata):
    """
    Hook to add ShardedTensor to Module's ``state_dict``. Needs to be
    registered to the Module using
    :meth:`torch.nn.Module._register_state_dict_hook`.
    """
    # Walk every (sub)module; any instance attribute that is a
    # ShardedTensor is recorded under its fully qualified key.
    for submodule_name, submodule in module.named_modules():
        base = prefix + submodule_name
        if base:
            base += '.'
        for attr_name, attr in submodule.__dict__.items():
            if isinstance(attr, ShardedTensor):
                destination[base + attr_name] = attr
def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
    """
    Pre-load state dict hook to add ShardedTensor to the module.
    """
    # Mirror of ``state_dict_hook``: for every (sub)module attribute whose
    # fully qualified key maps to a ShardedTensor in the incoming state
    # dict, install that ShardedTensor on the submodule.
    for submodule_name, submodule in module.named_modules():
        base = prefix + submodule_name
        if base:
            base += '.'
        for attr_name in submodule.__dict__.keys():
            key = base + attr_name
            if key in state_dict and isinstance(state_dict[key], ShardedTensor):
                setattr(submodule, attr_name, state_dict[key])
def custom_sharded_op_impl(func):
    """Decorator factory for user-provided sharded operator overrides.

    Lets users override existing ShardedTensor operators or implement ones
    not yet supported. When ``func`` is covered by ``__torch_function__``
    dispatch and receives a ShardedTensor among its arguments, the decorated
    implementation is invoked instead.

    Example::
        >>> @custom_sharded_op_impl(torch.nn.functional.linear)
        >>> def my_custom_sharded_linear(types, args, kwargs, process_group):
        >>>     ....
        >>>
        >>> input = torch.rand(10, 32)
        >>> weight = sharded_tensor.rand(32, 16)
        >>> bias = torch.rand(16)
        >>> # This will call 'my_custom_sharded_linear'
        >>> torch.nn.functional.linear(input, weight, bias)

    The ``types``, ``args`` and ``kwargs`` parameters are identical to the
    ``__torch_function__`` dispatch API
    (https://pytorch.org/docs/stable/notes/extending.html#extending-torch);
    the extra ``process_group`` parameter is the ShardedTensor's process
    group, available for collective communication inside the implementation.

    Args:
        func(Callable): Torch function for which we want to provide a sharded
            implementation (ex: torch.nn.functional.linear)
    """
    # Registration is deferred: the partial receives the user's function
    # and records it in the custom-ops table.
    return functools.partial(_decorator_func, op=func, op_table=_CUSTOM_SHARDED_OPS)
def _sharded_op_impl(func):
    """Decorator that registers a default (built-in) sharded op for ``func``."""
    # Same mechanism as custom_sharded_op_impl, but targeting the
    # built-in ops table.
    return functools.partial(_decorator_func, op=func, op_table=_SHARDED_OPS)
# Import all builtin sharded ops
from ._ops import * # noqa: F403
| 41.725322
| 124
| 0.651049
|
11303bb945bc072a9179262dd1f21c809f71e016
| 734
|
py
|
Python
|
examples/setup.py
|
meangrape/build_manpage
|
05f68158e2256df4f8e888b91f22a2e18ae49712
|
[
"Apache-2.0"
] | 1
|
2020-05-15T00:38:23.000Z
|
2020-05-15T00:38:23.000Z
|
examples/setup.py
|
meangrape/build_manpage
|
05f68158e2256df4f8e888b91f22a2e18ae49712
|
[
"Apache-2.0"
] | null | null | null |
examples/setup.py
|
meangrape/build_manpage
|
05f68158e2256df4f8e888b91f22a2e18ae49712
|
[
"Apache-2.0"
] | 1
|
2020-05-15T00:38:31.000Z
|
2020-05-15T00:38:31.000Z
|
#!/usr/bin/env python3
import os.path
import sys
from setuptools import setup, find_packages
from build_manpage import build_manpage
# NOTE(review): HOME is never referenced below (data_files uses sys.prefix);
# confirm whether it can be removed.
HOME=os.path.expanduser('~')
setup(
    name='example',
    version='0.2.0',
    author='Jay Edwards',
    # Register the custom distutils command so `setup.py build_manpage` works.
    cmdclass={'build_manpage': build_manpage},
    author_email='jay@meangrape.com',
    # Install the generated man page under <prefix>/share/man/man1.
    data_files=[('%s/share/man/man1' % sys.prefix, ['doc/example.1'])],
    license='Apache License 2.0',
    description='build_manpage example',
    long_description=open('../README').read(),
    entry_points = {
        'console_scripts': [
            'example = example.example:main',
        ],
        'distutils.commands': [
            'build_manpage = build_manpage.build_manpage'
        ]
    }
)
| 24.466667
| 71
| 0.637602
|
0d8f39d7a1b79765dcac221d5a546b1b955a58af
| 25,871
|
py
|
Python
|
electrum/gui/qt/channels_list.py
|
jeroz1/electrum-ravencoin-utd
|
1775d61162d3c6a4e60a040a7eef69b2cc0e7b1f
|
[
"MIT"
] | 1
|
2022-03-06T08:02:55.000Z
|
2022-03-06T08:02:55.000Z
|
electrum/gui/qt/channels_list.py
|
jeroz1/electrum-ravencoin-utd
|
1775d61162d3c6a4e60a040a7eef69b2cc0e7b1f
|
[
"MIT"
] | null | null | null |
electrum/gui/qt/channels_list.py
|
jeroz1/electrum-ravencoin-utd
|
1775d61162d3c6a4e60a040a7eef69b2cc0e7b1f
|
[
"MIT"
] | 1
|
2021-05-25T19:05:23.000Z
|
2021-05-25T19:05:23.000Z
|
# -*- coding: utf-8 -*-
import traceback
from enum import IntEnum
from typing import Sequence, Optional, Dict
from abc import abstractmethod, ABC
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import Qt, QRect, QSize
from PyQt5.QtWidgets import (QMenu, QHBoxLayout, QLabel, QVBoxLayout, QGridLayout, QLineEdit,
QPushButton, QAbstractItemView, QComboBox, QCheckBox,
QToolTip)
from PyQt5.QtGui import QFont, QStandardItem, QBrush, QPainter, QIcon, QHelpEvent
from electrum.util import bh2u, NotEnoughFunds, NoDynamicFeeEstimates
from electrum.i18n import _
from electrum.lnchannel import AbstractChannel, PeerState, ChannelBackup, Channel, ChannelState
from electrum.wallet import Abstract_Wallet
from electrum.lnutil import LOCAL, REMOTE, format_short_channel_id, LN_MAX_FUNDING_SAT
from electrum.lnworker import LNWallet
from electrum import ecc
from electrum.gui import messages
from .util import (MyTreeView, WindowModalDialog, Buttons, OkButton, CancelButton,
EnterButton, WaitingDialog, MONOSPACE_FONT, ColorScheme)
from .amountedit import RVNAmountEdit, FreezableLineEdit
from .util import read_QIcon
ROLE_CHANNEL_ID = Qt.UserRole
class ChannelsList(MyTreeView):
    """Qt tree view listing the wallet's Lightning channels."""
    # Signals used to marshal updates from other threads onto the GUI thread.
    update_rows = QtCore.pyqtSignal(Abstract_Wallet)
    update_single_row = QtCore.pyqtSignal(Abstract_Wallet, AbstractChannel)
    gossip_db_loaded = QtCore.pyqtSignal()
    class Columns(IntEnum):
        # Column indices of the channel list view, in display order.
        FEATURES = 0
        SHORT_CHANID = 1
        NODE_ALIAS = 2
        CAPACITY = 3
        LOCAL_BALANCE = 4
        REMOTE_BALANCE = 5
        CHANNEL_STATUS = 6
    # Header label per column (FEATURES is icon-only, hence empty).
    headers = {
        Columns.SHORT_CHANID: _('Short Channel ID'),
        Columns.NODE_ALIAS: _('Node alias'),
        Columns.FEATURES: "",
        Columns.CAPACITY: _('Capacity'),
        Columns.LOCAL_BALANCE: _('Can send'),
        Columns.REMOTE_BALANCE: _('Can receive'),
        Columns.CHANNEL_STATUS: _('Status'),
    }
    # Columns the view's text filter searches over.
    filter_columns = [
        Columns.SHORT_CHANID,
        Columns.NODE_ALIAS,
        Columns.CHANNEL_STATUS,
    ]
    # Lazily captured default background brush for row repainting.
    _default_item_bg_brush = None  # type: Optional[QBrush]
def __init__(self, parent):
    # `parent` is the main window; NODE_ALIAS stretches to fill the view.
    super().__init__(parent, self.create_menu, stretch_column=self.Columns.NODE_ALIAS)
    self.setModel(QtGui.QStandardItemModel(self))
    self.setSelectionMode(QAbstractItemView.ExtendedSelection)
    self.main_window = parent
    # Cross-thread signals are connected to their GUI-thread handlers here.
    self.gossip_db_loaded.connect(self.on_gossip_db)
    self.update_rows.connect(self.do_update_rows)
    self.update_single_row.connect(self.do_update_single_row)
    self.network = self.parent.network
    self.wallet = self.parent.wallet
    self.setSortingEnabled(True)
@property
# property because lnworker might be initialized at runtime
def lnworker(self):
    """Always read the wallet's current LNWallet instead of caching it."""
    return self.wallet.lnworker
def format_fields(self, chan: AbstractChannel) -> Dict['ChannelsList.Columns', str]:
    """Build the display string for every column of one channel row."""
    labels = {}
    for subject in (REMOTE, LOCAL):
        if isinstance(chan, Channel):
            # Values from the channel are in millisatoshi; /1000 and //1000
            # convert to satoshi for display.
            can_send = chan.available_to_spend(subject) / 1000
            label = self.parent.format_amount(can_send)
            other = subject.inverted()
            bal_other = chan.balance(other)//1000
            bal_minus_htlcs_other = chan.balance_minus_outgoing_htlcs(other)//1000
            # Show the amount currently locked in outgoing HTLCs, if any.
            if bal_other != bal_minus_htlcs_other:
                label += ' (+' + self.parent.format_amount(bal_other - bal_minus_htlcs_other) + ')'
        else:
            # Channel backups have no live balance information.
            assert isinstance(chan, ChannelBackup)
            label = ''
        labels[subject] = label
    status = chan.get_state_for_GUI()
    closed = chan.is_closed()
    # Fall back to the raw node id when no alias is known.
    node_alias = self.lnworker.get_node_alias(chan.node_id) or chan.node_id.hex()
    capacity_str = self.parent.format_amount(chan.get_capacity(), whitespaces=True)
    return {
        self.Columns.SHORT_CHANID: chan.short_id_for_GUI(),
        self.Columns.NODE_ALIAS: node_alias,
        self.Columns.FEATURES: '',
        self.Columns.CAPACITY: capacity_str,
        # Balances are meaningless once the channel is closed.
        self.Columns.LOCAL_BALANCE: '' if closed else labels[LOCAL],
        self.Columns.REMOTE_BALANCE: '' if closed else labels[REMOTE],
        self.Columns.CHANNEL_STATUS: status,
    }
def on_channel_closed(self, txid):
    # Success callback for cooperative close: report the closing txid.
    self.main_window.show_error('Channel closed' + '\n' + txid)
def on_request_sent(self, b):
    # Success callback for a remote force-close request.
    self.main_window.show_message(_('Request sent'))
def on_failure(self, exc_info):
    # Failure callback shared by the close paths: log the traceback and
    # surface the error to the user.
    type_, e, tb = exc_info
    traceback.print_tb(tb)
    self.main_window.show_error('Failed to close channel:\n{}'.format(repr(e)))
    def close_channel(self, channel_id):
        """Cooperatively close a channel; a checkbox in the confirmation
        dialog lets the user request a force close from the remote peer
        instead."""
        # stashed on self so the checkbox callback below can update it
        self.is_force_close = False
        msg = _('Close channel?')
        force_cb = QCheckBox('Request force close from remote peer')
        tooltip = _(messages.MSG_REQUEST_FORCE_CLOSE)
        tooltip = messages.to_rtf(tooltip)
        def on_checked(b):
            self.is_force_close = bool(b)
        force_cb.stateChanged.connect(on_checked)
        force_cb.setToolTip(tooltip)
        if not self.parent.question(msg, checkbox=force_cb):
            return  # user cancelled
        if self.is_force_close:
            coro = self.lnworker.request_force_close(channel_id)
            on_success = self.on_request_sent
        else:
            coro = self.lnworker.close_channel(channel_id)
            on_success = self.on_channel_closed
        def task():
            # run the coroutine on the network event loop, blocking this task
            return self.network.run_from_another_thread(coro)
        WaitingDialog(self, 'please wait..', task, on_success, self.on_failure)
    def force_close(self, channel_id):
        """Unilaterally close a channel, after warning about the time-lock
        delay and offering to create a wallet backup first."""
        # stashed on self so the checkbox callback below can update it
        self.save_backup = True
        backup_cb = QCheckBox('Create a backup now', checked=True)
        def on_checked(b):
            self.save_backup = bool(b)
        backup_cb.stateChanged.connect(on_checked)
        chan = self.lnworker.channels[channel_id]
        # after a force close our funds are locked for the peer's to_self_delay
        to_self_delay = chan.config[REMOTE].to_self_delay
        msg = '<b>' + _('Force-close channel?') + '</b><br/>'\
            + '<p>' + _('If you force-close this channel, the funds you have in it will not be available for {} blocks.').format(to_self_delay) + ' '\
            + _('After that delay, funds will be swept to an address derived from your wallet seed.') + '</p>'\
            + '<u>' + _('Please create a backup of your wallet file!') + '</u> '\
            + '<p>' + _('Funds in this channel will not be recoverable from seed until they are swept back into your wallet, and might be lost if you lose your wallet file.') + ' '\
            + _('To prevent that, you should save a backup of your wallet on another device.') + '</p>'
        if not self.parent.question(msg, title=_('Force-close channel'), rich_text=True, checkbox=backup_cb):
            return
        if self.save_backup:
            if not self.parent.backup_wallet():
                return  # backup failed or was cancelled: abort the close
        def task():
            coro = self.lnworker.force_close_channel(channel_id)
            return self.network.run_from_another_thread(coro)
        WaitingDialog(self, 'please wait..', task, self.on_channel_closed, self.on_failure)
def remove_channel(self, channel_id):
if self.main_window.question(_('Are you sure you want to delete this channel? This will purge associated transactions from your wallet history.')):
self.lnworker.remove_channel(channel_id)
def remove_channel_backup(self, channel_id):
if self.main_window.question(_('Remove channel backup?')):
self.lnworker.remove_channel_backup(channel_id)
def export_channel_backup(self, channel_id):
msg = ' '.join([
_("Channel backups can be imported in another instance of the same wallet, by scanning this QR code."),
_("Please note that channel backups cannot be used to restore your channels."),
_("If you lose your wallet file, the only thing you can do with a backup is to request your channel to be closed, so that your funds will be sent on-chain."),
])
data = self.lnworker.export_channel_backup(channel_id)
self.main_window.show_qrcode(data, 'channel backup', help_text=msg,
show_copy_text_btn=True)
def request_force_close(self, channel_id):
def task():
coro = self.lnworker.request_force_close(channel_id)
return self.network.run_from_another_thread(coro)
WaitingDialog(self, 'please wait..', task, self.on_request_sent, self.on_failure)
def freeze_channel_for_sending(self, chan, b):
if self.lnworker.channel_db or self.lnworker.is_trampoline_peer(chan.node_id):
chan.set_frozen_for_sending(b)
else:
msg = messages.MSG_NON_TRAMPOLINE_CHANNEL_FROZEN_WITHOUT_GOSSIP
self.main_window.show_warning(msg, title=_('Channel is frozen for sending'))
    def create_menu(self, position):
        """Build and show the context menu for the row under `position`.

        Three shapes: empty selection (import backup only), a channel
        backup row, or a regular channel row.
        """
        menu = QMenu()
        menu.setSeparatorsCollapsible(True)  # consecutive separators are merged together
        selected = self.selected_in_column(self.Columns.NODE_ALIAS)
        if not selected:
            # click on empty area: only offer importing a backup
            menu.addAction(_("Import channel backup"), lambda: self.parent.do_process_from_text_channel_backup())
            menu.exec_(self.viewport().mapToGlobal(position))
            return
        multi_select = len(selected) > 1
        if multi_select:
            # no bulk actions supported
            return
        idx = self.indexAt(position)
        if not idx.isValid():
            return
        item = self.model().itemFromIndex(idx)
        if not item:
            return
        # the NODE_ALIAS cell of the row carries the channel id
        channel_id = idx.sibling(idx.row(), self.Columns.NODE_ALIAS).data(ROLE_CHANNEL_ID)
        chan = self.lnworker.channel_backups.get(channel_id)
        if chan:
            # backup rows: limited menu (view funding tx, request close, delete)
            funding_tx = self.parent.wallet.db.get_transaction(chan.funding_outpoint.txid)
            menu.addAction(_("View funding transaction"), lambda: self.parent.show_transaction(funding_tx))
            if chan.get_state() == ChannelState.FUNDED:
                menu.addAction(_("Request force-close"), lambda: self.request_force_close(channel_id))
            if chan.can_be_deleted():
                menu.addAction(_("Delete"), lambda: self.remove_channel_backup(channel_id))
            menu.exec_(self.viewport().mapToGlobal(position))
            return
        # regular channel row
        chan = self.lnworker.channels[channel_id]
        menu.addAction(_("Details..."), lambda: self.parent.show_channel(channel_id))
        cc = self.add_copy_menu(menu, idx)
        cc.addAction(_("Node ID"), lambda: self.place_text_on_clipboard(
            chan.node_id.hex(), title=_("Node ID")))
        cc.addAction(_("Long Channel ID"), lambda: self.place_text_on_clipboard(
            channel_id.hex(), title=_("Long Channel ID")))
        if not chan.is_closed():
            # freeze/unfreeze toggles per direction
            if not chan.is_frozen_for_sending():
                menu.addAction(_("Freeze (for sending)"), lambda: self.freeze_channel_for_sending(chan, True))
            else:
                menu.addAction(_("Unfreeze (for sending)"), lambda: self.freeze_channel_for_sending(chan, False))
            if not chan.is_frozen_for_receiving():
                menu.addAction(_("Freeze (for receiving)"), lambda: chan.set_frozen_for_receiving(True))
            else:
                menu.addAction(_("Unfreeze (for receiving)"), lambda: chan.set_frozen_for_receiving(False))
        funding_tx = self.parent.wallet.db.get_transaction(chan.funding_outpoint.txid)
        if funding_tx:
            menu.addAction(_("View funding transaction"), lambda: self.parent.show_transaction(funding_tx))
        if not chan.is_closed():
            menu.addSeparator()
            if chan.peer_state == PeerState.GOOD:
                # cooperative close needs a live peer; force close is always offered
                menu.addAction(_("Close channel"), lambda: self.close_channel(channel_id))
                menu.addAction(_("Force-close channel"), lambda: self.force_close(channel_id))
        else:
            item = chan.get_closing_height()
            if item:
                txid, height, timestamp = item
                closing_tx = self.lnworker.lnwatcher.db.get_transaction(txid)
                if closing_tx:
                    menu.addAction(_("View closing transaction"), lambda: self.parent.show_transaction(closing_tx))
        menu.addSeparator()
        menu.addAction(_("Export backup"), lambda: self.export_channel_backup(channel_id))
        if chan.can_be_deleted():
            menu.addSeparator()
            menu.addAction(_("Delete"), lambda: self.remove_channel(channel_id))
        menu.exec_(self.viewport().mapToGlobal(position))
    @QtCore.pyqtSlot(Abstract_Wallet, AbstractChannel)
    def do_update_single_row(self, wallet: Abstract_Wallet, chan: AbstractChannel):
        """Qt slot: refresh just the row belonging to `chan` in place.

        Ignores signals emitted for a different wallet.
        """
        if wallet != self.parent.wallet:
            return
        for row in range(self.model().rowCount()):
            # the NODE_ALIAS item carries the channel id for the whole row
            item = self.model().item(row, self.Columns.NODE_ALIAS)
            if item.data(ROLE_CHANNEL_ID) != chan.channel_id:
                continue
            for column, v in self.format_fields(chan).items():
                self.model().item(row, column).setData(v, QtCore.Qt.DisplayRole)
            items = [self.model().item(row, column) for column in self.Columns]
            self._update_chan_frozen_bg(chan=chan, items=items)
        if wallet.lnworker:
            self.update_can_send(wallet.lnworker)
    @QtCore.pyqtSlot()
    def on_gossip_db(self):
        """Qt slot: redraw all rows once the gossip database has loaded."""
        self.do_update_rows(self.parent.wallet)
    @QtCore.pyqtSlot(Abstract_Wallet)
    def do_update_rows(self, wallet):
        """Qt slot: rebuild the entire channel list from scratch."""
        if wallet != self.parent.wallet:
            return
        channels = list(wallet.lnworker.channels.values()) if wallet.lnworker else []
        backups = list(wallet.lnworker.channel_backups.values()) if wallet.lnworker else []
        if wallet.lnworker:
            self.update_can_send(wallet.lnworker)
        self.model().clear()
        self.update_headers(self.headers)
        for chan in channels + backups:
            field_map = self.format_fields(chan)
            # sorted() orders the keys; Columns members are assumed to sort
            # in display-column order — TODO confirm (IntEnum-like)
            items = [QtGui.QStandardItem(field_map[col]) for col in sorted(field_map)]
            self.set_editability(items)
            # capture the default background once so frozen cells can be reset
            if self._default_item_bg_brush is None:
                self._default_item_bg_brush = items[self.Columns.NODE_ALIAS].background()
            items[self.Columns.NODE_ALIAS].setData(chan.channel_id, ROLE_CHANNEL_ID)
            items[self.Columns.NODE_ALIAS].setFont(QFont(MONOSPACE_FONT))
            items[self.Columns.LOCAL_BALANCE].setFont(QFont(MONOSPACE_FONT))
            items[self.Columns.REMOTE_BALANCE].setFont(QFont(MONOSPACE_FONT))
            items[self.Columns.FEATURES].setData(ChannelFeatureIcons.from_channel(chan), self.ROLE_CUSTOM_PAINT)
            items[self.Columns.CAPACITY].setFont(QFont(MONOSPACE_FONT))
            self._update_chan_frozen_bg(chan=chan, items=items)
            self.model().insertRow(0, items)
        self.sortByColumn(self.Columns.SHORT_CHANID, Qt.DescendingOrder)
def _update_chan_frozen_bg(self, *, chan: AbstractChannel, items: Sequence[QStandardItem]):
assert self._default_item_bg_brush is not None
# frozen for sending
item = items[self.Columns.LOCAL_BALANCE]
if chan.is_frozen_for_sending():
item.setBackground(ColorScheme.BLUE.as_color(True))
item.setToolTip(_("This channel is frozen for sending. It will not be used for outgoing payments."))
else:
item.setBackground(self._default_item_bg_brush)
item.setToolTip("")
# frozen for receiving
item = items[self.Columns.REMOTE_BALANCE]
if chan.is_frozen_for_receiving():
item.setBackground(ColorScheme.BLUE.as_color(True))
item.setToolTip(_("This channel is frozen for receiving. It will not be included in invoices."))
else:
item.setBackground(self._default_item_bg_brush)
item.setToolTip("")
def update_can_send(self, lnworker: LNWallet):
msg = _('Can send') + ' ' + self.parent.format_amount(lnworker.num_sats_can_send())\
+ ' ' + self.parent.base_unit() + '; '\
+ _('can receive') + ' ' + self.parent.format_amount(lnworker.num_sats_can_receive())\
+ ' ' + self.parent.base_unit()
self.can_send_label.setText(msg)
self.update_swap_button(lnworker)
def update_swap_button(self, lnworker: LNWallet):
if lnworker.num_sats_can_send() or lnworker.num_sats_can_receive():
self.swap_button.setEnabled(True)
else:
self.swap_button.setEnabled(False)
    def get_toolbar(self):
        """Build the toolbar above the list (liquidity label, Swap and
        Open Channel buttons) and return its layout."""
        h = QHBoxLayout()
        self.can_send_label = QLabel('')
        h.addWidget(self.can_send_label)
        h.addStretch()
        self.swap_button = EnterButton(_('Swap'), self.swap_dialog)
        self.swap_button.setToolTip("Have at least one channel to do swaps.")
        self.swap_button.setDisabled(True)  # enabled later by update_swap_button
        self.new_channel_button = EnterButton(_('Open Channel'), self.new_channel_with_warning)
        self.new_channel_button.setEnabled(self.parent.wallet.has_lightning())
        h.addWidget(self.new_channel_button)
        h.addWidget(self.swap_button)
        return h
def new_channel_with_warning(self):
lnworker = self.parent.wallet.lnworker
if not lnworker.channels and not lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
answer = self.parent.question(
_('Do you want to create your first channel?') + '\n\n' + warning)
if answer:
self.new_channel_dialog()
else:
self.new_channel_dialog()
    def statistics_dialog(self):
        """Show a modal dialog with basic network-graph statistics
        (node count, channel count, total capacity)."""
        channel_db = self.parent.network.channel_db
        capacity = self.parent.format_amount(channel_db.capacity()) + ' '+ self.parent.base_unit()
        d = WindowModalDialog(self.parent, _('Lightning Network Statistics'))
        d.setMinimumWidth(400)
        vbox = QVBoxLayout(d)
        h = QGridLayout()
        h.addWidget(QLabel(_('Nodes') + ':'), 0, 0)
        h.addWidget(QLabel('{}'.format(channel_db.num_nodes)), 0, 1)
        h.addWidget(QLabel(_('Channels') + ':'), 1, 0)
        h.addWidget(QLabel('{}'.format(channel_db.num_channels)), 1, 1)
        h.addWidget(QLabel(_('Capacity') + ':'), 2, 0)
        h.addWidget(QLabel(capacity), 2, 1)
        vbox.addLayout(h)
        vbox.addLayout(Buttons(OkButton(d)))
        d.exec_()
    def new_channel_dialog(self):
        """Modal dialog to open a new channel.

        With gossip (channel_db) available the user enters or auto-suggests
        a remote node; without gossip a hardcoded trampoline node is picked
        from a combo box. Funding amount supports a 'Max' (spend-all) mode.
        """
        lnworker = self.parent.wallet.lnworker
        d = WindowModalDialog(self.parent, _('Open Channel'))
        vbox = QVBoxLayout(d)
        if self.parent.network.channel_db:
            vbox.addWidget(QLabel(_('Enter Remote Node ID or connection string or invoice')))
            remote_nodeid = QLineEdit()
            remote_nodeid.setMinimumWidth(700)
            suggest_button = QPushButton(d, text=_('Suggest Peer'))
            def on_suggest():
                self.parent.wallet.network.start_gossip()
                nodeid = bh2u(lnworker.suggest_peer() or b'')
                if not nodeid:
                    remote_nodeid.setText("")
                    remote_nodeid.setPlaceholderText(
                        "Please wait until the graph is synchronized to 30%, and then try again.")
                else:
                    remote_nodeid.setText(nodeid)
                remote_nodeid.repaint()  # macOS hack for #6269
            suggest_button.clicked.connect(on_suggest)
        else:
            from electrum.lnworker import hardcoded_trampoline_nodes
            vbox.addWidget(QLabel(_('Choose a trampoline node to open a channel with')))
            trampolines = hardcoded_trampoline_nodes()
            trampoline_names = list(trampolines.keys())
            trampoline_combo = QComboBox()
            trampoline_combo.addItems(trampoline_names)
            trampoline_combo.setCurrentIndex(1)
        amount_e = RVNAmountEdit(self.parent.get_decimal_point)
        # max button: estimate a spend-all funding tx with a dummy node id
        def spend_max():
            amount_e.setFrozen(max_button.isChecked())
            if not max_button.isChecked():
                return
            dummy_nodeid = ecc.GENERATOR.get_public_key_bytes(compressed=True)
            make_tx = self.parent.mktx_for_open_channel(funding_sat='!', node_id=dummy_nodeid)
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                max_button.setChecked(False)
                amount_e.setFrozen(False)
                self.main_window.show_error(str(e))
                return
            amount = tx.output_value()
            # cap at the protocol's maximum channel funding amount
            amount = min(amount, LN_MAX_FUNDING_SAT)
            amount_e.setAmount(amount)
        max_button = EnterButton(_("Max"), spend_max)
        max_button.setFixedWidth(100)
        max_button.setCheckable(True)
        clear_button = QPushButton(d, text=_('Clear'))
        def on_clear():
            amount_e.setText('')
            amount_e.setFrozen(False)
            amount_e.repaint()  # macOS hack for #6269
            if self.parent.network.channel_db:
                remote_nodeid.setText('')
                remote_nodeid.repaint()  # macOS hack for #6269
            max_button.setChecked(False)
            max_button.repaint()  # macOS hack for #6269
        clear_button.clicked.connect(on_clear)
        clear_button.setFixedWidth(100)
        h = QGridLayout()
        if self.parent.network.channel_db:
            h.addWidget(QLabel(_('Remote Node ID')), 0, 0)
            h.addWidget(remote_nodeid, 0, 1, 1, 4)
            h.addWidget(suggest_button, 0, 5)
        else:
            h.addWidget(QLabel(_('Trampoline')), 0, 0)
            h.addWidget(trampoline_combo, 0, 1, 1, 4)
        h.addWidget(QLabel('Amount'), 2, 0)
        h.addWidget(amount_e, 2, 1)
        h.addWidget(max_button, 2, 2)
        h.addWidget(clear_button, 2, 3)
        vbox.addLayout(h)
        vbox.addStretch()
        ok_button = OkButton(d)
        ok_button.setDefault(True)
        vbox.addLayout(Buttons(CancelButton(d), ok_button))
        if not d.exec_():
            return  # dialog cancelled
        if max_button.isChecked() and amount_e.get_amount() < LN_MAX_FUNDING_SAT:
            # if 'max' enabled and amount is strictly less than max allowed,
            # that means we have fewer coins than max allowed, and hence we can
            # spend all coins
            funding_sat = '!'
        else:
            funding_sat = amount_e.get_amount()
        if self.parent.network.channel_db:
            connect_str = str(remote_nodeid.text()).strip()
        else:
            name = trampoline_names[trampoline_combo.currentIndex()]
            connect_str = str(trampolines[name])
        if not connect_str or not funding_sat:
            return
        self.parent.open_channel(connect_str, funding_sat, 0)
def swap_dialog(self):
from .swap_dialog import SwapDialog
d = SwapDialog(self.parent)
d.run()
class ChannelFeature(ABC):
    """One icon+tooltip badge shown in the FEATURES column.

    ``rect`` remembers where the icon was last painted so tooltip
    hit-testing can locate it again.
    """
    def __init__(self):
        # last painted location; updated by ChannelFeatureIcons.paint
        self.rect = QRect()
    @abstractmethod
    def tooltip(self) -> str:
        """Text shown when the mouse hovers this icon."""
    @abstractmethod
    def icon(self) -> QIcon:
        """Icon rendered in the features cell."""
class ChanFeatChannel(ChannelFeature):
    """Badge for a regular (live) channel."""
    def icon(self) -> QIcon:
        return read_QIcon("lightning")
    def tooltip(self) -> str:
        return _("This is a channel")
class ChanFeatBackup(ChannelFeature):
    """Badge for a static channel backup."""
    def icon(self) -> QIcon:
        return read_QIcon("lightning_disconnected")
    def tooltip(self) -> str:
        return _("This is a static channel backup")
class ChanFeatTrampoline(ChannelFeature):
    """Badge for a peer that can route Trampoline payments."""
    def icon(self) -> QIcon:
        return read_QIcon("kangaroo")
    def tooltip(self) -> str:
        return _("The channel peer can route Trampoline payments.")
class ChanFeatNoOnchainBackup(ChannelFeature):
    """Badge for a channel that cannot be recovered from seed."""
    def icon(self) -> QIcon:
        return read_QIcon("nocloud")
    def tooltip(self) -> str:
        return _("This channel cannot be recovered from your seed. You must back it up manually.")
class ChannelFeatureIcons:
    """Paintable set of feature badges for one channel row."""
    ICON_SIZE = QSize(16, 16)
    def __init__(self, features: Sequence['ChannelFeature']):
        self.features = features
    @classmethod
    def from_channel(cls, chan: AbstractChannel) -> 'ChannelFeatureIcons':
        """Derive the badge set from the channel's properties."""
        feats = []
        if chan.is_backup():
            feats.append(ChanFeatBackup())
            # imported (manually transferred) backups have no on-chain trace
            if chan.is_imported:
                feats.append(ChanFeatNoOnchainBackup())
        else:
            feats.append(ChanFeatChannel())
            if chan.lnworker.is_trampoline_peer(chan.node_id):
                feats.append(ChanFeatTrampoline())
            if not chan.has_onchain_backup():
                feats.append(ChanFeatNoOnchainBackup())
        return ChannelFeatureIcons(feats)
    def paint(self, painter: QPainter, rect: QRect) -> None:
        """Draw the icons left-to-right inside `rect`, recording each icon's
        rect for later tooltip hit-testing."""
        painter.save()
        cur_x = rect.x()
        for feat in self.features:
            icon_rect = QRect(cur_x, rect.y(), self.ICON_SIZE.width(), self.ICON_SIZE.height())
            feat.rect = icon_rect
            if rect.contains(icon_rect):  # stay inside parent
                painter.drawPixmap(icon_rect, feat.icon().pixmap(self.ICON_SIZE))
            cur_x += self.ICON_SIZE.width() + 1  # 1px gap between icons
        painter.restore()
    def sizeHint(self, default_size: QSize) -> QSize:
        """Width required to fit all icons; keeps the default height."""
        if not self.features:
            return default_size
        width = len(self.features) * (self.ICON_SIZE.width() + 1)
        return QSize(width, default_size.height())
    def show_tooltip(self, evt: QHelpEvent) -> bool:
        """Show the tooltip of the icon under the cursor, if any."""
        assert isinstance(evt, QHelpEvent)
        for feat in self.features:
            if feat.rect.contains(evt.pos()):
                QToolTip.showText(evt.globalPos(), feat.tooltip())
                break
        else:
            # cursor over no icon: hide any tooltip and ignore the event
            QToolTip.hideText()
            evt.ignore()
        return True
| 44.605172
| 181
| 0.638978
|
e9326e27c1985656bac32da4ed08d70ca574b301
| 1,017
|
py
|
Python
|
src/temporal/PlotSZA.py
|
scottaiton/gemini3d
|
21f59aabb4d842d587dbeba428ba7353905e063e
|
[
"Apache-2.0"
] | 22
|
2020-09-17T05:08:59.000Z
|
2022-02-24T18:22:06.000Z
|
src/temporal/PlotSZA.py
|
scottaiton/gemini3d
|
21f59aabb4d842d587dbeba428ba7353905e063e
|
[
"Apache-2.0"
] | 52
|
2020-07-23T13:55:59.000Z
|
2022-03-19T13:59:43.000Z
|
src/temporal/PlotSZA.py
|
scottaiton/gemini3d
|
21f59aabb4d842d587dbeba428ba7353905e063e
|
[
"Apache-2.0"
] | 11
|
2020-08-20T12:07:06.000Z
|
2021-12-14T13:03:04.000Z
|
import astropy.coordinates as ac
import astropy
import astropy.units as u
from argparse import ArgumentParser
def solarzenithangle(t, glat, glon, alt_m):
    """Compute the solar zenith angle for an observer.

    Parameters
    ----------
    t : datetime
        time of observation
    glat : float
        latitude
    glon : float
        longitude
    alt_m : float
        observer altitude [meters]

    Returns
    -------
    sza : float
        solar zenith angle [degrees]
    """
    location = ac.EarthLocation(lat=glat * u.deg, lon=glon * u.deg, height=alt_m * u.m)
    obstime = astropy.time.Time(t, scale="ut1")
    altaz_frame = ac.AltAz(obstime=obstime, location=location)
    sun_altaz = ac.get_sun(obstime).transform_to(altaz_frame)
    # zenith angle is the complement of the sun's elevation
    return 90.0 - sun_altaz.alt.degree
if __name__ == "__main__":
    # CLI: PlotSZA.py <UTC time> <lat> <lon> <altitude_m>
    p = ArgumentParser()
    p.add_argument("time", help="UTC")
    p.add_argument("latlon", nargs=2, type=float)
    p.add_argument("altitude", help="meters", type=float)
    a = p.parse_args()
    # the time string is passed through; astropy's Time parses it — TODO confirm format
    sza = solarzenithangle(a.time, a.latlon[0], a.latlon[1], a.altitude)
    print(sza)
| 22.108696
| 82
| 0.631268
|
4b8ec0567a2469c2836fb326882a543759b65a15
| 814
|
py
|
Python
|
peon/tests/test_commandline/test_project_tree/test_inspect.py
|
roch1990/peon
|
0e9e40956c05138c0820fe380b354fdd1fe95e01
|
[
"MIT"
] | 32
|
2020-05-18T14:02:59.000Z
|
2022-02-06T15:00:12.000Z
|
peon/tests/test_commandline/test_project_tree/test_inspect.py
|
roch1990/peon
|
0e9e40956c05138c0820fe380b354fdd1fe95e01
|
[
"MIT"
] | 42
|
2020-05-22T20:29:08.000Z
|
2021-03-10T21:24:23.000Z
|
peon/tests/test_commandline/test_project_tree/test_inspect.py
|
roch1990/peon
|
0e9e40956c05138c0820fe380b354fdd1fe95e01
|
[
"MIT"
] | 4
|
2020-07-02T06:32:42.000Z
|
2022-01-24T22:46:02.000Z
|
import pytest
from peon.src.comandline.project_tree import ProjectTree
from peon.tests.utils import TestProjectTree
def test_inspect_success():
    # inspect() over the fixtures dir must yield no files outside the three
    # expected fixture paths (set difference with them is empty)
    assert list(
        set(
            ProjectTree(
                path_to_project=[f'{TestProjectTree().pythonpath}/tests/fixtures'],
            ).inspect(),
        ) - set([
            f'{TestProjectTree().pythonpath}/tests/fixtures/dummy_code.py',
            f'{TestProjectTree().pythonpath}/tests/fixtures/__init__.py',
            f'{TestProjectTree().pythonpath}/tests/fixtures/dummy_folder/__init__.py',
        ]),
    ) == []
def test_inspect_type_error():
    """Passing None as the path list must raise TypeError."""
    with pytest.raises(TypeError):
        ProjectTree(None).inspect()
def test_inspect_wrong_path():
    """A nonexistent path yields an empty result instead of raising."""
    result = ProjectTree(['atatat/tratata']).inspect()
    assert result == []
| 29.071429
| 93
| 0.632678
|
7d30cb6381cf8d5ddfeb26d6aa4375bb4502c0a3
| 10,133
|
py
|
Python
|
preprocessy/resampling/_split.py
|
RiyaGupta99/preprocessy
|
1cccf56e96f95394e939ea9aa2751857c071af75
|
[
"MIT"
] | null | null | null |
preprocessy/resampling/_split.py
|
RiyaGupta99/preprocessy
|
1cccf56e96f95394e939ea9aa2751857c071af75
|
[
"MIT"
] | null | null | null |
preprocessy/resampling/_split.py
|
RiyaGupta99/preprocessy
|
1cccf56e96f95394e939ea9aa2751857c071af75
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from ..utils import num_of_samples
class Split:
    """Class for resampling and splitting input data."""

    def __init__(self):
        self.df = None            # concatenated frame used for internal shuffling
        self.train_df = None      # feature dataframe (may contain the target)
        self.target_label = None  # name of the target column, if any
        self.train_y = None       # target series for the train frame
        self.test_df = None       # optional pre-split test dataframe
        self.test_y = None        # target series for the test frame
        self.test_size = None     # float fraction or int sample count
        self.train_size = None    # float fraction or int sample count
        self.random_state = 69    # shuffle seed

    def __repr__(self):
        return f"Split(test_size={self.test_size}, train_size={self.train_size}, random_state={self.random_state})"

    def __validate_input(self):
        """Validate and normalize the instance state used for splitting.

        Checks that ``train_df`` is a DataFrame, that ``train_y`` (when set)
        is a Series of matching length, and that ``test_size`` /
        ``train_size`` are consistent: floats summing to 1, or ints summing
        to the number of samples. When only one size is given, the other is
        derived as its complement; when neither is given, defaults are used
        (0.8/0.2 without a target, else test = 1/sqrt(n_features)).

        Raises
        ------
        ValueError
            For a missing dataframe, mismatched sample counts, or
            out-of-range / inconsistent sizes.
        TypeError
            For wrongly-typed dataframe, target, sizes or random_state.
        """
        if self.train_df is None:
            raise ValueError("Feature dataframe should not be of None")

        if not isinstance(self.train_df, pd.core.frame.DataFrame):
            raise TypeError(
                "Feature dataframe is not a valid dataframe.\nExpected object"
                " type: pandas.core.frame.DataFrame"
            )

        n_samples = num_of_samples(self.train_df)

        if self.train_y is not None:
            if n_samples != self.train_y.shape[0]:
                # bugfix: the message referenced non-existent self.X / self.y,
                # which raised AttributeError instead of the intended error
                raise ValueError(
                    "Number of samples of target label and feature dataframe"
                    " unequal.\nSamples in feature dataframe:"
                    f" {self.train_df.shape[0]}\nSamples in target label:"
                    f" {self.train_y.shape[0]}"
                )
            if not isinstance(self.train_y, pd.core.series.Series):
                raise TypeError(
                    "Target label is not a valid dataframe.\nExpected object"
                    " type: pandas.core.series.Series"
                )

        if self.test_size and self.train_size:
            # bugfix: the original used `or` between the negated isinstance
            # checks for int and float, which is always True and rejected
            # every (test_size, train_size) pair with a TypeError
            if not isinstance(self.test_size, (int, float)):
                raise TypeError("test_size must be of type int or float")
            if not isinstance(self.train_size, (int, float)):
                raise TypeError("train_size must be of type int or float")
            # bugfix: original called isinstance(test_size, train_size),
            # passing a value where a type is required (itself a TypeError);
            # compare the types explicitly instead
            if type(self.test_size) is not type(self.train_size):
                raise TypeError(
                    "Data types of test_size and train_size do not"
                    f" match.\ntest_size: {type(self.test_size)}.\ntrain_size:"
                    f" {type(self.train_size)}"
                )
            if (
                isinstance(self.test_size, float)
                and self.test_size + self.train_size != 1
            ):
                raise ValueError("test_size + train_size should be equal to 1")
            elif (
                isinstance(self.test_size, int)
                and self.test_size + self.train_size != n_samples
            ):
                raise ValueError(
                    "test_size + train_size not equal to number of samples"
                )
        elif self.test_size:
            if isinstance(self.test_size, float) and (
                self.test_size < 0 or self.test_size > 1
            ):
                raise ValueError("test_size should be between 0 and 1")
            if isinstance(self.test_size, int) and (
                self.test_size < 0 or self.test_size > n_samples
            ):
                raise ValueError(
                    f"test_size should be between 0 and {n_samples}"
                )
            # derive train_size as the complement of test_size
            self.train_size = (
                1 - self.test_size
                if isinstance(self.test_size, float)
                else n_samples - self.test_size
            )
        elif self.train_size:
            if isinstance(self.train_size, float) and (
                self.train_size < 0 or self.train_size > 1
            ):
                raise ValueError("train_size should be between 0 and 1")
            if isinstance(self.train_size, int) and (
                self.train_size < 0 or self.train_size > n_samples
            ):
                raise ValueError(
                    f"train_size should be between 0 and {n_samples}"
                )
            # derive test_size as the complement of train_size
            self.test_size = (
                1 - self.train_size
                if isinstance(self.train_size, float)
                else n_samples - self.train_size
            )
        else:
            # neither size given: fall back to defaults
            if self.train_y is None:
                self.test_size = 0.2
                self.train_size = 0.8
            else:
                features = len(self.train_df.columns)
                self.test_size = float(1 / np.sqrt(features))
                self.train_size = 1 - self.test_size

        if not isinstance(self.random_state, int):
            raise TypeError("random_state should be of type int")

    def train_test_split(self, params):
        """Performs train test split on the input data

        :param train_df: Input dataframe, may or may not consist of the target label.
                         Should not be ``None``
        :type train_df: pandas.core.frames.DataFrame
        :param test_df: Input dataframe, may or may not consist of the target label.
                        Should not be ``None``
        :type test_df: pandas.core.frames.DataFrame
        :param target_label: Name of the Target Column.
        :type target_label: str
        :param test_size: Size of test set after splitting. Can take values from
                          0 - 1 for float point values, 0 - Number of samples for
                          integer values. Is complementary to train size.
        :type test_size: float, int
        :param train_size: Size of train set after splitting. Can take values from
                           0 - 1 for float point values, 0 - Number of samples for
                           integer values. Is complementary to test size.
        :type train_size: float, int
        :param random_state: Seeding to be provided for shuffling before splitting.
        :type random_state: int

        The functions inserts the following into ``params`` -

        If target label is provided

        - **X_train** : pandas.core.frames.DataFrame
        - **y_train** : pandas.core.series.Series
        - **X_test** : pandas.core.frames.DataFrame
        - **y_test** : pandas.core.series.Series

        Else

        - **train**: pandas.core.frames.DataFrame
        - **test**: pandas.core.frames.DataFrame

        :raises ValueError: If the target column does not have a ``name`` property
                            ``ValueError`` is raised.
        """
        # idiom: `key in params` instead of `key in params.keys()`
        if "train_df" in params:
            self.train_df = params["train_df"]
        if "test_df" in params:
            self.test_df = params["test_df"]
        if "target_label" in params:
            self.target_label = params["target_label"]
        if "test_size" in params:
            self.test_size = params["test_size"]
        if "train_size" in params:
            self.train_size = params["train_size"]
        if "random_state" in params:
            self.random_state = params["random_state"]

        if self.target_label:
            self.train_y = self.train_df[self.target_label]
            if self.test_df is not None:
                self.test_y = self.test_df[self.target_label]

        self.__validate_input()

        if self.test_df is not None and self.test_y is not None:
            # data arrived pre-split with targets: pass through unchanged
            params["X_train"] = self.train_df
            params["X_test"] = self.test_df
            params["y_train"] = self.train_y
            params["y_test"] = self.test_y
        elif self.test_df is not None:
            params["X_train"] = self.train_df
            params["X_test"] = self.test_df
        else:
            # shuffle deterministically, then split by row index
            np.random.seed(self.random_state)
            if self.train_y is not None:
                self.df = pd.concat([self.train_df, self.train_y], axis=1)
            else:
                self.df = self.train_df
            self.df = self.df.iloc[
                np.random.permutation(len(self.df))
            ].reset_index(drop=True)
            if isinstance(self.test_size, float):
                index = int(self.test_size * len(self.df))
                train = self.df.iloc[index:]
                test = self.df.iloc[:index]
            else:
                train = self.df.iloc[self.test_size :]
                test = self.df.iloc[: self.test_size]
            if self.train_y is not None:
                if not self.train_y.name:
                    raise ValueError(
                        f"Target column needs to have a name. ${self.train_y.name} was provided."
                    )
                y_train = train[self.train_y.name]
                X_train = train.drop([self.train_y.name], axis=1)
                y_test = test[self.train_y.name]
                X_test = test.drop([self.train_y.name], axis=1)
                params["X_train"] = X_train
                params["X_test"] = X_test
                params["y_train"] = y_train
                params["y_test"] = y_test
            else:
                params["X_train"] = train
                params["X_test"] = test
| 37.52963
| 115
| 0.552847
|
f63a2d58eeba59549da507addf1250590db6b23a
| 5,460
|
py
|
Python
|
denise/retrain_market.py
|
DeepRPCA/Denise
|
90d08dbcf03a126567093d99bce98bee0b4f6357
|
[
"Apache-2.0"
] | null | null | null |
denise/retrain_market.py
|
DeepRPCA/Denise
|
90d08dbcf03a126567093d99bce98bee0b4f6357
|
[
"Apache-2.0"
] | null | null | null |
denise/retrain_market.py
|
DeepRPCA/Denise
|
90d08dbcf03a126567093d99bce98bee0b4f6357
|
[
"Apache-2.0"
] | null | null | null |
"""Retrain already trained models on market data"""
import sys
import shutil
from absl import app
from absl import flags
from lsr import algo_tf
from lsr import evaluation
from lsr import market_matrices # pylint: disable=unused-import
from lsr import positive_semidefinite_matrices # pylint: disable=unused-import
from lsr import model_lib_tf
from lsr import evaluation
from lsr.script_prepare_datasets import DIR
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow import keras as k
import numpy as np
FLAGS = flags.FLAGS  # absl flag registry for this script
# Matrix / problem-shape flags:
flags.DEFINE_integer("N", None, "Size of matrix NxN.")
flags.DEFINE_integer("K", None, "Initial rank of L0.")
flags.DEFINE_integer("forced_rank", None, "Forced rank")
flags.DEFINE_integer("sparsity", None, "Sparsity (%) of S0.")
flags.DEFINE_bool("shrink", False, "Should we shrink while training / eval?")
# Training only:
flags.DEFINE_integer("batch_size", 100, "Batch size (for training).")
# fix: help-string typo "Learing" -> "Learning"
flags.DEFINE_float("learning_rate", 1e-3, "Learning rate (training).")
flags.DEFINE_float(
    "eps_nn", 1e-6,
    "Stops training when diff between 2 loss functions < eps_nn")
flags.DEFINE_string(
    'trained_weights_dir_path', None,
    'Path to directory where to load weights from.')
flags.DEFINE_integer("epochs", 10, "number epochs to retrain")
def retrain(argv):
    """Fine-tune a previously trained model on the MarketMatrices dataset.

    Copies pre-trained weights from FLAGS.trained_weights_dir_path into the
    working weights directory, rebuilds the model, evaluates it on the
    validation split, retrains for FLAGS.epochs epochs, and prints the best
    train/validation metrics observed.
    NOTE(review): FLAGS.weights_dir_path is not defined in this file's visible
    flags — presumably defined by model_lib_tf/algo_tf; confirm.
    """
    # copy weights
    shutil.copytree(FLAGS.trained_weights_dir_path, FLAGS.weights_dir_path)
    # get dataset
    ds_name = "MarketMatricesTrainVal/N{}".format(FLAGS.N)
    builder = tfds.builder(ds_name, data_dir=DIR)
    builder.download_and_prepare()
    ds_train = builder.as_dataset(split="train", shuffle_files=True)
    ds_val = builder.as_dataset(split="validation")

    def transform(rec):
        # The model takes M as input and is supervised against (M, M):
        # one target per output head (S and L).
        M = rec['M']
        return M, (M, M)
    training_ds = ds_train.map(transform).repeat()
    val_ds = ds_val.map(transform).repeat()

    # losses and metrics
    def loss_S(S0, S):
        # L1 penalty encouraging a sparse S output.
        return k.backend.sum(k.backend.abs(S))

    def loss_L(L0, L):
        # The L head contributes no loss; it is monitored via ML_metric only.
        del L0  # Unused
        del L
        return tf.constant(0.)

    def sparsity(A, tolerance=0.01):
        """Returns ~% of zeros."""
        positives = tf.math.abs(A) > tolerance
        non_zeros = tf.cast(tf.math.count_nonzero(positives), tf.float32)
        size_A = tf.cast(tf.size(A), tf.float32)
        return (size_A - non_zeros) / size_A

    def sparsity_metric(y_true, y_pred):
        # Mean sparsity over the batch (assumes full batches of FLAGS.batch_size).
        sparsityS = []
        for i in range(FLAGS.batch_size):
            sparsityS.append(sparsity(y_pred[i]))
        return tf.math.reduce_mean(sparsityS)

    def ML_metric(y_true, y_pred):
        # Mean relative reconstruction error ||M - L|| / ||M|| over the batch.
        RE_ML = []
        for i in range(FLAGS.batch_size):
            M_sample, L_sample = y_true[i], y_pred[i]
            RE_ML.append(
                tf.norm(M_sample - L_sample) / tf.norm(M_sample))
        return tf.math.reduce_mean(RE_ML)

    # load model
    weights_path, model = model_lib_tf.get_model(
        FLAGS.N, FLAGS.forced_rank, shrink=False)
    my_optimizer = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate)
    model.compile(optimizer=my_optimizer, loss=[loss_S, loss_L],
                  metrics=[[sparsity_metric], [ML_metric]])
    # training
    tboard_logs = algo_tf._get_tb_logdir()
    print("TensorBoard logs at %s" % tboard_logs)
    # tboard = subprocess.Popen('tensorboard --logdir=%s' % tboard_logs, shell=True)
    callbacks = [
        k.callbacks.EarlyStopping(monitor='loss', min_delta=FLAGS.eps_nn,
                                  patience=5000),
        k.callbacks.ModelCheckpoint(
            weights_path, monitor='loss', save_weights_only=True,
            save_best_only=True),
        k.callbacks.TensorBoard(log_dir=tboard_logs, batch_size=FLAGS.batch_size),
    ]
    training_ds = training_ds.batch(FLAGS.batch_size)
    print("training dataset", training_ds)
    n = builder.info.splits["train"].num_examples
    steps_per_epoch = n // FLAGS.batch_size
    print('size:', n, 'steps per epoch:', steps_per_epoch)
    n_val = builder.info.splits["validation"].num_examples
    steps_per_epoch_val = n_val // FLAGS.batch_size
    val_ds = val_ds.batch(FLAGS.batch_size)
    print("validation dataset", val_ds)
    print('size:', n_val, 'steps per epoch:', steps_per_epoch_val)
    # eval before retraining
    eval_losses = model.evaluate(x=val_ds, steps=steps_per_epoch_val)
    loss_names = model.metrics_names
    print("before retraining:")
    print(loss_names)
    print(eval_losses)
    # retrain
    history = model.fit(
        x=training_ds, epochs=FLAGS.epochs, callbacks=callbacks,
        steps_per_epoch=steps_per_epoch, validation_data=val_ds,
        validation_steps=steps_per_epoch_val)
    history = history.history
    # Report validation metrics at the epoch with the best training
    # ML-reconstruction error, and at the epoch with the best sparsity.
    ind = np.argmin(history["L_ML_metric"])
    print("minimal L_ML_metric (on train set): {}".format(history["L_ML_metric"][ind]))
    print("validation errors at minimal training ML-reconstruction loss:")
    print("val_S_sparsity_metric: {}, val_L_ML_metric: {}".format(
        history["val_S_sparsity_metric"][ind], history["val_L_ML_metric"][ind]))
    ind = np.argmax(history["S_sparsity_metric"])
    print("max S_sparsity_metric (on train set): {}".format(history["S_sparsity_metric"][ind]))
    print("validation errors at max training S_sparsity_metric:")
    print("val_S_sparsity_metric: {}, val_L_ML_metric: {}".format(
        history["val_S_sparsity_metric"][ind], history["val_L_ML_metric"][ind]))
if __name__ == '__main__':
    # absl entry point: parses flags, then invokes retrain(argv).
    app.run(retrain)
| 36.4
| 95
| 0.689194
|
fc5f94d0c66a455674d38876821ec3ed0c32ccb4
| 16,314
|
py
|
Python
|
src/apps/dailytrans/reports/last5yearsreport.py
|
COAStatistics/aprp
|
8b06116a32001b040868a3cfa44e7d1f3bfb4742
|
[
"MIT"
] | 2
|
2020-07-11T23:20:54.000Z
|
2021-07-14T03:15:28.000Z
|
src/apps/dailytrans/reports/last5yearsreport.py
|
COAStatistics/aprp
|
8b06116a32001b040868a3cfa44e7d1f3bfb4742
|
[
"MIT"
] | 38
|
2018-09-26T15:11:34.000Z
|
2022-03-18T08:05:02.000Z
|
src/apps/dailytrans/reports/last5yearsreport.py
|
COAStatistics/aprp
|
8b06116a32001b040868a3cfa44e7d1f3bfb4742
|
[
"MIT"
] | 6
|
2018-08-24T05:50:32.000Z
|
2019-03-12T01:22:44.000Z
|
import pandas as pd
import numpy as np
import psycopg2
from datetime import date
from sqlalchemy import create_engine
from _pydecimal import Decimal, Context, ROUND_HALF_UP
from django.conf import settings
from apps.configs.models import AbstractProduct, Last5YearsItems
from functools import reduce
import operator
import logging
db_logger = logging.getLogger('aprp')
# Database connection settings (taken from the default Django database config).
myDBname = settings.DATABASES['default']['NAME']
my_uesrname = settings.DATABASES['default']['USER']
my_pwd = settings.DATABASES['default']['PASSWORD']
my_host = settings.DATABASES['default']['HOST']
my_port = settings.DATABASES['default']['PORT']
# Raw psycopg2 connection plus SQLAlchemy engine (the engine is what
# pandas.read_sql_query uses below).
# NOTE(review): credentials are concatenated into the URL unescaped — special
# characters in the password would break the DSN; consider URL-quoting.
con = psycopg2.connect(database=myDBname, user=my_uesrname, password=my_pwd, host=my_host, port=my_port)
engine = create_engine('postgresql://'+my_uesrname+':'+my_pwd+'@'+my_host+':'+str(my_port)+'/'+myDBname, echo=False)
class Last5YearsReportFactory(object):
    """Builds monthly price / volume / weight tables for a product over the
    last five years, including a five-year-average row.

    Call the instance to run the full pipeline; it returns four DataFrames
    (price, volume, weight, volume*weight) whose rows are ROC-calendar years
    plus the five-year average, and whose columns are the 12 months.
    """

    def __init__(self, product_id, source, is_hogs=False, is_rams=False):
        # product_id: iterable of AbstractProduct ids (parents are expanded
        # to tracked children in get_table).
        # source: list of source ids to filter on; empty/falsy means "all
        # sources" (with special-case exclusions for hogs).
        self.product_id = product_id
        self.source = source
        self.today = date.today()
        self.today_year = self.today.year
        self.today_month = self.today.month
        self.last_5_years_ago = self.today_year - 5
        self.last_year = self.today_year - 1
        self.is_hogs = is_hogs
        self.is_rams = is_rams

    def get_table(self):
        # Query every daily transaction for the product within the last five
        # years and return it as a DataFrame.
        # Expand parent items to their tracked child items (adapted from
        # dashboard/utils.py/product_selector_base_extra_context).
        product_qs = AbstractProduct.objects.filter(id__in=self.product_id)
        products = product_qs.exclude(track_item=False)
        if product_qs.filter(track_item=False):
            sub_products = reduce(
                operator.or_,
                (product.children().filter(track_item=True) for product in product_qs.filter(track_item=False))
            )
            products = products | sub_products
        # config.id == 13 appears to be a special config that keeps the
        # untracked parent itself — TODO confirm which config that is.
        if product_qs.first().track_item is False and product_qs.first().config.id == 13:
            products = product_qs
        # (end of code adapted from dashboard/utils.py/product_selector_base_extra_context)
        self.all_product_id_list = [i.id for i in products]
        all_date_list = [f'{self.last_5_years_ago}-01-01',self.today.strftime("%Y-%m-%d")]
        # Parameterized query: joins against the id list and bounds the dates.
        table = pd.read_sql_query("select product_id, source_id, avg_price, avg_weight, volume, date from dailytrans_dailytran INNER JOIN unnest(%(all_product_id_list)s) as pid ON pid=dailytrans_dailytran.product_id where ((date between %(all_date_list00)s and %(all_date_list01)s))", params={'all_product_id_list':self.all_product_id_list,'all_date_list00':all_date_list[0],'all_date_list01':all_date_list[1]},con=engine)
        table['date'] = pd.to_datetime(table['date'], format='%Y-%m-%d')
        return table

    def result(self,table):
        """Aggregate the raw transaction table into the four report frames."""
        source_list = self.source
        product_data_dict = {}
        avgprice_dict = {}
        avgvolume_dict = {}
        avgweight_dict = {}
        avgvolumeweight_dict = {}
        has_volume = False
        has_weight = False
        # Monthly figures for each of the last five years (plus current year
        # up to the current month).
        for y in range(self.last_5_years_ago,self.today_year+1):
            avgprice_month_list = []
            avgvolume_month_list = []
            avgweight_month_list = []
            avgvolumeweight_month_list = []
            end_month = 13
            if y == self.today_year:
                end_month = self.today_month + 1
            for m in range(1,end_month):
                if source_list:
                    one_month_data = table[(pd.to_datetime(table['date']).dt.year == y) & (pd.to_datetime(table['date']).dt.month == m) ].query("source_id == @source_list")
                else:
                    if self.is_hogs:  # hogs (spec hogs) must exclude the Penghu market (source 40050)
                        one_month_data = table[(pd.to_datetime(table['date']).dt.year == y) & (pd.to_datetime(table['date']).dt.month == m) ].query("source_id != 40050")
                    else:
                        one_month_data = table[(pd.to_datetime(table['date']).dt.year == y) & (pd.to_datetime(table['date']).dt.month == m) ]
                # Volume/weight columns only count as present when >80% of the
                # priced rows have them.
                if one_month_data['avg_price'].any():
                    has_volume = one_month_data['volume'].notna().sum() / one_month_data['avg_price'].count() > 0.8
                    has_weight = one_month_data['avg_weight'].notna().sum() / one_month_data['avg_price'].count() > 0.8
                else:
                    has_volume = False
                    has_weight = False
                if has_volume and has_weight:
                    # Weight-and-volume-weighted average price; volume-weighted average weight.
                    avgprice=(one_month_data['avg_price']*one_month_data['avg_weight']*one_month_data['volume']).sum()/(one_month_data['avg_weight']*one_month_data['volume']).sum()
                    avgweight=(one_month_data['avg_weight']*one_month_data['volume']).sum()/(one_month_data['volume']).sum()
                    if self.is_rams :  # ram/goat volume: mean of daily totals
                        avgvolume = one_month_data.groupby('date').sum()['volume'].mean()
                    elif self.is_hogs:  # hog volume is a head count (reported in thousands)
                        avgvolume = one_month_data.groupby('date').sum()['volume'].mean() / 1000
                        avgweight = avgweight
                        avgvolumeweight = (avgweight*avgvolume*1000) / 1000
                    else:
                        avgvolume = (one_month_data['avg_weight']*one_month_data['volume']).sum()/(one_month_data['volume']).sum()
                elif has_volume:
                    avgprice=(one_month_data['avg_price']*one_month_data['volume']).sum()/one_month_data['volume'].sum()
                    avgvolume = one_month_data.groupby('date').sum()['volume'].mean() / 1000
                else:
                    avgprice=one_month_data['avg_price'].mean()
                    avgvolume = np.nan
                    avgweight = np.nan
                # Round-trip through Decimal to force ROUND_HALF_UP behavior.
                avgprice_month_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgprice)))
                if has_volume:
                    avgvolume_month_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgvolume)))
                if self.is_hogs and has_weight:
                    avgweight_month_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgweight)))
                    avgvolumeweight_month_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgvolumeweight)))
                elif has_weight:
                    avgweight_month_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgweight)))
            # Row labels use ROC (Minguo) years.
            avgprice_dict[str(y-1911)+'年'] = avgprice_month_list
            if has_volume:
                avgvolume_dict[str(y-1911)+'年'] = avgvolume_month_list
            if self.is_hogs and has_weight:
                avgweight_dict[str(y-1911)+'年'] = avgweight_month_list
                avgvolumeweight_dict[str(y-1911)+'年'] = avgvolumeweight_month_list
            elif has_weight:
                avgweight_dict[str(y-1911)+'年'] = avgweight_month_list
        product_data_dict[self.all_product_id_list[0]] = {'avgprice' : avgprice_dict, 'avgvolume' : avgvolume_dict, 'avgweight' : avgweight_dict, 'avgvolumeweight' : avgvolumeweight_dict}
        # Five-year averages (excludes the current, partial year).
        last_5_years_avg_data = {}
        last_5_years_avg_data['avgprice'] = {}
        last_5_years_avg_data['avgvolume'] = {}
        last_5_years_avg_data['avgweight'] = {}
        last_5_years_avg_data['avgvolumeweight'] = {}
        has_volume = False
        has_weight = False
        last_5_years_avgprice_list = []
        last_5_years_avgvolume_list = []
        last_5_years_avgweight_list = []
        last_5_years_avgvolumeweight_list = []
        avgprice_data = pd.DataFrame()
        avgvolume_data = pd.DataFrame()
        avgweight_data = pd.DataFrame()
        avgvolumeweight_data = pd.DataFrame()
        for m in range(1,13):
            avgvolume_temp_list = []
            if source_list:
                last_5_years_onemonth_table = table[(pd.to_datetime(table['date']).dt.year >= self.last_5_years_ago) & (pd.to_datetime(table['date']).dt.year <= self.last_year) & (pd.to_datetime(table['date']).dt.month == m)].query("source_id == @source_list")
            else:
                if self.is_hogs:  # hogs (spec hogs) must exclude the Penghu market (source 40050)
                    last_5_years_onemonth_table = table[(pd.to_datetime(table['date']).dt.year >= self.last_5_years_ago) & (pd.to_datetime(table['date']).dt.year <= self.last_year) & (pd.to_datetime(table['date']).dt.month == m)].query("source_id != 40050")
                else:
                    last_5_years_onemonth_table = table[(pd.to_datetime(table['date']).dt.year >= self.last_5_years_ago) & (pd.to_datetime(table['date']).dt.year <= self.last_year) & (pd.to_datetime(table['date']).dt.month == m)]
            if last_5_years_onemonth_table['avg_price'].any():
                has_volume = last_5_years_onemonth_table['volume'].notna().sum() / last_5_years_onemonth_table['avg_price'].count() > 0.8
                has_weight = last_5_years_onemonth_table['avg_weight'].notna().sum() / last_5_years_onemonth_table['avg_price'].count() > 0.8
            else:
                has_volume = False
                has_weight = False
            last_5_years_onemonth_table=last_5_years_onemonth_table.copy()  # copy to avoid SettingWithCopy warnings in the column assignments below
            if has_volume and has_weight:
                # pvw / vw: price*volume*weight and volume*weight helper columns.
                last_5_years_onemonth_table['pvw'] = last_5_years_onemonth_table['avg_price'] * last_5_years_onemonth_table['volume'] * last_5_years_onemonth_table['avg_weight']
                last_5_years_onemonth_table['vw'] = last_5_years_onemonth_table['volume'] * last_5_years_onemonth_table['avg_weight']
                one_month_avgprice = last_5_years_onemonth_table.groupby('date')['pvw'].sum()/last_5_years_onemonth_table.groupby('date')['vw'].sum()
                one_month_avgweight = last_5_years_onemonth_table.groupby('date')['vw'].sum()/last_5_years_onemonth_table.groupby('date')['volume'].sum()
                one_month_sumvolume = last_5_years_onemonth_table.groupby('date')['volume'].sum()
                avgprice_one_month = (one_month_avgprice*one_month_sumvolume*one_month_avgweight).sum()/(one_month_sumvolume*one_month_avgweight).sum()
                avgweight_one_month = (one_month_sumvolume*one_month_avgweight).sum()/(one_month_sumvolume).sum()
                last_5_years_avg_data['avgprice'][m] = float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgprice_one_month))
                if self.is_rams:  # ram/goat volume: mean of daily totals
                    last_5_years_avg_data['avgvolume'][m] = last_5_years_onemonth_table.groupby('date').sum()['volume'].mean()
                    last_5_years_avg_data['avgweight'][m] = float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgweight_one_month))
                elif self.is_hogs:  # hog volume is a head count (reported in thousands)
                    last_5_years_avg_data['avgvolume'][m] = last_5_years_onemonth_table.groupby('date').sum()['volume'].mean() / 1000
                    last_5_years_avg_data['avgweight'][m] = float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgweight_one_month))
                    one_month_avgvolumeweight = (last_5_years_onemonth_table.groupby('date')['vw'].sum()).mean() / 1000
                    last_5_years_avg_data['avgvolumeweight'][m] = float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(one_month_avgvolumeweight))
                    last_5_years_avgvolumeweight_list.append(last_5_years_avg_data['avgvolumeweight'][m])
                else:
                    last_5_years_avg_data['avgvolume'][m] = (last_5_years_onemonth_table['avg_weight']*last_5_years_onemonth_table['volume']).sum()/(last_5_years_onemonth_table['volume']).sum()
                last_5_years_avgprice_list.append(last_5_years_avg_data['avgprice'][m])
                last_5_years_avgvolume_list.append(last_5_years_avg_data['avgvolume'][m])
                # NOTE(review): in the non-ram/non-hog branch above,
                # last_5_years_avg_data['avgweight'][m] is never assigned, so
                # the append below would raise KeyError for such products —
                # confirm whether that combination can occur in practice.
                last_5_years_avgweight_list.append(last_5_years_avg_data['avgweight'][m])
            elif has_volume:
                # average price (volume-weighted)
                last_5_years_onemonth_table['pv']=last_5_years_onemonth_table['avg_price'] * last_5_years_onemonth_table['volume']
                one_month_avgprice = last_5_years_onemonth_table.groupby('date')['pv'].sum()/last_5_years_onemonth_table.groupby('date')['volume'].sum()
                one_month_sumvolume = last_5_years_onemonth_table.groupby('date').sum()['volume'].values
                avgprice_one_month = (one_month_avgprice*one_month_sumvolume).sum()/one_month_sumvolume.sum()
                last_5_years_avg_data['avgprice'][m] = float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(avgprice_one_month))
                last_5_years_avgprice_list.append(last_5_years_avg_data['avgprice'][m])
                # average volume
                last_5_years_avgvolume_month = last_5_years_onemonth_table.groupby('date').sum()['volume'].values
                for j in last_5_years_avgvolume_month:
                    avgvolume_temp_list.append(float(Context(prec=28, rounding=ROUND_HALF_UP).create_decimal(j)))
                last_5_years_avg_data['avgvolume'][m] = sum(avgvolume_temp_list) / len(avgvolume_temp_list) / 1000
                last_5_years_avgvolume_list.append(last_5_years_avg_data['avgvolume'][m])
            else:
                if last_5_years_onemonth_table.groupby('date').mean()['avg_price'].values.any():
                    one_month_avgprice = last_5_years_onemonth_table.groupby('date').mean()['avg_price'].values.mean()
                    last_5_years_avg_data['avgprice'][m] = one_month_avgprice
                    last_5_years_avgprice_list.append(last_5_years_avg_data['avgprice'][m])
                    has_price = True
                else:
                    last_5_years_avgprice_list.append(np.nan)
                    has_price = False
                # Pad months without data with NaN so every per-month list
                # keeps the same length (keeps month/column alignment).
                last_5_years_avgvolume_list.append(np.nan)
                last_5_years_avgweight_list.append(np.nan)
                last_5_years_avgvolumeweight_list.append(np.nan)
        columns_name = ['1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', '9月', '10月', '11月', '12月']
        # Assemble the output frames: one row per ROC year plus the
        # five-year-average row, rounded to one decimal place.
        avgprice_data = pd.DataFrame.from_dict(product_data_dict[self.all_product_id_list[0]]['avgprice'], orient='index')
        avgprice_data.columns = columns_name
        avgprice_data.loc['近五年平均'] = last_5_years_avgprice_list
        avgprice_data = avgprice_data.round(1)
        if has_volume:
            avgvolume_data = pd.DataFrame.from_dict(product_data_dict[self.all_product_id_list[0]]['avgvolume'], orient='index')
            avgvolume_data.columns = columns_name
            avgvolume_data.loc['近五年平均'] = last_5_years_avgvolume_list
            avgvolume_data = avgvolume_data.round(1)
        if self.is_hogs and has_weight:
            avgweight_data = pd.DataFrame.from_dict(product_data_dict[self.all_product_id_list[0]]['avgweight'], orient='index')
            avgweight_data.columns = columns_name
            avgweight_data.loc['近五年平均'] = last_5_years_avgweight_list
            avgweight_data = avgweight_data.round(1)
            avgvolumeweight_data = pd.DataFrame.from_dict(product_data_dict[self.all_product_id_list[0]]['avgvolumeweight'], orient='index')
            avgvolumeweight_data.columns = columns_name
            avgvolumeweight_data.loc['近五年平均'] = last_5_years_avgvolumeweight_list
            avgvolumeweight_data = avgvolumeweight_data.round(1)
        elif has_weight:
            avgweight_data = pd.DataFrame.from_dict(product_data_dict[self.all_product_id_list[0]]['avgweight'], orient='index')
            avgweight_data.columns = columns_name
            avgweight_data.loc['近五年平均'] = last_5_years_avgweight_list
            avgweight_data = avgweight_data.round(1)
        return avgprice_data, avgvolume_data, avgweight_data, avgvolumeweight_data

    def __call__(self):
        # Fetch the full transaction table and aggregate it.
        table = self.get_table()
        if not table.empty:
            return self.result(table)
        else:
            # NOTE(review): implicitly returns None when no data was found —
            # callers must handle a None result.
            db_logger.error(f'DB query error : product_id_list = {self.all_product_id_list}; source_list = {self.source}', extra={'type_code': 'LOT-last5yearsreport'})
| 58.264286
| 422
| 0.642761
|
6644c14a3ad8f6acdaad9f56536c7d2f6e01c491
| 1,035
|
py
|
Python
|
python3/tests/test_merge_k_sorted_lists.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | 4
|
2018-03-05T02:27:16.000Z
|
2021-03-15T14:19:44.000Z
|
python3/tests/test_merge_k_sorted_lists.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | null | null | null |
python3/tests/test_merge_k_sorted_lists.py
|
qianbinbin/leetcode
|
915cecab0c940cd13847683ec55b17b77eb0f39b
|
[
"MIT"
] | 2
|
2018-07-22T10:32:10.000Z
|
2018-10-20T03:14:28.000Z
|
from unittest import TestCase
from leetcodepy.merge_k_sorted_lists import *
from leetcodepy.utils import linked_lists
# Shared solver instances exercised by both test methods.
SOLUTION1 = Solution1()
SOLUTION2 = Solution2()
def LISTS1():
    """Return fresh copies of the three sorted input lists.

    A factory (not a constant) because merging consumes the nodes, and each
    test method needs its own instances.
    """
    return [
        linked_lists.from_values(*values)
        for values in ((1, 4, 5), (1, 3, 4), (2, 6))
    ]
# Expected merge of the three lists produced by LISTS1().
EXPECTED1 = linked_lists.from_values(1, 1, 2, 3, 4, 4, 5, 6)
LISTS2 = []  # edge case: no lists at all
EXPECTED2 = None
LISTS3 = [None]  # edge case: a single empty list
EXPECTED3 = None
class TestMergeKSortedLists(TestCase):
    """Run both solutions against the shared fixtures."""

    def _check_solution(self, solution):
        # Same three assertions, in the same order, for either solution.
        self.assertTrue(linked_lists.equals(EXPECTED1, solution.mergeKLists(LISTS1())))
        self.assertTrue(linked_lists.equals(EXPECTED2, solution.mergeKLists(LISTS2)))
        self.assertTrue(linked_lists.equals(EXPECTED3, solution.mergeKLists(LISTS3)))

    def test1(self):
        self._check_solution(SOLUTION1)

    def test2(self):
        self._check_solution(SOLUTION2)
| 31.363636
| 113
| 0.74686
|
449d2506d04fa2a56fb2cafe4c4e521ada190706
| 15,601
|
py
|
Python
|
homeassistant/components/homekit/util.py
|
bannhead/core
|
b113984875381b486ef6722080d992db254559bf
|
[
"Apache-2.0"
] | 1
|
2021-09-11T19:53:41.000Z
|
2021-09-11T19:53:41.000Z
|
homeassistant/components/homekit/util.py
|
bannhead/core
|
b113984875381b486ef6722080d992db254559bf
|
[
"Apache-2.0"
] | 47
|
2020-07-23T07:14:33.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/homekit/util.py
|
bannhead/core
|
b113984875381b486ef6722080d992db254559bf
|
[
"Apache-2.0"
] | 1
|
2020-12-21T22:15:32.000Z
|
2020-12-21T22:15:32.000Z
|
"""Collection of useful functions for the HomeKit component."""
from collections import OrderedDict, namedtuple
import io
import ipaddress
import logging
import os
import re
import secrets
import socket
import pyqrcode
import voluptuous as vol
from homeassistant.components import binary_sensor, fan, media_player, sensor
from homeassistant.const import (
ATTR_CODE,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_TYPE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
import homeassistant.util.temperature as temp_util
from .const import (
AUDIO_CODEC_COPY,
AUDIO_CODEC_OPUS,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_FEATURE,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_LOW_BATTERY_THRESHOLD,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
DOMAIN,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
HOMEKIT_FILE,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
VIDEO_CODEC_COPY,
VIDEO_CODEC_H264_OMX,
VIDEO_CODEC_LIBX264,
)
_LOGGER = logging.getLogger(__name__)

# Highest valid TCP port number.
MAX_PORT = 65535
# Fix a copy/paste mix-up: the video list referenced AUDIO_CODEC_COPY and the
# audio list referenced VIDEO_CODEC_COPY. Both constants are the pass-through
# value "copy" (confirm in .const), so runtime behavior is unchanged — this
# only makes each list use its matching constant.
VALID_VIDEO_CODECS = [VIDEO_CODEC_LIBX264, VIDEO_CODEC_H264_OMX, VIDEO_CODEC_COPY]
VALID_AUDIO_CODECS = [AUDIO_CODEC_OPUS, AUDIO_CODEC_COPY]
# Options common to every accessory type.
BASIC_INFO_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_LINKED_BATTERY_SENSOR): cv.entity_domain(sensor.DOMAIN),
        vol.Optional(CONF_LINKED_BATTERY_CHARGING_SENSOR): cv.entity_domain(
            binary_sensor.DOMAIN
        ),
        vol.Optional(
            CONF_LOW_BATTERY_THRESHOLD, default=DEFAULT_LOW_BATTERY_THRESHOLD
        ): cv.positive_int,
    }
)

# Basic info plus a per-entity feature list (used for media players).
FEATURE_SCHEMA = BASIC_INFO_SCHEMA.extend(
    {vol.Optional(CONF_FEATURE_LIST, default=None): cv.ensure_list}
)

# Camera-specific stream/codec options.
CAMERA_SCHEMA = BASIC_INFO_SCHEMA.extend(
    {
        vol.Optional(CONF_STREAM_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
        vol.Optional(CONF_STREAM_SOURCE): cv.string,
        vol.Optional(CONF_AUDIO_CODEC, default=DEFAULT_AUDIO_CODEC): vol.In(
            VALID_AUDIO_CODECS
        ),
        vol.Optional(CONF_SUPPORT_AUDIO, default=DEFAULT_SUPPORT_AUDIO): cv.boolean,
        vol.Optional(CONF_MAX_WIDTH, default=DEFAULT_MAX_WIDTH): cv.positive_int,
        vol.Optional(CONF_MAX_HEIGHT, default=DEFAULT_MAX_HEIGHT): cv.positive_int,
        vol.Optional(CONF_MAX_FPS, default=DEFAULT_MAX_FPS): cv.positive_int,
        vol.Optional(CONF_AUDIO_MAP, default=DEFAULT_AUDIO_MAP): cv.string,
        vol.Optional(CONF_VIDEO_MAP, default=DEFAULT_VIDEO_MAP): cv.string,
        vol.Optional(CONF_STREAM_COUNT, default=DEFAULT_STREAM_COUNT): vol.All(
            vol.Coerce(int), vol.Range(min=1, max=10)
        ),
        vol.Optional(CONF_VIDEO_CODEC, default=DEFAULT_VIDEO_CODEC): vol.In(
            VALID_VIDEO_CODECS
        ),
        vol.Optional(
            CONF_AUDIO_PACKET_SIZE, default=DEFAULT_AUDIO_PACKET_SIZE
        ): cv.positive_int,
        vol.Optional(
            CONF_VIDEO_PACKET_SIZE, default=DEFAULT_VIDEO_PACKET_SIZE
        ): cv.positive_int,
        vol.Optional(CONF_LINKED_MOTION_SENSOR): cv.entity_domain(binary_sensor.DOMAIN),
    }
)

# Humidifier may link an external humidity sensor.
HUMIDIFIER_SCHEMA = BASIC_INFO_SCHEMA.extend(
    {vol.Optional(CONF_LINKED_HUMIDITY_SENSOR): cv.entity_domain(sensor.DOMAIN)}
)

# Alarm panels / locks accept an optional code.
CODE_SCHEMA = BASIC_INFO_SCHEMA.extend(
    {vol.Optional(ATTR_CODE, default=None): vol.Any(None, cv.string)}
)

# One entry of a media player's feature list.
MEDIA_PLAYER_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_FEATURE): vol.All(
            cv.string,
            vol.In(
                (
                    FEATURE_ON_OFF,
                    FEATURE_PLAY_PAUSE,
                    FEATURE_PLAY_STOP,
                    FEATURE_TOGGLE_MUTE,
                )
            ),
        )
    }
)

# Switch entities can masquerade as several HomeKit device types.
SWITCH_TYPE_SCHEMA = BASIC_INFO_SCHEMA.extend(
    {
        vol.Optional(CONF_TYPE, default=TYPE_SWITCH): vol.All(
            cv.string,
            vol.In(
                (
                    TYPE_FAUCET,
                    TYPE_OUTLET,
                    TYPE_SHOWER,
                    TYPE_SPRINKLER,
                    TYPE_SWITCH,
                    TYPE_VALVE,
                )
            ),
        )
    }
)

# str.translate table mapping code points HomeKit rejects to safe
# replacements (used by cleanup_name_for_homekit).
HOMEKIT_CHAR_TRANSLATIONS = {
    0: " ",  # nul
    10: " ",  # nl
    13: " ",  # cr
    33: "-",  # !
    34: " ",  # "
    36: "-",  # $
    37: "-",  # %
    40: "-",  # (
    41: "-",  # )
    42: "-",  # *
    43: "-",  # +
    47: "-",  # /
    58: "-",  # :
    59: "-",  # ;
    60: "-",  # <
    61: "-",  # =
    62: "-",  # >
    63: "-",  # ?
    64: "-",  # @
    91: "-",  # [
    92: "-",  # \
    93: "-",  # ]
    94: "-",  # ^
    95: " ",  # _
    96: "-",  # `
    123: "-",  # {
    124: "-",  # |
    125: "-",  # }
    126: "-",  # ~
    127: "-",  # del
}
def validate_entity_config(values):
    """Validate config entry for CONF_ENTITY."""
    if not isinstance(values, dict):
        raise vol.Invalid("expected a dictionary")

    # Domains with a dedicated schema and no extra post-processing.
    simple_schemas = {
        "camera": CAMERA_SCHEMA,
        "switch": SWITCH_TYPE_SCHEMA,
        "humidifier": HUMIDIFIER_SCHEMA,
    }

    entities = {}
    for entity_id, raw_config in values.items():
        entity = cv.entity_id(entity_id)
        domain, _ = split_entity_id(entity)

        if not isinstance(raw_config, dict):
            raise vol.Invalid(f"The configuration for {entity} must be a dictionary.")

        if domain in ("alarm_control_panel", "lock"):
            validated = CODE_SCHEMA(raw_config)
        elif domain == media_player.const.DOMAIN:
            # Re-shape the feature list into a dict keyed by feature name,
            # rejecting duplicates.
            validated = FEATURE_SCHEMA(raw_config)
            feature_map = {}
            for feature_conf in validated[CONF_FEATURE_LIST]:
                params = MEDIA_PLAYER_SCHEMA(feature_conf)
                key = params.pop(CONF_FEATURE)
                if key in feature_map:
                    raise vol.Invalid(f"A feature can be added only once for {entity}")
                feature_map[key] = params
            validated[CONF_FEATURE_LIST] = feature_map
        else:
            schema = simple_schemas.get(domain, BASIC_INFO_SCHEMA)
            validated = schema(raw_config)

        entities[entity] = validated
    return entities
def get_media_player_features(state):
    """Determine features for media players."""
    features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

    # (bitmask, feature-name) pairs, checked in the original order.
    feature_masks = (
        (
            media_player.const.SUPPORT_TURN_ON | media_player.const.SUPPORT_TURN_OFF,
            FEATURE_ON_OFF,
        ),
        (
            media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_PAUSE,
            FEATURE_PLAY_PAUSE,
        ),
        (
            media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_STOP,
            FEATURE_PLAY_STOP,
        ),
        (media_player.const.SUPPORT_VOLUME_MUTE, FEATURE_TOGGLE_MUTE),
    )
    return [mode for mask, mode in feature_masks if features & mask]
def validate_media_player_features(state, feature_list):
    """Validate features for media players."""
    supported_modes = get_media_player_features(state)

    if not supported_modes:
        _LOGGER.error("%s does not support any media_player features", state.entity_id)
        return False

    if not feature_list:
        # Auto detected
        return True

    # Collect every requested feature the entity cannot provide.
    error_list = [
        feature for feature in feature_list if feature not in supported_modes
    ]
    if error_list:
        _LOGGER.error(
            "%s does not support media_player features: %s", state.entity_id, error_list
        )
        return False
    return True
# (start, target) percentage bucket for one named fan speed; used by
# HomeKitSpeedMapping below.
SpeedRange = namedtuple("SpeedRange", ("start", "target"))
SpeedRange.__doc__ += """ Maps Home Assistant speed \
values to percentage based HomeKit speeds.
start: Start of the range (inclusive).
target: Percentage to use to determine HomeKit percentages \
from HomeAssistant speed.
"""
class HomeKitSpeedMapping:
    """Supports conversion between Home Assistant and HomeKit fan speeds."""

    def __init__(self, speed_list):
        """Initialize a new SpeedMapping object.

        speed_list: ordered Home Assistant speed names; the first entry is
        expected to be fan.SPEED_OFF.
        NOTE(review): a single-element speed_list would divide by zero below
        (list_size - 1 == 0) — confirm callers never pass one.
        """
        if speed_list[0] != fan.SPEED_OFF:
            _LOGGER.warning(
                "%s does not contain the speed setting "
                "%s as its first element. "
                "Assuming that %s is equivalent to 'off'",
                speed_list,
                fan.SPEED_OFF,
                speed_list[0],
            )
        self.speed_ranges = OrderedDict()
        list_size = len(speed_list)
        for index, speed in enumerate(speed_list):
            # By dividing by list_size -1 the following
            # desired attributes hold true:
            # * index = 0 => 0%, equal to "off"
            # * index = len(speed_list) - 1 => 100 %
            # * all other indices are equally distributed
            target = index * 100 / (list_size - 1)
            start = index * 100 / list_size
            self.speed_ranges[speed] = SpeedRange(start, target)

    def speed_to_homekit(self, speed):
        """Map Home Assistant speed state to HomeKit speed."""
        if speed is None:
            return None
        speed_range = self.speed_ranges[speed]
        return round(speed_range.target)

    def speed_to_states(self, speed):
        """Map HomeKit speed to Home Assistant speed state."""
        # Walk buckets from highest start downwards; first bucket whose start
        # is <= the HomeKit percentage wins.
        for state, speed_range in reversed(self.speed_ranges.items()):
            if speed_range.start <= speed:
                return state
        # Fall back to the first (lowest / "off") state.
        return list(self.speed_ranges.keys())[0]
def show_setup_message(hass, entry_id, bridge_name, pincode, uri):
    """Display persistent notification with setup information."""
    pin = pincode.decode()  # pincode arrives as bytes
    _LOGGER.info("Pincode: %s", pin)

    # Render the pairing URI as an SVG QR code in memory.
    buffer = io.BytesIO()
    url = pyqrcode.create(uri)
    url.svg(buffer, scale=5)

    # Random secret stored alongside the QR image — presumably checked by the
    # endpoint that serves the QR code; confirm against the consumer.
    pairing_secret = secrets.token_hex(32)

    hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR] = buffer.getvalue()
    hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR_SECRET] = pairing_secret

    # NOTE(review): the trailing f"" looks like a stripped markdown image line
    # (the QR code is stored but never referenced in the message) — confirm.
    message = (
        f"To set up {bridge_name} in the Home App, "
        f"scan the QR code or enter the following code:\n"
        f"### {pin}\n"
        f""
    )
    hass.components.persistent_notification.create(
        message, "HomeKit Bridge Setup", entry_id
    )
def dismiss_setup_message(hass, entry_id):
    """Dismiss persistent notification and remove QR code."""
    # The notification was created with entry_id as its notification id.
    hass.components.persistent_notification.dismiss(entry_id)
def convert_to_float(state):
    """Return *state* coerced to float, or None when it cannot be parsed."""
    try:
        result = float(state)
    except (ValueError, TypeError):
        # Non-numeric strings, None, and other unconvertible values.
        return None
    return result
def cleanup_name_for_homekit(name):
    """Ensure the name of the device will not crash homekit."""
    #
    # This is not a security measure.
    #
    # UNICODE_EMOJI is also not allowed but that
    # likely isn't a problem
    #
    # Single C-level pass replacing characters HomeKit rejects (see
    # HOMEKIT_CHAR_TRANSLATIONS above).
    return name.translate(HOMEKIT_CHAR_TRANSLATIONS)
def temperature_to_homekit(temperature, unit):
    """Convert temperature to Celsius for HomeKit, rounded to 0.1 degree."""
    return round(temp_util.convert(temperature, unit, TEMP_CELSIUS), 1)
def temperature_to_states(temperature, unit):
    """Convert temperature back from Celsius to Home Assistant unit."""
    # `* 2 / 2` rounding snaps the result to the nearest 0.5 degree.
    return round(temp_util.convert(temperature, TEMP_CELSIUS, unit) * 2) / 2
def density_to_air_quality(density):
    """Map PM2.5 density to HomeKit AirQuality level."""
    # Upper bound (inclusive) of each quality level, best to worst.
    thresholds = ((35, 1), (75, 2), (115, 3), (150, 4))
    for upper_bound, level in thresholds:
        if density <= upper_bound:
            return level
    # Anything above 150 is the worst level.
    return 5
def get_persist_filename_for_entry_id(entry_id: str):
    """Determine the filename of the homekit state file."""
    return f"{DOMAIN}.{entry_id}.state"
def get_aid_storage_filename_for_entry_id(entry_id: str):
    """Determine the filename of the homekit aid storage file."""
    return f"{DOMAIN}.{entry_id}.aids"
def get_persist_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
    """Determine the path to the homekit state file."""
    return hass.config.path(STORAGE_DIR, get_persist_filename_for_entry_id(entry_id))
def get_aid_storage_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
    """Determine the path to the homekit aid storage file."""
    return hass.config.path(
        STORAGE_DIR, get_aid_storage_filename_for_entry_id(entry_id)
    )
def format_sw_version(version):
    """Extract the version string in a format homekit can consume.

    Accepts any value (it is stringified first); pre-release suffixes such as
    "1.2.3-beta" are normalized by turning dashes into dots, then the leading
    numeric "X[.Y[.Z]]" part is returned. Returns None when no digits exist.
    """
    normalized = str(version).replace("-", ".")
    match = re.search(r"([0-9]+)(\.[0-9]+)?(\.[0-9]+)?", normalized)
    return match.group(0) if match else None
def migrate_filesystem_state_data_for_primary_imported_entry_id(
    hass: HomeAssistant, entry_id: str
):
    """Migrate the old paths to the storage directory.

    Renames the legacy single-bridge state file and aid storage file (if they
    exist) to the per-config-entry filenames used now.
    """
    legacy_persist_file_path = hass.config.path(HOMEKIT_FILE)
    if os.path.exists(legacy_persist_file_path):
        os.rename(
            legacy_persist_file_path, get_persist_fullpath_for_entry_id(hass, entry_id)
        )

    legacy_aid_storage_path = hass.config.path(STORAGE_DIR, "homekit.aids")
    if os.path.exists(legacy_aid_storage_path):
        os.rename(
            legacy_aid_storage_path,
            get_aid_storage_fullpath_for_entry_id(hass, entry_id),
        )
def remove_state_files_for_entry_id(hass: HomeAssistant, entry_id: str):
    """Remove the state files from disk."""
    persist_file_path = get_persist_fullpath_for_entry_id(hass, entry_id)
    aid_storage_path = get_aid_storage_fullpath_for_entry_id(hass, entry_id)
    # NOTE(review): raises FileNotFoundError if the state file is absent,
    # unlike the guarded aid-file removal below — confirm this is intended.
    os.unlink(persist_file_path)
    if os.path.exists(aid_storage_path):
        os.unlink(aid_storage_path)
    return True
def _get_test_socket():
"""Create a socket to test binding ports."""
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.setblocking(False)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return test_socket
def port_is_available(port: int):
    """Check to see if a port is available.

    Returns True when the port can be bound on all interfaces, False on any
    OS-level bind failure (already in use, permission denied, ...).
    """
    test_socket = _get_test_socket()
    try:
        test_socket.bind(("", port))
    except OSError:
        return False
    finally:
        # Fix: the original never closed the socket, leaking a file
        # descriptor on every call.
        test_socket.close()
    return True
def find_next_available_port(start_port: int):
    """Find the next available port starting with the given port.

    Returns the first bindable port in [start_port, MAX_PORT]. Raises OSError
    when every port up to and including MAX_PORT is unavailable.
    """
    test_socket = _get_test_socket()
    try:
        # Fix: the original used range(start_port, MAX_PORT), which excluded
        # MAX_PORT — the `port == MAX_PORT: raise` guard was unreachable and
        # exhaustion silently returned None. Include MAX_PORT so the intended
        # raise can fire.
        for port in range(start_port, MAX_PORT + 1):
            try:
                test_socket.bind(("", port))
                return port
            except OSError:
                if port == MAX_PORT:
                    raise
    finally:
        # Also release the probe socket (the original leaked it).
        test_socket.close()
def pid_is_alive(pid):
    """Check to see if a process is alive."""
    try:
        # Signal 0 performs the permission/existence check without signaling.
        os.kill(pid, 0)
    except OSError:
        return False
    return True
| 30.35214
| 88
| 0.651753
|
715faa6fd9f0b9020130e843ef5596024c9ea2e8
| 4,954
|
py
|
Python
|
mimic3models/length_of_stay/logistic/main.py
|
PNilayam/CS598_DLH
|
058809856d1ac4d78857679b0880fd7a810ed8e8
|
[
"MIT"
] | null | null | null |
mimic3models/length_of_stay/logistic/main.py
|
PNilayam/CS598_DLH
|
058809856d1ac4d78857679b0880fd7a810ed8e8
|
[
"MIT"
] | null | null | null |
mimic3models/length_of_stay/logistic/main.py
|
PNilayam/CS598_DLH
|
058809856d1ac4d78857679b0880fd7a810ed8e8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from mimic3benchmark.readers import LengthOfStayReader
from mimic3models import common_utils
from mimic3models.metrics import print_metrics_regression
from mimic3models.length_of_stay.utils import save_results
import os
import numpy as np
import argparse
import json
from tqdm import tqdm
def read_and_extract_features(reader, count, period, features):
    """Read up to *count* examples in chunks and extract period features.

    Returns a tuple (X, y, names, ts) where X is the stacked feature
    matrix and y/names/ts are the concatenated per-example lists.
    """
    chunk_size = 1000
    feature_chunks = []
    labels = []
    names = []
    ts = []
    for start in tqdm(range(0, count, chunk_size), desc="Extracting features"):
        end = min(count, start + chunk_size)
        chunk = common_utils.read_chunk(reader, end - start)
        feature_chunks.append(
            common_utils.extract_features_from_rawdata(chunk['X'], chunk['header'], period, features))
        labels.extend(chunk['y'])
        names.extend(chunk['name'])
        ts.extend(chunk['t'])
    return (np.concatenate(feature_chunks, axis=0), labels, names, ts)
def main():
    """Train and evaluate a linear-regression length-of-stay baseline.

    Reads train/val/test episodes, extracts hand-crafted features for the
    requested period, mean-imputes missing values, standardizes features,
    fits a LinearRegression model, then writes regression metrics (JSON)
    and test predictions (CSV) under --output_dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all', help='specifies which period extract features from',
                        choices=['first4days', 'first8days', 'last12hours', 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all', help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    parser.add_argument('--data', type=str, help='Path to the data of length-of-stay task',
                        default=os.path.join(os.path.dirname(__file__), '../../../data/length-of-stay/'))
    parser.add_argument('--output_dir', type=str, help='Directory relative which all output files are stored',
                        default='.')
    args = parser.parse_args()
    print(args)
    train_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                      listfile=os.path.join(args.data, 'train_listfile.csv'))
    val_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                    listfile=os.path.join(args.data, 'val_listfile.csv'))
    test_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'test'),
                                     listfile=os.path.join(args.data, 'test_listfile.csv'))
    print('Reading data and extracting features ...')
    # Cap train/val at 100k examples to bound memory; test uses everything.
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())
    (train_X, train_y, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)
    print('Imputing missing values ...')
    # NOTE: the `verbose` kwarg was deprecated in scikit-learn 1.1 and
    # removed in 1.2; its default was 0, so dropping it preserves behavior
    # on older versions while fixing a TypeError on current ones.
    imputer = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)
    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)
    file_name = "{}.{}".format(args.period, args.features)
    linreg = LinearRegression()
    linreg.fit(train_X, train_y)
    result_dir = os.path.join(args.output_dir, 'results')
    common_utils.create_directory(result_dir)
    # Dump metrics for each split as JSON (values coerced to plain floats
    # so numpy scalars serialize cleanly).
    with open(os.path.join(result_dir, 'train_{}.json'.format(file_name)), "w") as res_file:
        print("Training metrics")
        ret = print_metrics_regression(train_y, linreg.predict(train_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)
    with open(os.path.join(result_dir, 'val_{}.json'.format(file_name)), 'w') as res_file:
        print("Validation metrics")
        ret = print_metrics_regression(val_y, linreg.predict(val_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)
    prediction = linreg.predict(test_X)
    with open(os.path.join(result_dir, 'test_{}.json'.format(file_name)), 'w') as res_file:
        print("Test metrics")
        ret = print_metrics_regression(test_y, prediction)
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)
    save_results(test_names, test_ts, prediction, test_y,
                 os.path.join(args.output_dir, 'predictions', file_name + '.csv'))
if __name__ == '__main__':
main()
| 41.630252
| 119
| 0.671175
|
f02e5977f8af4e02720001fe1de67f768a0f95fd
| 2,542
|
py
|
Python
|
surfactant_example/formulation/tests/test_formulation_data_source.py
|
force-h2020/force-bdss-plugin-surfactant-example
|
ba442f2b39919f7d071f4384f8eaba0d99f44b1f
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
surfactant_example/formulation/tests/test_formulation_data_source.py
|
force-h2020/force-bdss-plugin-surfactant-example
|
ba442f2b39919f7d071f4384f8eaba0d99f44b1f
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
surfactant_example/formulation/tests/test_formulation_data_source.py
|
force-h2020/force-bdss-plugin-surfactant-example
|
ba442f2b39919f7d071f4384f8eaba0d99f44b1f
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
from unittest import TestCase
from traits.testing.unittest_tools import UnittestTools
from force_bdss.api import DataValue
from surfactant_example.surfactant_plugin import SurfactantPlugin
from surfactant_example.formulation.formulation_data_source import (
calculate_solvent_conc, MissingIngredientException)
from surfactant_example.tests.probe_classes.probe_ingredients import (
ProbePrimaryIngredient, ProbeSaltIngredient, ProbeSolventIngredient
)
class TestFormulationDataSource(UnittestTools, TestCase):
    """Tests for the formulation data source created by SurfactantPlugin."""
    def setUp(self):
        self.plugin = SurfactantPlugin()
        # NOTE(review): index 6 assumes a fixed ordering of the plugin's
        # data_source_factories list -- verify if the plugin changes.
        self.factory = self.plugin.data_source_factories[6]
        self.data_source = self.factory.create_data_source()
    def test_basic_function(self):
        # Running the data source on (primary, conc, salt, conc, solvent)
        # inputs should yield one FORMULATION with the solvent filling the
        # remaining concentration (100 - 12 - 1 = 87).
        model = self.factory.create_model()
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(5, len(in_slots))
        input_values = [
            ProbePrimaryIngredient(), 12,
            ProbeSaltIngredient(), 1.0,
            ProbeSolventIngredient()
        ]
        data_values = [
            DataValue(type=slot.type, value=value)
            for slot, value in zip(in_slots, input_values)
        ]
        res = self.data_source.run(model, data_values)
        self.assertEqual(1, len(res))
        self.assertEqual('FORMULATION', res[0].type)
        formulation = res[0].value
        self.assertEqual(3, len(formulation.ingredients))
        self.assertListEqual([12, 1, 87], formulation.concentrations)
    def test__check_ingredient_roles(self):
        # A full primary/salt/solvent set passes; dropping the solvent
        # must raise MissingIngredientException.
        ingredients = [
            ProbePrimaryIngredient(),
            ProbeSaltIngredient(),
            ProbeSolventIngredient()
        ]
        self.assertTrue(
            self.data_source._check_ingredient_roles(ingredients)
        )
        with self.assertRaises(MissingIngredientException):
            self.data_source._check_ingredient_roles(
                ingredients[:-1])
    def test_calculate_solvent_conc(self):
        # Solvent concentration is the remainder to 100%; out-of-range
        # totals (negative or >100) are rejected via assertions.
        solvent_conc = calculate_solvent_conc(
            [10, 3, 6]
        )
        self.assertEqual(81, solvent_conc)
        with self.assertRaises(AssertionError):
            calculate_solvent_conc(
                [100, 3, 6]
            )
        with self.assertRaises(AssertionError):
            calculate_solvent_conc(
                [-100, 3, 6]
            )
    def test_n_surfactants_slots(self):
        # Changing n_surfactants must fire the model's changes_slots trait.
        model = self.factory.create_model()
        with self.assertTraitChanges(model, 'changes_slots'):
            model.n_surfactants = 3
| 29.55814
| 71
| 0.656963
|
355abb2572ee40967f7056901a3c3292ffff1885
| 4,047
|
py
|
Python
|
tests/test_web_player.py
|
dachrisch/spotify_sentiment_classifier
|
3365d49b3f63e0140b916e05cade7c04c6ee3094
|
[
"Unlicense"
] | 1
|
2020-05-12T21:38:57.000Z
|
2020-05-12T21:38:57.000Z
|
tests/test_web_player.py
|
dachrisch/spotify_sentiment_classifier
|
3365d49b3f63e0140b916e05cade7c04c6ee3094
|
[
"Unlicense"
] | 1
|
2020-05-12T19:51:21.000Z
|
2020-05-12T19:51:21.000Z
|
tests/test_web_player.py
|
dachrisch/spotify_sentiment_classifier
|
3365d49b3f63e0140b916e05cade7c04c6ee3094
|
[
"Unlicense"
] | null | null | null |
from unittest import TestCase
from bs4 import BeautifulSoup
from sentiment.classify.sentiment import Sentiment
from tests.web_testing_base import TestClientMixin
class TestWebPlayer(TestCase, TestClientMixin):
    """UI tests for the /player/ page.

    Verifies login/logout rendering, the analyse button states, the
    embedded sentiment playlist and flashed error messages by scraping
    the rendered HTML with BeautifulSoup.
    """
    def setUp(self):
        self._setup_testclient()
        self._setup_logged_in()
    def test_login_shown_when_not_logged_in(self):
        self._setup_not_logged_in()
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='spotify_login')
        self.assertIsNotNone(button)
        self.assertIn('Login with Spotify', button.next)
    def test_main_not_shown_when_not_logged_in(self):
        self._setup_not_logged_in()
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='main')
        self.assertIsNone(button)
    def test_login_not_shown_when_logged_in(self):
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='spotify_login')
        self.assertIsNone(button)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='logged_in_user')
        self.assertIsNotNone(button)
        self.assertIn('Logged in as', button.next)
    def test_button_active_when_not_analysed(self):
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='btn_analyse')
        self.assertIsNotNone(button)
        self.assertIn('Analyse your music library', button.next)
    def test_button_deactivated_when_analysed(self):
        self._setup_account_as_analysed()
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        button = soup.find(id='btn_analysed')
        self.assertIsNotNone(button)
        self.assertIn('Music library analysed', button.next)
    def test_press_sentiment_button(self):
        # POSTing a sentiment key simulates clicking its playlist button.
        self._setup_account_as_analysed()
        response = self.test_client.post('/player/', follow_redirects=False, data={'ANGER': True})
        self.assertEqual(200, response.status_code)
    def test_playlist_is_anger(self):
        # The embedded player iframe must point at the ANGER playlist id
        # reported by the playlist manager.
        self._setup_account_as_analysed()
        response = self.test_client.post('/player/', follow_redirects=False, data={'ANGER': True})
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        spotify_player = soup.find(id='spotify_player_{}'.format(Sentiment.ANGER))
        expected_id = self._auth_service.service_instance.playlist_manager.playlist_for_sentiment(Sentiment.ANGER)[
            'id']
        self.assertIsNotNone(spotify_player)
        self.assertEqual('https://open.spotify.com/embed/playlist/{}'.format(expected_id), spotify_player.attrs['src'])
    def test_error_message(self):
        # setup error
        with self.test_client.session_transaction() as session:
            session['_flashes'] = [('error', "Analyse failed! You don't have any saved tracks."), ]
        # check if error message present
        response = self.test_client.get('/player/', follow_redirects=False)
        self.assertEqual(200, response.status_code)
        soup = BeautifulSoup(response.data, features='html.parser')
        error_messages = soup.find(id='error_messages')
        self.assertIsNotNone(error_messages)
        self.assertIn('''Analyse failed! You don't have any saved tracks.''', error_messages.contents[1].next)
| 42.6
| 119
| 0.699778
|
8aaf4c8f72b65d2533d4612642ec4004964b63ea
| 917
|
py
|
Python
|
mcad/src/dl180_g6_tray_2in.py
|
nicklasfrahm/hardware
|
968adb7888fa22fae37cacbd82efb420e37ff148
|
[
"MIT"
] | 4
|
2021-11-21T16:13:07.000Z
|
2022-02-23T20:34:14.000Z
|
mcad/src/dl180_g6_tray_2in.py
|
nicklasfrahm/hardware
|
968adb7888fa22fae37cacbd82efb420e37ff148
|
[
"MIT"
] | 1
|
2022-03-26T13:22:06.000Z
|
2022-03-26T13:22:06.000Z
|
mcad/src/dl180_g6_tray_2in.py
|
nicklasfrahm/libre19
|
968adb7888fa22fae37cacbd82efb420e37ff148
|
[
"MIT"
] | null | null | null |
"""
A 2.5-inch drive tray for the HP Proliant DL180 G6.
"""
from solid import OpenSCADObject, cube, translate
from lib.utils import build, stl, combine
print()
print("Rendering to STL via OpenSCAD CLI may take several")
print("minutes due to STL import! Please be patient!")
print()
# Load the vendor-supplied 2.5-inch tray model as the starting geometry.
original = stl("vendor/proliant_tray_2in.stl")
# Keep the front section: subtract a box covering everything behind x=70.
front = original
front -= combine(
    cube([130, 150, 100]),
    translate([70, 0, 0]),
)
# Keep the rear section: subtract a box covering the front portion.
back = original
back -= combine(
    cube([81.5, 150, 100]),
    translate([-10, -10, 0]),
)
# Shorten tray by 1.5mm to make it compatible with G6.
solid = front
solid += combine(
    back,
    translate([-1.5, 0, 0]),
)
def obj() -> OpenSCADObject:
    """
    Retrieve part object when importing it into assemblies or similar.
    """
    return solid
# Boilerplate code to export the part as `.scad` file if invoked as a script.
if __name__ == "__main__":
    build(obj(), __file__)
| 20.377778
| 77
| 0.664122
|
3abcec4f0c404d786ddd724310b954f27494c802
| 706
|
py
|
Python
|
setup.py
|
joostsijm/python_supremacy1914
|
a6c8fd9835bbf26fed2adabb2996c92197f362f6
|
[
"Apache-2.0"
] | 2
|
2020-05-28T06:07:22.000Z
|
2020-06-05T13:18:42.000Z
|
setup.py
|
joostsijm/python_supremacy1914
|
a6c8fd9835bbf26fed2adabb2996c92197f362f6
|
[
"Apache-2.0"
] | 2
|
2020-04-22T21:15:36.000Z
|
2020-05-29T17:48:39.000Z
|
setup.py
|
joostsijm/python_supremacy1914
|
a6c8fd9835bbf26fed2adabb2996c92197f362f6
|
[
"Apache-2.0"
] | 1
|
2020-05-28T06:07:47.000Z
|
2020-05-28T06:07:47.000Z
|
import setuptools
# Use the project README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="supremacy1914_wrapper",
    version="0.1.4",
    author="Joost Sijm",
    author_email="joostsijm@gmail.com",
    description="Supremacy1914 API wrapper in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/joostsijm/supremacy1914_wrapper",
    packages=setuptools.find_packages(),
    # Runtime dependency: only `requests` is needed.
    install_requires=[
        'requests',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
| 28.24
| 61
| 0.671388
|
20110c4e3fe8d8aab7036b566df626525858b64c
| 13,098
|
py
|
Python
|
webServer/app/PavementPainter.py
|
sheggen/pavement-painter
|
88e151f5262e04581e4df5740d06e628cda32aca
|
[
"MIT"
] | null | null | null |
webServer/app/PavementPainter.py
|
sheggen/pavement-painter
|
88e151f5262e04581e4df5740d06e628cda32aca
|
[
"MIT"
] | 8
|
2019-03-27T16:10:11.000Z
|
2022-03-11T23:53:53.000Z
|
webServer/app/PavementPainter.py
|
sheggen/pavement-painter
|
88e151f5262e04581e4df5740d06e628cda32aca
|
[
"MIT"
] | null | null | null |
from PIL import Image
import numpy
import time, datetime
from app.PCA_9685 import PCA_9685
from app.OBD2 import *
import threading
import RPi.GPIO as GPIO
import sys, os
numpy.set_printoptions(threshold=sys.maxsize) # for printing array during testing
class PavementPainter(threading.Thread):
    """Thread that drives a solenoid-based pavement-painting rig.

    Parses an image into a sparse dot pattern, then fires rows of
    solenoids (via PCA9685 controllers) with timing scaled by the vehicle
    speed read from an OBD2 sensor.  Operation is controlled either by
    web-server flags (run) or physical GPIO buttons (GPIOrun).
    """
    def __init__(self, threadID):
        """
        Initializes a new Pavement Painter object and starts it painting.
        """
        self.num_solenoids = 152 #Set to the number of solenoids to fire
        self.solenoid_spacing = 9.525 # in millimeters (3/8" = 9.525 mm)
        self.scale_factor = 1000000 # 1000000 would print "to scale"
        self.car_speed = 0.01
        self.fire_duration = .01
        self.fire_rate = .01 # How long to keep the solenoid open #NO LONGER USED
        self.fire_percentage = .3# What percentage of time to fire/stop firing
        self.raw_image = None
        self.img_dict = {}  # sparse {row_index: [solenoid indices to fire]}
        self.img_dir = 'app/static/images/'
        self.img_file = "Dandelion.jpg"
        self.new_height = 0 # Height of the image after resizing
        self.img_matrix = []
        self.PCAs = []  # one PCA_9685 driver per 16 solenoids
        # States for web server
        self.amIPrinting = False
        self.amIMotorUp = False
        self.amIMotorDown = False
        self.amISpeedUp = False
        self.amISpeedDown = False
        self.amIFlushing = False
        # GPIO pins
        self.initialize_solenoids_button = 13
        self.start_button = 19
        self.stop_button = 26
        self.lift_up_button = 12
        self.lift_down_button = 6
        self.speed_up_button = 23
        self.speed_down_button = 24
        self.dir_L = 16
        self.dir_R = 20
        #self.enable_lift = 21
        # Begin the magic
        self.init_GPIO()
        self.obd2 = OBD2() # Connect to OBD sensor
        self.parse_image() # Load image
        self.init_PCAs() # Ready the PCAs
        # self.init_solenoids()
        self.last_button_state = 0 # 0 = not firing, 1 = firing
        # For threading with the live feed camera
        threading.Thread.__init__(self)
        self.threadID = threadID
    def run(self): # webServerRun # Switch function name to run(self): to use webserver instead of buttons
        """Main loop driven by web-server state flags (amI* attributes)."""
        print("Rain started")
        while True:
            self.init_GPIO_2() # Resets GPIOs since they are cleaned out each loop iteration
            # Paint if button was pressed once
            if self.amIPrinting:
                self.paint()
            else:
                self.stop_all()
            # Move the motors up/down
            if self.amIMotorUp:
                # print("Motor up started")
                GPIO.output(self.dir_L, GPIO.HIGH)
                GPIO.output(self.dir_R, GPIO.LOW)
            if self.amIMotorDown:
                # print("Motor Down started")
                GPIO.output(self.dir_R, GPIO.HIGH)
                GPIO.output(self.dir_L, GPIO.LOW)
            # Adjust the speed up/down
            if self.amISpeedUp:
                # print("Speed up")
                self.scale_factor += 100
            if self.amISpeedDown:
                # print("Speed down")
                self.scale_factor -= 100
            # Flush the solenoids
            if self.amIFlushing:
                self.init_solenoids()
                self.amIFlushing = False
        GPIO.cleanup()  # NOTE(review): unreachable -- the `while True` above never exits
    def GPIOrun(self): # Switch function name to run(self): to use buttons instead of webserver
        """Main loop driven by physical GPIO buttons instead of web flags."""
        # Let it rain
        print("Rain started")
        while True:
            self.init_GPIO() # Resets GPIOs since they are cleaned out each loop iteration
            # Lift up/down buttons
            while GPIO.input(self.lift_up_button):
                #print("Going up?")
                #GPIO.output(self.enable_lift, GPIO.HIGH)
                GPIO.output(self.dir_L, GPIO.HIGH)
                GPIO.output(self.dir_R, GPIO.LOW)
            while GPIO.input(self.lift_down_button):
                #print("Going down?")
                #GPIO.output(self.enable_lift, GPIO.HIGH)
                GPIO.output(self.dir_R, GPIO.HIGH)
                GPIO.output(self.dir_L, GPIO.LOW)
            # print("Stopping lift")
            GPIO.output(self.dir_R, GPIO.LOW)
            GPIO.output(self.dir_L, GPIO.LOW)
            # Start, Stop, and Initialize buttons
            if GPIO.input(self.initialize_solenoids_button): # Initialize solenoids
                self.init_solenoids()
            if GPIO.input(self.stop_button): # Stop all solenoids
                self.last_button_state = 0
                self.stop_all()
            if not GPIO.input(self.initialize_solenoids_button) and not GPIO.input(self.stop_button) and not GPIO.input(self.start_button): # keep firing if already firing
                if self.last_button_state:
                    self.paint()
            if GPIO.input(self.start_button): # Start all solenoids
                print("Starting print")
                self.last_button_state = 1
                self.paint()
            # Speed adjust buttons
            if not self.last_button_state:
                if GPIO.input(self.speed_up_button):
                    self.scale_factor += 100
                    print("Speed up: ", self.scale_factor)
                if GPIO.input(self.speed_down_button):
                    self.scale_factor -= 100
                    print("Speed down: ", self.scale_factor)
        GPIO.cleanup()  # NOTE(review): unreachable -- the `while True` above never exits
    def init_GPIO_2(self):
        """Minimal GPIO setup used by run(): only the lift direction pins."""
        GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
        GPIO.setup(self.dir_L, GPIO.OUT)
        GPIO.setup(self.dir_R, GPIO.OUT)
    def init_GPIO(self):
        """Full GPIO setup: buttons as pulled-down inputs, motors as outputs."""
        GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
        #GPIO.setup(18, GPIO.OUT) # Using 18 as power # DID You stop doing this?
        #GPIO.output(18, GPIO.HIGH) # Did you stop doing this?
        GPIO.setup(self.initialize_solenoids_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.setup(self.start_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Button pin set as input w/ pull-up
        GPIO.setup(self.stop_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Button pin set as input w/ pull-up
        #GPIO.setup(self.enable_lift, GPIO.OUT)
        GPIO.setup(self.dir_L, GPIO.OUT)
        GPIO.setup(self.dir_R, GPIO.OUT)
        GPIO.setup(self.lift_up_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Button pin set as input w/ pull-up
        GPIO.setup(self.lift_down_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Button pin set as input w/ pull-up
        GPIO.setup(self.speed_up_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.setup(self.speed_down_button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    def init_PCAs(self):
        """
        Connects to the PCAs
        :return: None
        """
        # One 16-channel controller per 16 solenoids, at consecutive I2C
        # addresses starting from 0x40.
        num_sols = self.num_solenoids
        addr = 0x40
        while num_sols > 0:
            self.PCAs.append(PCA_9685(16, addr))
            num_sols -= 16
            addr += 1
    def stop_all(self):
        """Close every solenoid."""
        for i in range(self.num_solenoids):
            self.stop_fire(i)
            #time.sleep(.01)
        # print("Stopping print")
    def init_solenoids(self):
        """Flush/test: fire each solenoid in turn while amIFlushing is set."""
        self.stop_all()
        # input("Press any key to begin")
        #print("Firing all to test")
        for i in range(self.num_solenoids):
            if self.amIFlushing:
                self.fire(i)
                time.sleep(.25)
                self.stop_fire(i)
            else:
                return
        #print("Test complete")
    def adjust_speed(self, speed):
        """
        Adjusts the speed attribute based on vehicle speed.
        :param speed: the speed of the vehicle in KPH, which will control painting rate
        :return: None
        """
        self.fire_duration = self.solenoid_spacing / (speed * self.scale_factor / 3600)
        #print("Speed: ", speed, ", Fire duration: ", self.fire_duration)
    def parse_image(self):
        """
        Reads in an arbitrary image file and converts it to a binary image.
        :return: None
        """
        try:
            # print(os.getcwd())
            self.raw_image = Image.open(self.img_dir + self.img_file)
            # self.raw_image.show("Original image")
            # Scale height to keep the aspect ratio at num_solenoids width.
            self.new_height = int(self.num_solenoids *(self.raw_image.size[1]/self.raw_image.size[0]))
            print(self.new_height)
            self.raw_image = self.raw_image.resize((self.num_solenoids, self.new_height))
            # self.raw_image.show("Resized image based on number of solenoids")
            self.raw_image = self.raw_image.convert("L")
            # self.raw_image.show("Black and white image")
            self.raw_image = self.raw_image.point(lambda i: i > 128 and 255) # Converts image to a binary image
            # self.raw_image.show("Binary image")
            # print(numpy.array(self.raw_image))
            # Construct a sparse dictionary representing image
            self.createSparseDict()
        except Exception as e:
            print(e)
    def createSparseDict(self):
        # Construct a sparse dictionary representing image
        # Maps each image row to the list of solenoid indices where the
        # binary pixel is 0 (dark -> paint).
        counter = 0
        for pixel in numpy.nditer(numpy.array(self.raw_image)):
            if pixel == 0:
                self.img_dict[counter//self.num_solenoids] = self.img_dict.get(counter//self.num_solenoids, [])
                self.img_dict[counter//self.num_solenoids].append(counter%self.num_solenoids)
            counter += 1
    def paint(self):
        """
        Fires solenoids based on binary image.
        :return: None
        """
        new_speed = self.obd2.get_speed()
        if new_speed:
            #self.camera.camera.annotate_text = "{} KPH/{:0.2f} MPH".format(new_speed,
            #                                                              new_speed / 0.621371)
            self.adjust_speed(new_speed)
        # st = time.time()
        # Paint from the list (slow)
        #self.paint_from_list()
        # Paint from the dictionary (faster?)
        self.paint_from_dict()
        # print("Paint time:", time.time() - st)
    def paint_from_dict(self):
        """Fire then close each row's solenoids using the sparse dict."""
        for i in range(self.new_height):
            for k in self.img_dict.get(i, []):
                if self.amIPrinting:
                    self.fire(k)
                else:
                    return
            # time.sleep(.1)
            # time.sleep((self.fire_duration * self.fire_percentage))
            # self.reset() # TODO Does this work?
            for k in self.img_dict.get(i, []):
                self.stop_fire(k)
            # time.sleep((self.fire_duration * (1 - self.fire_percentage)))
    def paint_from_list(self):
        """Alternative painting path iterating raw pixels row by row."""
        counter = 0
        fire_list = []
        for pixel in numpy.nditer(numpy.array(self.raw_image)):
            # TODO MOVE GPIO to own class.
            #if GPIO.input(self.speed_up_button):
            #    self.scale_factor += 10
            #    print("Speed up: ", self.scale_factor)
            #if GPIO.input(self.speed_down_button):
            #    self.scale_factor -= 10
            #    print("Speed down: ", self.scale_factor)
            if pixel == 0: # 0 for negative space; 255 for positive space
                # Add solenoid to fire list
                fire_list.append(counter % self.num_solenoids)
            counter += 1
            if counter == self.num_solenoids:
                # Adjust speed
                new_speed = self.obd2.get_speed()
                # print("Speed in main from obd: ", new_speed)
                if new_speed:
                    # self.camera.camera.annotate_text = "{} KPH/{:0.2f} MPH".format(new_speed,
                    #                                                               new_speed / 0.621371)
                    self.adjust_speed(new_speed)
                # Fire solenoids
                for solenoid in fire_list:
                    self.fire(solenoid)
                # time.sleep((self.fire_duration * self.fire_percentage)/6)
                for solenoid in fire_list:
                    self.stop_fire(solenoid)
                # print("Waiting: ", self.fire_duration)
                # time.sleep((self.fire_duration * (1 - self.fire_percentage))/6)
                counter = 0
                fire_list = []
    def fire(self, solenoid):
        """
        Fire the solenoid for the correct amount of time, based on self.fire_rate
        :param solenoid: solenoid address
        :return: None
        """
        # print("Firing PCA: ", solenoid//16, ", Solenoid: ", solenoid % 16)
        self.PCAs[solenoid//16].fire_away(solenoid % 16) # Picks the right PCA, then fires the right solenoid
    def stop_fire(self, solenoid):
        """Close the given solenoid via its PCA controller."""
        self.PCAs[solenoid//16].seize_fire(solenoid % 16)
    def reset(self):
        # NOTE(review): stray `pass` below is a no-op -- the loop still runs.
        pass
        for pca in self.PCAs:
            pca.reset()
        self.init_PCAs()
| 38.186589
| 172
| 0.558864
|
28d63f8c67a401d7ccace6b35bfa5d0495d7c02e
| 16,381
|
py
|
Python
|
src/analyze-log.py
|
slamajakub/visnav-py
|
872363a8f115ae2dc8966f7c890891a41cb60b16
|
[
"MIT"
] | null | null | null |
src/analyze-log.py
|
slamajakub/visnav-py
|
872363a8f115ae2dc8966f7c890891a41cb60b16
|
[
"MIT"
] | null | null | null |
src/analyze-log.py
|
slamajakub/visnav-py
|
872363a8f115ae2dc8966f7c890891a41cb60b16
|
[
"MIT"
] | null | null | null |
import sys
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
from batch1 import get_system_model
from missions.didymos import DidymosSystemModel
from missions.rosetta import RosettaSystemModel
try:
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
except:
print('Requires scikit-learn, install using "conda install scikit-learn"')
sys.exit()
from settings import *
from algo import tools
# Predictor-axis limits defining the "easy" region used to filter plots:
# (solar elongation deg, initial deviation deg, distance, ratio in view).
EASY_LIMITS = ((80, 180), (0, 12), (50, 270), (0.8, 1))
# Error value that could substitute for a failed run, per target metric
# (currently only referenced when filtering in the 'easy' plotting mode).
FAIL_ERRS = {
    'rel shift error (m/km)': 200,
    'altitude error': 2000,
    'dist error (m/km)': 200,
    'lat error (m/km)': 200,
    'rot error': 25,
}
# Runs whose absolute rotation error (deg) exceeds this are classed as fails.
MAX_ROTATION_ERR = 7
# read logfiles
def read_data(sm, logfile, predictors, target):
    """Read a batch-run log file and build feature/target arrays.

    :param sm: system model providing asteroid mean radius and camera FOV
    :param logfile: tab-separated log with a header row; rows with <=10
                    columns are skipped
    :param predictors: predictor column names; 'distance' and 'visible'
                       are derived from the spacecraft position columns
                       instead of being read directly
    :param target: name of the target error column
    :return: (X, yc, yr, labels) -- feature matrix, boolean fail
             classification, regression target, and row labels
    """
    X, y, rot_err, labels = [], [], [], []
    with open(logfile, newline='') as csvfile:
        rad = sm.asteroid.mean_radius * 0.001
        data = csv.reader(csvfile, delimiter='\t')
        first = True
        for row in data:
            if len(row)>10:
                if first:
                    # Header row: resolve column indices once.
                    first = False
                    prd_i = [row.index(p) for p in predictors if p not in ('distance', 'visible')]
                    trg_i = row.index(target)
                    rot_i = row.index('rot error')
                    pos_i = [row.index(p+' sc pos') for p in ('x','y','z')]
                    lbl_i = row.index('iter')
                else:
                    row = np.array(row)
                    try:
                        # `np.float` was deprecated in NumPy 1.20 and removed
                        # in 1.24; it was an alias for the builtin float.
                        pos = row[pos_i].astype(float)
                    except ValueError as e:
                        print('Can\'t convert cols %s to float on row %s' % (pos_i, row[0]))
                        raise e
                    distance = np.sqrt(np.sum(pos**2))
                    # Half field-of-view extents at the object's depth.
                    xt = abs(pos[2])*math.tan(math.radians(sm.cam.x_fov)/2)
                    yt = abs(pos[2])*math.tan(math.radians(sm.cam.y_fov)/2)
                    #xm = np.clip((xt - (abs(pos[0])-rad))/rad/2, 0, 1)
                    #ym = np.clip((yt - (abs(pos[1])-rad))/rad/2, 0, 1)
                    # Fraction of the body inside the camera view per axis.
                    xm = 1 - (max(0, pos[0]+rad - xt) + max(0, rad-pos[0] - xt))/rad/2
                    ym = 1 - (max(0, pos[1]+rad - yt) + max(0, rad-pos[1] - yt))/rad/2
                    X.append(np.concatenate((
                        row[prd_i].astype(float),
                        [distance],
                        [xm*ym],
                    )))
                    # err m/km
                    tmp = row[trg_i].astype(float) if len(row)>trg_i else float('nan')
                    y.append(tmp)
                    rot_err.append(row[rot_i].astype(float))
                    labels.append(row[lbl_i])
    X = np.array(X)
    # for classification of fails
    yc = np.isnan(y)
    rot_err = np.array(rot_err)
    if True:
        yc = np.logical_or(yc, np.isnan(rot_err))
        if MAX_ROTATION_ERR > 0:
            # Also count excessive (wrapped) rotation error as a failure.
            I = np.logical_not(yc)
            rot_err[I] = np.abs(tools.wrap_degs(rot_err[I]))
            yc[I] = np.logical_or(yc[I], rot_err[I] > MAX_ROTATION_ERR)
    # for regression
    yr = np.array(y)
    #yr[np.isnan(yr)] = FAIL_ERRS[target] # np.nanmax(yr)
    if target == 'rot error':
        yr = np.abs(tools.wrap_degs(yr))
    return X, yc, yr, labels
if __name__ == '__main__':
if len(sys.argv)<2:
print('USAGE: python analyze-log.py <path to log file> [gpr|1d|easy] [shift|alt|dist|lat|orient]')
sys.exit()
mode = sys.argv[2]
if len(sys.argv) > 3:
sc = 1
if sys.argv[3] == 'shift':
target = 'rel shift error (m/km)'
elif sys.argv[3] == 'alt':
target = 'altitude error'
sc = 1000
elif sys.argv[3] == 'dist':
target = 'dist error (m/km)'
elif sys.argv[3] == 'lat':
target = 'lat error (m/km)'
elif sys.argv[3] == 'orient':
target = 'rot error'
else:
assert False, 'unknown target: %s' % sys.argv[3]
predictors = (
'sol elong', # solar elongation
'total dev angle', # total angle between initial estimate and actual relative orientation
'distance', # distance of object
'visible', # esimate of % visible because of camera view edge
)
predictor_labels = (
'Solar Elongation (deg)',
'Initial orientation error (deg)',
'Distance (km)',
'In camera view (%)',
)
target = target or 'rel shift error (m/km)' #'shift error km' #if not one_d_only else 'dist error'
data = []
for logfile in sys.argv[1].split(" "):
mission = logfile.split('-')[0]
sm = get_system_model(mission)
# read data
X, yc, yr, labels = read_data(sm, os.path.join(LOG_DIR, logfile), predictors, target)
X[:, 1] = np.abs(tools.wrap_degs(X[:, 1]))
data.append((logfile, X, yc, yr*sc, labels))
if mode in ('1d', 'easy'):
n_groups = 6
#yr = yr/1000
#idxs = (0, 1, 2, 3)
idxs = (2,)
for idx in idxs:
fig, axs = plt.subplots(len(data), 1, figsize=(20, 18), sharex=True)
for i, (logfile, X, yc, yr, labels) in enumerate(data):
if mode == 'easy':
q997 = np.percentile(np.abs(yr), 99.7)
tmp = tuple((X[:, k] >= EASY_LIMITS[k][0], X[:, k] <= EASY_LIMITS[k][1]) for k in idxs if k!=idx)
# concatenate above & take logical and, also remove worst 0.3%
I = np.logical_and.reduce(sum(tmp, ()) + (np.logical_or(np.abs(yr) < q997, yr == FAIL_ERRS[target]),))
else:
I = np.ones((X.shape[0],), dtype='bool')
xmin, xmax = np.min(X[I, idx]), np.max(X[I, idx])
ax = axs[i] if len(data) > 1 else axs
line, = ax.plot(X[I, idx], yr[I], 'x')
if n_groups:
# calc means and stds in bins
#x = [1/v for v in np.linspace(1/xmin, 1/xmax, n_groups+1)]
x = np.linspace(xmin, xmax, n_groups + 1)
y_grouped = [yr[np.logical_and.reduce((
I,
np.logical_not(yc),
X[:, idx] > x[i],
X[:, idx] < x[i+1],
))] for i in range(n_groups)]
#means = [np.percentile(yg, 50) for yg in y_grouped]
means = np.array([np.mean(yg) for yg in y_grouped])
#stds = np.subtract([np.percentile(yg, 68) for yg in y_grouped], means)
stds = np.array([np.std(yg) for yg in y_grouped])
x = x.reshape((-1, 1))
stds = stds.reshape((-1, 1))
means = means.reshape((-1, 1))
xstep = np.concatenate((x, x), axis=1).flatten()[1:-1]
sstep = np.concatenate((stds, stds), axis=1).flatten()
mstep = np.concatenate((means, means), axis=1).flatten()
ax.plot(xstep, sstep, '-')
ax.plot(xstep, mstep, '-')
# bar_width = (xmax - xmin)/n_groups * 0.2
# rects1 = ax.bar((x[1:] + x[:-1]) * 0.5, stds, width=bar_width, bottom=means-stds/2,
# alpha=0.4, color='b', yerr=stds, error_kw={'ecolor': '0.3'}, label='error')
else:
# filtered means, stds
xt = np.linspace(xmin, xmax, 100)
if False:
# exponential weight
weight_fun = lambda d: 0.01**abs(d/(xmax-xmin))
else:
# gaussian weight
from scipy.stats import norm
from scipy.interpolate import interp1d
interp = interp1d(xt-xmin, norm.pdf(xt-xmin, 0, (xmax-xmin)/10))
weight_fun = lambda d: interp(abs(d))
if False:
# use smoothed mean for std calc
yma = tools.smooth1d(X[I, idx], X[I, idx], yr[I], weight_fun)
else:
# use global mean for std calc (fast)
yma = np.mean(yr[I])
ym = tools.smooth1d(xt, X[I, idx], yr[I], weight_fun)
ystd = tools.smooth1d(xt, X[I, idx], (yr[I] - yma)**2, weight_fun) ** (1/2)
ax.plot(xt, ym, '-')
ax.plot(xt, ystd, '-')
ax.set_title('%s: %s by %s' % (logfile, target, predictor_labels[idx]))
ax.set_xlabel(predictor_labels[idx])
ax.set_ylabel(target)
ax.set_yticks(range(-200, 201, 50))
ax.hlines(range(-200, 201, 10), xmin, xmax, '0.95', '--')
ax.hlines(range(-200, 201, 50), xmin, xmax, '0.7', '-')
plt.setp(ax.get_xticklabels(), rotation='vertical', fontsize=14)
plt.setp(ax.get_yticklabels(), fontsize=14)
tools.hover_annotate(fig, ax, line, np.array(labels)[I])
#ax.set_xticks((x[1:] + x[:-1]) * 0.5)
#ax.set_xticklabels(['%.2f-%.2f' % (x[i], x[i+1]) for i in range(n_groups)])
#ax.legend()
# operation zones for didymos mission
if mission[:4]=='didy' and idx==2:
ax.set_xticks(np.arange(0.1, 10.5, 0.2))
if i==0:
ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)
ax.axvspan(3.8, 4.2, facecolor='orange', alpha=0.3)
elif i==1:
ax.axvspan(0.15, 0.3, facecolor='pink', alpha=0.5)
ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)
elif i == 2:
ax.axvspan(3.8, 4.2, facecolor='orange', alpha=0.3)
elif i==3:
ax.axvspan(1.1, 1.3, facecolor='cyan', alpha=0.3)
ax.axvspan(2.8, 5.2, facecolor='orange', alpha=0.3)
plt.tight_layout()
while(plt.waitforbuttonpress() == False):
pass
elif mode == '2d':
# 0: solar elong
# 1: initial deviation angle
# 2: distance
# 3: ratio in view
idxs = tuple(range(4))
pairs = (
# (2, 0),
# (1, 3),
(2, 3),
(0, 1),
)
titles = ['ORB', 'AKAZE', 'SURF', 'SIFT']
nd = len(data)
r, c = {
1: (1, 1),
2: (1, 2),
3: (3, 1),
4: (2, 2),
}[nd]
fig, axs = plt.subplots(r, c*len(pairs), figsize=(32, 18))
for j, (logfile, X, yc, yr, labels) in enumerate(data):
for i, (i0, i1) in enumerate(pairs):
ax = axs.flatten()[j*len(pairs) + i]
# filter out difficult regions of axis that are not shown
tmp = tuple((X[:, k] >= EASY_LIMITS[k][0], X[:, k] <= EASY_LIMITS[k][1]) for k in idxs if k not in (i0, i1))
I = np.logical_and.reduce(sum(tmp, ()))
# add some offset if ratio in view is one so that they dont all stack in same place
offsets = (X[I, 3] == 1) * np.random.uniform(0, 0.2, (np.sum(I),))
off0 = 0 if i0 != 3 else offsets
off1 = 0 if i1 != 3 else offsets
line = ax.scatter(X[I, i0] + off0, X[I, i1] + off1, s=60, c=yc[I], cmap=plt.cm.Paired, alpha=0.5) #edgecolors=(0, 0, 0))
ax.tick_params(labelsize=18)
ax.set_xlabel(predictors[i0], fontsize=22)
ax.set_ylabel(predictors[i1], fontsize=22)
tools.hover_annotate(fig, ax, line, np.array(labels)[I])
if i==0:
col, row = j%c, j//c
fig.text(0.26+col*0.5, 0.96-row*0.5, titles[j], fontsize=30, horizontalalignment='center')
# ax.set_xbound(xmin, xmax)
# ax.set_ybound(ymin, ymax)
plt.tight_layout()
plt.subplots_adjust(top=0.94, hspace=0.3, wspace=0.25)
plt.show()
elif mode == 'gpr':
logfile, X, yc, yr, labels = data[0]
pairs = (
(0, 1),
(0, 2),
(1, 2),
# (0,3),(1,3),(2,3),
)
for pair in pairs:
xmin, xmax = np.min(X[:, pair[0]]), np.max(X[:, pair[0]])
ymin, ymax = np.min(X[:, pair[1]]), np.max(X[:, pair[1]])
xx, yy = np.meshgrid(np.linspace(xmin, xmax, 50), np.linspace(ymin, ymax, 50))
kernel = 0.01 * RBF(length_scale=((xmax - xmin) * 2, (ymax - ymin) * 2))
if False:
y = yc
# fit hyper parameters
kernel += 0.1 * WhiteKernel(noise_level=0.001)
gpc = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X[:, pair], yc)
# hyper parameter results
res = gpc.kernel_, gpc.log_marginal_likelihood(gpc.kernel_.theta)
# classify on each grid point
P = gpc.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
else:
y = yr
# fit hyper parameters
kernel += 4.0 * WhiteKernel(noise_level=4.0)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, normalize_y=True).fit(X[:, pair], yr)
# hyper parameter results
res = gpr.kernel_, gpr.log_marginal_likelihood(gpr.kernel_.theta)
# regress on each grid point
P = gpr.predict(np.vstack((xx.ravel(), yy.ravel())).T)
P = P.reshape(xx.shape)
# plot classifier output
fig = plt.figure(figsize=(8, 8))
if True:
print('%s' % ((np.min(P), np.max(P), np.min(y), np.max(y)),))
image = plt.imshow(P, interpolation='nearest', extent=(xmin, xmax, ymin, ymax),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
plt.scatter(X[:, pair[0]], X[:, pair[1]], s=30, c=y, cmap=plt.cm.Paired, edgecolors=(0, 0, 0))
cb = plt.colorbar(image)
ax = fig.gca()
else:
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import Normalize
ax = fig.gca(projection='3d')
scalarMap = plt.cm.ScalarMappable(norm=Normalize(vmin=np.min(P), vmax=np.max(P)),
cmap=plt.cm.PuOr_r)
ax.plot_surface(xx, yy, P, rstride=1, cstride=1, facecolors=scalarMap.to_rgba(P), antialiased=True)
cb.ax.tick_params(labelsize=18)
ax.tick_params(labelsize=18)
plt.xlabel(predictors[pair[0]], fontsize=22)
plt.ylabel(predictors[pair[1]], fontsize=22)
plt.axis([xmin, xmax, ymin, ymax])
# plt.title("%s\n Log-Marginal-Likelihood:%.3f" % res, fontsize=12)
plt.tight_layout()
plt.show()
elif mode == '3d':
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
logfile, X, yc, yr, labels = data[0]
xmin, xmax = np.min(X[:, 0]), np.max(X[:, 0])
ymin, ymax = np.min(X[:, 1]), np.max(X[:, 1])
zmin, zmax = np.min(X[:, 2]), np.max(X[:, 2])
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=yr, cmap=plt.cm.Paired, edgecolors=(0, 0, 0))
# cb = plt.colorbar(image)
# cb.ax.tick_params(labelsize=18)
ax.tick_params(labelsize=18)
ax.set_xlabel(predictors[0], fontsize=22)
ax.set_ylabel(predictors[1], fontsize=22)
ax.set_zlabel(predictors[2], fontsize=22)
ax.set_xbound(xmin, xmax)
ax.set_ybound(ymin, ymax)
ax.set_zbound(zmin, zmax)
plt.tight_layout()
plt.show()
else:
assert False, 'wrong mode'
#plt.waitforbuttonpress()
| 41.055138
| 137
| 0.4819
|
92e87e64d736abc4aea7637893ccda59f678f36d
| 4,281
|
py
|
Python
|
EscapePyPromptLineString/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
EscapePyPromptLineString/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
EscapePyPromptLineString/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import traceback
import time
import sys
try:
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
except:
try:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
except:
from PySide.QtGui import *
from PySide.QtCore import *
def log_uncaught_exceptions(ex_cls, ex, tb):
    """Global excepthook: print the traceback, show it in a dialog, then exit."""
    # Build the same text the default hook would: "ExcName: message:" + frames.
    message = '{}: {}:\n'.format(ex_cls.__name__, ex) + ''.join(traceback.format_tb(tb))
    print(message)
    QMessageBox.critical(None, 'Error', message)
    sys.exit(1)
# Route every uncaught exception through the GUI-aware handler above.
sys.excepthook = log_uncaught_exceptions
class MainWindow(QWidget):
    """Main window: the left pane takes pasted interactive-Python text and the
    right pane shows it with the '>>> ' / '... ' prompt prefixes stripped."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('EscapePyPromptLineString')
        # Input pane (editable) and output pane (read-only).
        self.text_edit_input = QPlainTextEdit()
        self.text_edit_output = QPlainTextEdit()
        self.text_edit_output.setReadOnly(True)
        # Red, mouse-selectable error label plus a small "..." button that
        # opens the detailed-error dialog.
        self.label_error = QLabel()
        self.label_error.setStyleSheet("QLabel { color : red; }")
        self.label_error.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.label_error.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
        self.button_detail_error = QPushButton('...')
        self.button_detail_error.setFixedSize(20, 20)
        self.button_detail_error.setToolTip('Detail error')
        self.button_detail_error.clicked.connect(self.show_detail_error_massage)
        # Short and detailed text of the last error, shown in the dialog.
        self.last_error_message = None
        self.last_detail_error_message = None
        # Re-run the conversion on every keystroke.
        self.text_edit_input.textChanged.connect(self.input_text_changed)
        splitter = QSplitter()
        splitter.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        splitter.addWidget(self.text_edit_input)
        splitter.addWidget(self.text_edit_output)
        layout = QVBoxLayout()
        layout.addWidget(splitter)
        layout_error = QHBoxLayout()
        layout_error.addWidget(self.label_error)
        layout_error.addWidget(self.button_detail_error)
        layout.addLayout(layout_error)
        self.setLayout(layout)
    def show_detail_error_massage(self):
        """Show the last error message plus its traceback in a dialog."""
        message = self.last_error_message + '\n\n' + self.last_detail_error_message
        mb = QErrorMessage()
        mb.setWindowTitle('Error')
        # The error message contains indentation and newline characters that
        # would be mangled when inserted via QErrorMessage.showMessage, and
        # there is no way to choose the text format, so we use this hack.
        mb.findChild(QTextEdit).setPlainText(message)
        mb.exec_()
    def input_text_changed(self):
        """Strip prompt prefixes from the input and refresh the output pane."""
        self.label_error.clear()
        self.button_detail_error.hide()
        self.last_error_message = None
        self.last_detail_error_message = None
        try:
            t = time.perf_counter()
            out_text = self.text_edit_input.toPlainText()
            new_out_text = []
            for line in out_text.splitlines():
                # Drop the interactive-prompt markers '>>> ', '... ' and a
                # bare '...' (the latter becomes an empty line).
                if line.startswith('>>> ') or line.startswith('... ') or line == '...':
                    line = line[4:]
                new_out_text.append(line)
            out_text = '\n'.join(new_out_text)
            self.text_edit_output.setPlainText(out_text)
            print('Escape for {:.3f} secs'.format(time.perf_counter() - t))
        except Exception as e:
            # Print the error to the console
            traceback.print_exc()
            # Save it into variables for the label and the details dialog
            tb = traceback.format_exc()
            self.last_error_message = str(e)
            self.last_detail_error_message = str(tb)
            self.button_detail_error.show()
            self.label_error.setText('Error: ' + self.last_error_message)
if __name__ == '__main__':
    app = QApplication([])
    mw = MainWindow()
    mw.resize(650, 500)
    # Pre-fill the input pane with a sample interactive session as a demo.
    mw.text_edit_input.setPlainText("""\
>>> import bcrypt
>>> password = b"super secret password"
>>> # Hash a password for the first time, with a randomly-generated salt
>>> hashed = bcrypt.hashpw(password, bcrypt.gensalt())
>>> # Check that an unhashed password matches one that has previously been
>>> # hashed
>>> if bcrypt.checkpw(password, hashed):
...     print("It Matches!")
... else:
...     print("It Does not Match :(")
"""
    )
    mw.show()
    sys.exit(app.exec_())
| 28.164474
| 91
| 0.648914
|
e2c4760638840726cd8e5e8f7fe3bbc850c9981a
| 698
|
py
|
Python
|
code/taskA/migrations/0001_initial.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
code/taskA/migrations/0001_initial.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
code/taskA/migrations/0001_initial.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-12-01 02:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the TaskA_table model."""
    # First migration of the app, so it depends on nothing.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TaskA_table',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('asset_contract_address', models.CharField(max_length=100)),
                ('token_id', models.IntegerField()),
                ('img', models.ImageField(default='defo', upload_to='taskA/')),
                ('pred_price', models.FloatField()),
            ],
        ),
    ]
| 27.92
| 117
| 0.574499
|
51ae944c54e6d9c5307d2b5d672967d89497bc50
| 26,111
|
py
|
Python
|
py_entitymatching/debugblocker/backup_debugblocker.py
|
dmvieira/py_entitymatching
|
25b48cf3a60f0cd05f25ffd38b735a461686eff7
|
[
"BSD-3-Clause"
] | null | null | null |
py_entitymatching/debugblocker/backup_debugblocker.py
|
dmvieira/py_entitymatching
|
25b48cf3a60f0cd05f25ffd38b735a461686eff7
|
[
"BSD-3-Clause"
] | null | null | null |
py_entitymatching/debugblocker/backup_debugblocker.py
|
dmvieira/py_entitymatching
|
25b48cf3a60f0cd05f25ffd38b735a461686eff7
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import namedtuple
import heapq as hq
import logging
import numpy
from operator import attrgetter
import pandas as pd
from py_entitymatching.utils.validation_helper import validate_object_type
import py_entitymatching as em
import py_entitymatching.catalog.catalog_manager as cm
logger = logging.getLogger(__name__)
def backup_debug_blocker(candset, ltable, rtable, output_size=200,
                         attr_corres=None, verbose=False):
    """
    This is the old version of the blocker debugger. It is not recommended
    to use this version unless the new blocker debugger is not working properly.
    This function debugs the blocker output and reports a list of potential
    matches that are discarded by a blocker (or a blocker sequence).
    Specifically, this function takes in the two input tables for
    matching and the candidate set returned by a blocker (or a blocker
    sequence), and produces a list of tuple pairs which are rejected by the
    blocker but with high potential of being true matches.
    Args:
        candset (DataFrame): The candidate set generated by
            applying the blocker on the ltable and rtable.
        ltable,rtable (DataFrame): The input DataFrames that are used to
            generate the blocker output.
        output_size (int): The number of tuple pairs that will be
            returned (defaults to 200).
        attr_corres (list): A list of attribute correspondence tuples.
            When ltable and rtable have different schemas, or the same
            schema but different words describing the attributes, the
            user needs to manually specify the attribute correspondence.
            Each element in this list should be a tuple of strings
            which are the corresponding attributes in ltable and rtable.
            The default value is None, and if the user doesn't specify
            this list, a built-in function for finding the
            attribute correspondence list will be called. But we highly
            recommend the users manually specify the attribute
            correspondences, unless the schemas of ltable and rtable are
            identical (defaults to None).
        verbose (boolean): A flag to indicate whether the debug information
            should be logged (defaults to False).
    Returns:
        A pandas DataFrame with 'output_size' number of rows. Each row in the
        DataFrame is a tuple pair which has potential of being a true
        match, but is rejected by the blocker (meaning that the tuple
        pair is in the Cartesian product of ltable and rtable subtracted
        by the candidate set). The fields in the returned DataFrame are
        from ltable and rtable, which are useful for determining similar
        tuple pairs.
    Raises:
        AssertionError: If `ltable`, `rtable` or `candset` is not of type
            pandas DataFrame.
        AssertionError: If `ltable` or `rtable` is empty (size of 0).
        AssertionError: If the output `size` parameter is less than or equal
            to 0.
        AssertionError: If the attribute correspondence (`attr_corres`) list is
            not in the correct format (a list of tuples).
        AssertionError: If the attribute correspondence (`attr_corres`)
            cannot be built correctly.
    Examples:
        >>> import py_entitymatching as em
        >>> ob = em.OverlapBlocker()
        >>> C = ob.block_tables(A, B, l_overlap_attr='title', r_overlap_attr='title', overlap_size=3)
        >>> corres = [('ID','ssn'), ('name', 'ename'), ('address', 'location'),('zipcode', 'zipcode')]
        >>> D = em.backup_debug_blocker(C, A, B, attr_corres=corres)
        >>> import py_entitymatching as em
        >>> ob = em.OverlapBlocker()
        >>> C = ob.block_tables(A, B, l_overlap_attr='name', r_overlap_attr='name', overlap_size=3)
        >>> D = em.backup_debug_blocker(C, A, B, output_size=150)
    """
    # Check input types.
    _validate_types(ltable, rtable, candset, output_size,
                    attr_corres, verbose)
    # Check table size.
    if len(ltable) == 0:
        raise AssertionError('Error: ltable is empty!')
    if len(rtable) == 0:
        raise AssertionError('Error: rtable is empty!')
    # Check the value of output size.
    if output_size <= 0:
        raise AssertionError('The input parameter: \'output_size\''
                             ' is less than or equal to 0. Nothing needs'
                             ' to be done!')
    # Get table metadata.
    l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
    # Validate metadata
    cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
    cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
    # Check the user input field corres list (if exists) and get the raw
    # version of our internal corres list.
    _check_input_field_correspondence_list(ltable, rtable, attr_corres)
    corres_list = _get_field_correspondence_list(ltable, rtable,
                                                 l_key, r_key, attr_corres)
    # Build the (col_name: col_index) dict to speed up locating a field in
    # the schema.
    ltable_col_dict = _build_col_name_index_dict(ltable)
    rtable_col_dict = _build_col_name_index_dict(rtable)
    # Filter correspondence list to remove numeric types. We only consider
    # string types for document concatenation.
    _filter_corres_list(ltable, rtable, l_key, r_key,
                        ltable_col_dict, rtable_col_dict, corres_list)
    # Get field filtered new table.
    ltable_filtered, rtable_filtered = _get_filtered_table(
        ltable, rtable, l_key, r_key, corres_list)
    # Select a subset of fields with high scores.
    feature_list = _select_features(ltable_filtered, rtable_filtered, l_key)
    # Map the record key value to its index in the table.
    lrecord_id_to_index_map = _get_record_id_to_index_map(ltable_filtered, l_key)
    rrecord_id_to_index_map = _get_record_id_to_index_map(rtable_filtered, r_key)
    # Build the tokenized record list delimited by a white space on the
    # selected fields.
    lrecord_list = _get_tokenized_table(ltable_filtered, l_key, feature_list)
    rrecord_list = _get_tokenized_table(rtable_filtered, r_key, feature_list)
    # Reformat the candidate set from a dataframe to a list of record index
    # tuple pair.
    new_formatted_candidate_set = _index_candidate_set(
        candset, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose)
    # Build the token order according to token's frequency. To run a
    # prefix filtering based similarity join algorithm, we first need
    # the global token order.
    order_dict = {}
    _build_global_token_order(lrecord_list, order_dict)
    _build_global_token_order(rrecord_list, order_dict)
    # Sort the tokens in each record by the global order.
    _sort_record_tokens_by_global_order(lrecord_list, order_dict)
    _sort_record_tokens_by_global_order(rrecord_list, order_dict)
    # Run the topk similarity join.
    topk_heap = _topk_sim_join(
        lrecord_list, rrecord_list, new_formatted_candidate_set, output_size)
    # Assemble the topk record list to a dataframe.
    ret_dataframe = _assemble_topk_table(topk_heap, ltable_filtered, rtable_filtered)
    return ret_dataframe
# Validate the types of input parameters.
def _validate_types(ltable, rtable, candidate_set, output_size,
                    attr_corres, verbose):
    """Validate the types of all user-supplied parameters.

    Raises:
        AssertionError: if any parameter is not of the expected type
            (either directly or via ``validate_object_type``).
    """
    validate_object_type(ltable, pd.DataFrame, 'Input left table')
    validate_object_type(rtable, pd.DataFrame, 'Input right table')
    validate_object_type(candidate_set, pd.DataFrame, 'Input candidate set')
    validate_object_type(output_size, int, 'Output size')
    if attr_corres is not None:
        if not isinstance(attr_corres, list):
            # Use the module-level logger (previously logging.error, which
            # writes to the root logger and bypasses this module's config).
            logger.error('Input attribute correspondence is not of'
                         ' type list')
            raise AssertionError('Input attribute correspondence is'
                                 ' not of type list')
        for pair in attr_corres:
            if not isinstance(pair, tuple):
                logger.error('Pair in attribute correspondence list is not'
                             ' of type tuple')
                raise AssertionError('Pair in attribute correspondence list'
                                     ' is not of type tuple')
    if not isinstance(verbose, bool):
        logger.error('Parameter verbose is not of type bool')
        raise AssertionError('Parameter verbose is not of type bool')
# Assemble the topk heap to a dataframe.
def _assemble_topk_table(topk_heap, ltable, rtable, ret_key='_id',
                         l_output_prefix='ltable_', r_output_prefix='rtable_'):
    """Turn the (similarity, l_index, r_index) heap into a candidate-set
    DataFrame whose columns are prefixed copies of both tables' fields.
    """
    topk_heap.sort(key=lambda tup: tup[0], reverse=True)
    ret_data_col_name_list = ['_id', 'similarity']
    ltable_col_names = list(ltable.columns)
    rtable_col_names = list(rtable.columns)
    lkey = em.get_key(ltable)
    rkey = em.get_key(rtable)
    lkey_index = 0
    rkey_index = 0
    for i in range(len(ltable_col_names)):
        if ltable_col_names[i] == lkey:
            lkey_index = i
    for i in range(len(rtable_col_names)):
        if rtable_col_names[i] == rkey:
            rkey_index = i
    # Key columns come first (right after _id/similarity), then the rest.
    ret_data_col_name_list.append(l_output_prefix + lkey)
    ret_data_col_name_list.append(r_output_prefix + rkey)
    ltable_col_names.remove(lkey)
    rtable_col_names.remove(rkey)
    for col_name in ltable_col_names:
        ret_data_col_name_list.append(l_output_prefix + col_name)
    for col_name in rtable_col_names:
        ret_data_col_name_list.append(r_output_prefix + col_name)
    ret_tuple_list = []
    for i, tup in enumerate(topk_heap):
        # tup[1]/tup[2] are row *positions* produced upstream, so use .iloc.
        # (The original used DataFrame.ix, removed in pandas 1.0, which fell
        # back to the same positional lookup for integer labels.)
        lrecord = list(ltable.iloc[tup[1]])
        rrecord = list(rtable.iloc[tup[2]])
        ret_tuple = [i, tup[0]]
        ret_tuple.append(lrecord[lkey_index])
        ret_tuple.append(rrecord[rkey_index])
        for j in range(len(lrecord)):
            if j != lkey_index:
                ret_tuple.append(lrecord[j])
        for j in range(len(rrecord)):
            if j != rkey_index:
                ret_tuple.append(rrecord[j])
        ret_tuple_list.append(ret_tuple)
    data_frame = pd.DataFrame(ret_tuple_list)
    # When the ret data frame is empty, we cannot assign column names.
    if len(data_frame) == 0:
        return data_frame
    data_frame.columns = ret_data_col_name_list
    # lkey/rkey are unchanged since the lookup above; no need to re-fetch.
    cm.set_candset_properties(data_frame, ret_key, l_output_prefix + lkey,
                              r_output_prefix + rkey, ltable, rtable)
    return data_frame
# Topk similarity join wrapper.
def _topk_sim_join(lrecord_list, rrecord_list, cand_set, output_size):
    """Run the top-k set-similarity join and return the result heap."""
    # Seed the event queue with one prefix event per (record, token) pair,
    # then hand it to the core join algorithm.
    events = _generate_prefix_events(lrecord_list, rrecord_list)
    return _topk_sim_join_impl(lrecord_list, rrecord_list, events,
                               cand_set, output_size)
# Implement topk similarity join. Refer to "top-k set similarity join"
# by Xiao et al. for details.
def _topk_sim_join_impl(lrecord_list, rrecord_list, prefix_events,
                        cand_set, output_size):
    # Core prefix-filtering top-k join over the two tokenized tables.
    # Consumes prefix_events (a min-heap keyed by negated threshold) and
    # returns a min-heap of up to output_size (sim, l_idx, r_idx) triples.
    total_compared_pairs = 0
    compared_set = set()
    l_inverted_index = {}
    r_inverted_index = {}
    topk_heap = []
    while len(prefix_events) > 0:
        # Early termination: the best remaining threshold (negated on the
        # heap) cannot beat the current k-th best similarity.
        if len(topk_heap) == output_size and\
                topk_heap[0][0] >= prefix_events[0][0] * -1:
            break
        # event = (-threshold, table_indicator, record_index, token_index, token)
        event = hq.heappop(prefix_events)
        table_indicator = event[1]
        rec_idx = event[2]
        tok_idx = event[3]
        if table_indicator == 0:
            # Event from the left table: probe the right inverted index.
            token = lrecord_list[rec_idx][tok_idx]
            if token in r_inverted_index:
                r_records = r_inverted_index[token]
                for r_rec_idx in r_records:
                    pair = (rec_idx, r_rec_idx)
                    # Skip if the pair is in the candidate set.
                    if pair in cand_set:
                        continue
                    # Skip if the pair has been compared.
                    if pair in compared_set:
                        continue
                    sim = _jaccard_sim(
                        set(lrecord_list[rec_idx]), set(rrecord_list[r_rec_idx]))
                    if len(topk_heap) == output_size:
                        hq.heappushpop(topk_heap, (sim, rec_idx, r_rec_idx))
                    else:
                        hq.heappush(topk_heap, (sim, rec_idx, r_rec_idx))
                    total_compared_pairs += 1
                    compared_set.add(pair)
            # Update the inverted index.
            if token not in l_inverted_index:
                l_inverted_index[token] = set()
            l_inverted_index[token].add(rec_idx)
        else:
            # Event from the right table: mirror image of the branch above.
            token = rrecord_list[rec_idx][tok_idx]
            if token in l_inverted_index:
                l_records = l_inverted_index[token]
                for l_rec_idx in l_records:
                    pair = (l_rec_idx, rec_idx)
                    # Skip if the pair is in the candidate set.
                    if pair in cand_set:
                        continue
                    # Skip if the pair has been compared.
                    if pair in compared_set:
                        continue
                    sim = _jaccard_sim(
                        set(lrecord_list[l_rec_idx]), set(rrecord_list[rec_idx]))
                    if len(topk_heap) == output_size:
                        hq.heappushpop(topk_heap, (sim, l_rec_idx, rec_idx))
                    else:
                        hq.heappush(topk_heap, (sim, l_rec_idx, rec_idx))
                    total_compared_pairs += 1
                    compared_set.add(pair)
            # Update the inverted index.
            if token not in r_inverted_index:
                r_inverted_index[token] = set()
            r_inverted_index[token].add(rec_idx)
    return topk_heap
# Calculate the token-based Jaccard similarity of two string sets.
def _jaccard_sim(l_token_set, r_token_set):
l_len = len(l_token_set)
r_len = len(r_token_set)
intersect_size = len(l_token_set & r_token_set)
if l_len + r_len == 0:
return 0.0
return intersect_size * 1.0 / (l_len + r_len - intersect_size)
# Check the input field correspondence list.
def _check_input_field_correspondence_list(ltable, rtable, field_corres_list):
if field_corres_list is None:
return
true_ltable_fields = list(ltable.columns)
true_rtable_fields = list(rtable.columns)
for pair in field_corres_list:
# Raise an error if the pair in not a tuple or the length is not two.
if type(pair) != tuple or len(pair) != 2:
raise AssertionError('Error in checking user input field'
' correspondence: the input field pairs'
'are not in the required tuple format!')
given_ltable_fields = [field[0] for field in field_corres_list]
given_rtable_fields = [field[1] for field in field_corres_list]
# Raise an error if a field is in the correspondence list but not in
# the table schema.
for given_field in given_ltable_fields:
if given_field not in true_ltable_fields:
raise AssertionError('Error in checking user input field'
' correspondence: the field \'%s\' is'
' not in the ltable!' % given_field)
for given_field in given_rtable_fields:
if given_field not in true_rtable_fields:
raise AssertionError('Error in checking user input field'
' correspondence:'
' the field \'%s\' is not in the'
' rtable!' % given_field)
return
# Get the field correspondence list. If the input list is empty, call
# the system builtin function to get the correspondence, or use the
# user input as the correspondence.
def _get_field_correspondence_list(ltable, rtable, lkey, rkey, attr_corres):
corres_list = []
if attr_corres is None or len(attr_corres) == 0:
corres_list = em.get_attr_corres(ltable, rtable)['corres']
if len(corres_list) == 0:
raise AssertionError('Error: the field correspondence list'
' is empty. Please specify the field'
' correspondence!')
else:
for tu in attr_corres:
corres_list.append(tu)
# If the key correspondence is not in the list, add it in.
key_pair = (lkey, rkey)
if key_pair not in corres_list:
corres_list.append(key_pair)
return corres_list
# Filter the correspondence list. Remove the fields in numeric types.
def _filter_corres_list(ltable, rtable, ltable_key, rtable_key,
ltable_col_dict, rtable_col_dict, corres_list):
ltable_dtypes = list(ltable.dtypes)
rtable_dtypes = list(rtable.dtypes)
for i in reversed(range(len(corres_list))):
lcol_name = corres_list[i][0]
rcol_name = corres_list[i][1]
# Filter the pair where both fields are numeric types.
if ltable_dtypes[ltable_col_dict[lcol_name]] != numpy.dtype('O')\
and rtable_dtypes[rtable_col_dict[rcol_name]] != numpy.dtype('O'):
if lcol_name != ltable_key and rcol_name != rtable_key:
corres_list.pop(i)
if len(corres_list) == 1 and corres_list[0][0] == ltable_key\
and corres_list[0][1] == rtable_key:
raise AssertionError('The field correspondence list is empty after'
' filtering: please verify your correspondence'
' list, or check if each field is of numeric'
' type!')
# Filter the original input tables according to the correspondence list.
# The filtered tables will only contain the fields in the correspondence list.
def _get_filtered_table(ltable, rtable, lkey, rkey, corres_list):
    """Project both tables onto the fields named in the correspondence list."""
    lcols = [pair[0] for pair in corres_list]
    rcols = [pair[1] for pair in corres_list]
    lfiltered_table = ltable[lcols]
    rfiltered_table = rtable[rcols]
    # Re-register the keys on the projected copies for downstream metadata use.
    em.set_key(lfiltered_table, lkey)
    em.set_key(rfiltered_table, rkey)
    return lfiltered_table, rfiltered_table
# Build the mapping bewteen field name and its index in the schema.
def _build_col_name_index_dict(table):
col_dict = {}
col_names = list(table.columns)
for i in range(len(col_names)):
col_dict[col_names[i]] = i
return col_dict
# Select the most important fields for similarity join. The importance
# of a fields is measured by the combination of field value uniqueness
# and non-emptyness.
def _select_features(ltable, rtable, lkey):
    """Pick the most informative non-key fields of the filtered tables.

    Returns a sorted list of column indices; the key column is never selected.
    """
    lcolumns = list(ltable.columns)
    rcolumns = list(rtable.columns)
    lkey_index = -1
    if len(lcolumns) != len(rcolumns):
        raise AssertionError('Error: FILTERED ltable and FILTERED rtable'
                             ' have different number of fields!')
    for idx, col in enumerate(lcolumns):
        if col == lkey:
            lkey_index = idx
    lweight = _get_feature_weight(ltable)
    rweight = _get_feature_weight(rtable)
    # A field's combined importance is the product of both tables' weights.
    Rank = namedtuple('Rank', ['index', 'weight'])
    rank_list = [Rank(idx, lw * rw)
                 for idx, (lw, rw) in enumerate(zip(lweight, rweight))]
    rank_list.pop(lkey_index)
    rank_list = sorted(rank_list, key=attrgetter('weight'), reverse=True)
    # Keep all fields when few, three when moderate, half when many.
    if len(rank_list) <= 3:
        num_selected_fields = len(rank_list)
    elif len(rank_list) <= 5:
        num_selected_fields = 3
    else:
        num_selected_fields = int(len(rank_list) / 2)
    selected = [rank.index for rank in rank_list[:num_selected_fields]]
    return sorted(selected)
# Calculate the importance (weight) for each field in a table.
def _get_feature_weight(table):
num_records = len(table)
if num_records == 0:
raise AssertionError('Error: empty table!')
weight = []
for col in table.columns:
value_set = set()
non_empty_count = 0
col_values = table[col]
for value in col_values:
if not pd.isnull(value) and value != '':
value_set.add(value)
non_empty_count += 1
selectivity = 0.0
if non_empty_count != 0:
selectivity = len(value_set) * 1.0 / non_empty_count
non_empty_ratio = non_empty_count * 1.0 / num_records
# The field weight is the combination of non-emptyness
# and uniqueness.
weight.append(non_empty_ratio + selectivity)
return weight
# Build the mapping of record key value and its index in the table.
def _get_record_id_to_index_map(table, table_key):
record_id_to_index = {}
id_col = list(table[table_key])
for i in range(len(id_col)):
if id_col[i] in record_id_to_index:
raise AssertionError('Duplicate keys found:', id_col[i])
record_id_to_index[id_col[i]] = i
return record_id_to_index
# Tokenize a table. First tokenize each table column by a white space,
# then concatenate the column of each record. The reason for tokenizing
# columns first is that it's more efficient than iterate each dataframe
# tuple.
def _get_tokenized_table(table, table_key, feature_list):
    # Returns one token list per record, concatenating the selected columns.
    record_list = []
    columns = table.columns[feature_list]
    tmp_table = []
    for col in columns:
        column_token_list = _get_tokenized_column(table[col])
        tmp_table.append(column_token_list)
    num_records = len(table[table_key])
    for i in range(num_records):
        token_list = []
        index_map = {}
        for j in range(len(columns)):
            tmp_col_tokens = tmp_table[j][i]
            for token in tmp_col_tokens:
                if token != '':
                    # Disambiguate repeated tokens within one record: the
                    # first occurrence keeps the bare token, later ones get
                    # a '_<count>' suffix ('a', 'a_1', 'a_2', ...) so set
                    # semantics downstream still count multiplicity.
                    if token in index_map:
                        token_list.append(token + '_' + str(index_map[token]))
                        index_map[token] += 1
                    else:
                        token_list.append(token)
                        index_map[token] = 1
        record_list.append(token_list)
    return record_list
# Tokenize each table column by white spaces.
def _get_tokenized_column(column):
    """Tokenize one column: lower-case each value and split on single spaces.

    Missing values yield the one-element list [''] so every row keeps an entry.
    """
    column_token_list = []
    for value in list(column):
        tmp_value = _replace_nan_to_empty(value)
        if tmp_value != '':
            # str.split already returns a list; the extra list() copy was redundant.
            column_token_list.append(tmp_value.lower().split(' '))
        else:
            column_token_list.append([''])
    return column_token_list
# Check the value of each field. Replace nan with empty string.
# Cast floats into integers.
def _replace_nan_to_empty(field):
if pd.isnull(field):
return ''
elif type(field) in [float, numpy.float64, int, numpy.int64]:
return str('{0:.0f}'.format(field))
else:
return field
# Reformat the input candidate set. Since the input format is DataFrame,
# it's difficult for us to know if a tuple pair is in the candidate
# set or not. We will use the reformatted candidate set in the topk
# similarity join.
def _index_candidate_set(candidate_set, lrecord_id_to_index_map, rrecord_id_to_index_map, verbose):
    # Returns a set of (l_row_position, r_row_position) pairs for O(1) lookup.
    new_formatted_candidate_set = set()
    if len(candidate_set) == 0:
        return new_formatted_candidate_set
    # Get metadata (candset key, foreign keys and the underlying tables).
    key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key =\
        cm.get_metadata_for_candset(candidate_set, logger, verbose)
    # validate metadata
    cm._validate_metadata_for_candset(candidate_set, key, fk_ltable, fk_rtable,
                                      ltable, rtable, l_key, r_key,
                                      logger, verbose)
    ltable_key_data = list(candidate_set[fk_ltable])
    rtable_key_data = list(candidate_set[fk_rtable])
    # Translate each (l_key, r_key) pair into row positions via the maps.
    for i in range(len(ltable_key_data)):
        new_formatted_candidate_set.add((lrecord_id_to_index_map[ltable_key_data[i]],
                                         rrecord_id_to_index_map[rtable_key_data[i]]))
    return new_formatted_candidate_set
# Build the global order of tokens in the table by frequency.
def _build_global_token_order(record_list, order_dict):
for record in record_list:
for token in record:
if token in order_dict:
order_dict[token] += 1
else:
order_dict[token] = 1
# Sort each tokenized record by the global token order.
def _sort_record_tokens_by_global_order(record_list, order_dict):
for i in range(len(record_list)):
tmp_record = []
for token in record_list[i]:
if token in order_dict:
tmp_record.append(token)
record_list[i] = sorted(tmp_record, key=lambda x: (order_dict[x], x))
# Generate the prefix events of two tables for topk similarity joins.
# Refer to "top-k set similarity join" by Xiao et al. for details.
def _generate_prefix_events(lrecord_list, rrecord_list):
    """Build the initial prefix-event heap over both tables (0=left, 1=right)."""
    prefix_events = []
    for indicator, records in ((0, lrecord_list), (1, rrecord_list)):
        _generate_prefix_events_impl(records, prefix_events, indicator)
    return prefix_events
# Prefix event generation for a table.
def _generate_prefix_events_impl(record_list, prefix_events, table_indicator):
    """Push one prefix event per token of every record onto the heap."""
    for rec_idx, record in enumerate(record_list):
        rec_len = len(record)
        for tok_idx, token in enumerate(record):
            # Negate the threshold so the largest one sits on top of the min-heap.
            threshold = _calc_threshold(tok_idx, rec_len)
            hq.heappush(prefix_events,
                        (-1.0 * threshold, table_indicator, rec_idx, tok_idx, token))
# Calculate the corresponding topk similarity join of a token in a record.
# Refer to "top-k set similarity join" by Xiao et al. for details.
def _calc_threshold(token_index, record_length):
return 1 - token_index * 1.0 / record_length
| 40.357032
| 102
| 0.648807
|
db54fcfcb819c7863f8816485eea8ba0b95be50e
| 2,165
|
py
|
Python
|
common/src/autogluon/common/utils/pandas_utils.py
|
taesup-aws/autogluon
|
51b20c4a18de148b4f06b384e56b102c86727153
|
[
"Apache-2.0"
] | null | null | null |
common/src/autogluon/common/utils/pandas_utils.py
|
taesup-aws/autogluon
|
51b20c4a18de148b4f06b384e56b102c86727153
|
[
"Apache-2.0"
] | null | null | null |
common/src/autogluon/common/utils/pandas_utils.py
|
taesup-aws/autogluon
|
51b20c4a18de148b4f06b384e56b102c86727153
|
[
"Apache-2.0"
] | null | null | null |
import logging
import math
from functools import wraps
from pandas import DataFrame
from ..features.infer_types import get_type_map_raw
from ..features.types import R_INT, R_FLOAT, R_CATEGORY
logger = logging.getLogger(__name__)
def _suspend_logging(func):
"""hides any logs within the called func that are below warnings"""
@wraps(func)
def inner(*args, **kwargs):
root_logger = logging.getLogger()
previous_log_level = root_logger.getEffectiveLevel()
try:
root_logger.setLevel(max(30, previous_log_level))
return func(*args, **kwargs)
finally:
root_logger.setLevel(previous_log_level)
return inner
# suspend_logging to hide the Pandas log of NumExpr initialization
@_suspend_logging
def get_approximate_df_mem_usage(df: DataFrame, sample_ratio=0.2):
    """Estimate per-column memory usage of df by sampling rows.

    For sample_ratio >= 1 this is exactly ``df.memory_usage(deep=True)``.
    Otherwise int/float columns are measured exactly (fixed-width, so the
    full ``df.memory_usage()`` is already cheap and exact), categorical
    columns are estimated from a sample of their categories, and every
    other column is estimated from the deep usage of the first
    ``ceil(sample_ratio * len(df))`` rows scaled back up.

    Returns a pandas Series indexed like ``DataFrame.memory_usage``
    (includes the 'Index' entry from the initial shallow measurement).
    """
    if sample_ratio >= 1:
        return df.memory_usage(deep=True)
    else:
        num_rows = len(df)
        num_rows_sample = math.ceil(sample_ratio * num_rows)
        # Recompute the effective ratio after rounding the sample size up.
        sample_ratio = num_rows_sample / num_rows
        dtypes_raw = get_type_map_raw(df)
        columns_category = [column for column in df if dtypes_raw[column] == R_CATEGORY]
        columns_inexact = [column for column in df if dtypes_raw[column] not in [R_INT, R_FLOAT, R_CATEGORY]]
        # Shallow usage: exact for fixed-width dtypes, a lower bound otherwise.
        memory_usage = df.memory_usage()
        if columns_category:
            for column in columns_category:
                num_categories = len(df[column].cat.categories)
                num_categories_sample = math.ceil(sample_ratio * num_categories)
                sample_ratio_cat = num_categories_sample / num_categories
                # codes are fixed-width per row; category labels are sampled and scaled.
                memory_usage[column] = df[column].cat.codes.dtype.itemsize * num_rows + df[column].cat.categories[:num_categories_sample].memory_usage(deep=True) / sample_ratio_cat
        if columns_inexact:
            # this line causes NumExpr log, suspend_logging is used to hide the log.
            memory_usage_inexact = df[columns_inexact].head(num_rows_sample).memory_usage(deep=True)[columns_inexact] / sample_ratio
            memory_usage = memory_usage_inexact.combine_first(memory_usage)
        return memory_usage
| 41.634615
| 180
| 0.703002
|
acc5521ab856c28a12c8d1f86487836fab5e2b64
| 4,155
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/version/catalog_version_data.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/version/catalog_version_data.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/version/catalog_version_data.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.interaction_model.version.input_source import InputSourceV1
class CatalogVersionData(object):
    """
    Catalog version data with metadata.


    :param source: 
    :type source: (optional) ask_smapi_model.v1.skill.interaction_model.version.input_source.InputSource
    :param description: Description string for specific catalog version.
    :type description: (optional) str
    :param version: Specific catalog version.
    :type version: (optional) str

    """
    # Maps each attribute to its (de)serialization type.
    deserialized_types = {
        'source': 'ask_smapi_model.v1.skill.interaction_model.version.input_source.InputSource',
        'description': 'str',
        'version': 'str'
    }  # type: Dict

    # Maps each attribute to its wire (JSON) key.
    attribute_map = {
        'source': 'source',
        'description': 'description',
        'version': 'version'
    }  # type: Dict
    supports_multiple_types = False

    def __init__(self, source=None, description=None, version=None):
        # type: (Optional[InputSourceV1], Optional[str], Optional[str]) -> None
        """Catalog version data with metadata.

        :param source: 
        :type source: (optional) ask_smapi_model.v1.skill.interaction_model.version.input_source.InputSource
        :param description: Description string for specific catalog version.
        :type description: (optional) str
        :param version: Specific catalog version.
        :type version: (optional) str
        """
        self.__discriminator_value = None  # type: str

        self.source = source
        self.description = description
        self.version = version

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        def convert(value):
            # Recursively serialize a single value: models first, then enums,
            # everything else passes through unchanged.
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, Enum):
                return value.value
            return value

        result = {}  # type: Dict
        for attr in self.deserialized_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [convert(item) for item in value]
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, CatalogVersionData):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| 33.508065
| 108
| 0.611071
|
589a4ebff241f508f6d4b05646dcc54758230db0
| 2,485
|
py
|
Python
|
genie/models/utils.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 8
|
2022-02-08T11:12:37.000Z
|
2022-03-16T08:27:50.000Z
|
genie/models/utils.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 1
|
2022-03-07T07:36:24.000Z
|
2022-03-07T20:58:12.000Z
|
genie/models/utils.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 7
|
2022-02-22T22:48:35.000Z
|
2022-03-18T05:18:30.000Z
|
import numpy as np
import torch
import pickle
def label_smoothed_nll_loss(
    lprobs: torch.Tensor,
    target: torch.Tensor,
    target_attention_mask: torch.Tensor,
    epsilon: float,
    ignore_index: int = None,
    reduce: bool = True,
):
    """Label-smoothed negative log-likelihood loss.

    :param lprobs: log-probabilities, shape (batch, tgt_seq_len, vocab_size).
    :param target: gold token ids, shape (batch, tgt_seq_len) or with a
        trailing singleton dim. NOTE(review): when ignore_index is set this
        tensor is mutated in place by clamp_min_ — callers must not rely on
        its original padded values afterwards.
    :param target_attention_mask: 1 for real tokens, 0 for padding; its sum
        normalizes both returned losses (per-token average).
    :param epsilon: smoothing mass distributed over the non-target classes.
    :param ignore_index: token id to mask out of both loss terms (e.g. pad).
    :param reduce: if True, sum over all positions before normalizing.
    :return: (smoothed_loss, nll_loss), both divided by the token count.
    """
    # target.shape -> batch_size x tgt_seq_length; lprobs.shape -> batch_size x tgt_seq_length x vocabulary_size
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)  # target.shape -> batch_size x tgt_seq_length x 1
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        # In-place: padded ids (possibly negative) become valid gather indices.
        target.clamp_min_(0)
        nll_loss = -lprobs.gather(dim=-1, index=target)  # log prob terms at the target indices
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-smoothing term over the vocabulary
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        # TODO: make sure that the dimensions here match i.e. do you actually want the squeeze??
        # TODO: If anywhere, shouldn't it be outside? (see below)
        nll_loss = -lprobs.gather(dim=-1, index=target)
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
        # nll_loss = nll_loss.squeeze(-1)
        # smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.squeeze(-1)
    smooth_loss = smooth_loss.squeeze(-1)
    if reduce:
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    # TODO: Why is "the number of classes" (lprobs.size(-1) - 1)? What is the -1 for? Is it padding?
    eps_i = epsilon / (lprobs.size(-1) - 1)
    # TODO: Check correctness. shouldn't it be loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    # loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    # Normalize both losses by the number of non-padded tokens.
    num_tokens = target_attention_mask.sum()
    loss, nll_loss = loss / num_tokens, nll_loss / num_tokens
    return loss, nll_loss
def set_seed(args):
    """Seed numpy and torch RNGs from args.seed; when args.n_gpu > 0,
    also seed every visible CUDA device for reproducibility."""
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def chunk_it(seq, num=1):
    """Split seq into num list chunks of equal base length len(seq)//num.

    The len(seq) % num leftover elements are appended one each to the end
    of the first chunks (so chunk contents are not strictly contiguous).
    Requires num > 0; seq must support slicing to lists (e.g. a list).
    """
    assert num > 0
    base = len(seq) // num
    chunks = [seq[k * base:(k + 1) * base] for k in range(num)]
    leftover = len(seq) - base * num
    for k in range(leftover):
        chunks[k].append(seq[base * num + k])
    return chunks
| 34.041096
| 117
| 0.655936
|
9fd0e190a5f9922db8644d601d10fec58678d34a
| 4,908
|
py
|
Python
|
net/fpn_net_lite_old.py
|
YoungYoung619/Low-light-scene-augmentation
|
72a4124dfacbec86835ea9e102bc4679800c0627
|
[
"Apache-2.0"
] | 8
|
2019-06-11T06:30:38.000Z
|
2021-11-04T01:59:27.000Z
|
net/fpn_net_lite_old.py
|
YoungYoung619/Low-light-scene-augmentation
|
72a4124dfacbec86835ea9e102bc4679800c0627
|
[
"Apache-2.0"
] | 1
|
2020-05-28T06:31:17.000Z
|
2020-05-28T06:31:17.000Z
|
net/fpn_net_lite_old.py
|
YoungYoung619/Low-light-scene-augmentation
|
72a4124dfacbec86835ea9e102bc4679800c0627
|
[
"Apache-2.0"
] | 1
|
2020-11-16T07:28:06.000Z
|
2020-11-16T07:28:06.000Z
|
import tensorflow as tf
from net.lite_utils.conv_block import backbone_lite
slim = tf.contrib.slim
def group_norm(x, G=32, esp=1e-5):
    """Group normalization for an NHWC tensor.

    Normalizes over (channels-per-group, H, W) within each of G groups,
    then applies learnable per-channel gamma/beta. G is clamped to the
    channel count. Returns an NHWC tensor of the input shape.
    """
    # NHWC -> NCHW, following the paper's formulation.
    nchw = tf.transpose(x, [0, 3, 1, 2])
    N, C, H, W = nchw.get_shape().as_list()
    G = min(G, C)
    grouped = tf.reshape(nchw, [-1, G, C // G, H, W])
    mean, var = tf.nn.moments(grouped, [2, 3, 4], keep_dims=True)
    normed = (grouped - mean) / tf.sqrt(var + esp)
    # Learnable per-channel scale and shift.
    gamma = tf.Variable(tf.constant(1.0, shape=[C]), dtype=tf.float32, name='gamma')
    beta = tf.Variable(tf.constant(0.0, shape=[C]), dtype=tf.float32, name='beta')
    gamma = tf.reshape(gamma, [1, C, 1, 1])
    beta = tf.reshape(beta, [1, C, 1, 1])
    scaled = tf.reshape(normed, [-1, C, H, W]) * gamma + beta
    # NCHW -> NHWC for downstream layers.
    return tf.transpose(scaled, [0, 2, 3, 1])
def lrelu(x):
    """Leaky ReLU with a fixed negative slope of 0.2."""
    return tf.maximum(0.2 * x, x)
def fpn_net_lite(input, is_training, norm_type='gn'):  # U-Net-like FPN decoder
    """Lightweight FPN over backbone_lite features for image enhancement.

    :param input: NHWC image batch fed to the backbone.
    :param is_training: forwarded to the backbone (e.g. for BN behavior).
    :param norm_type: NOTE(review): currently unused — group_norm is always
        applied below regardless of this value; confirm intended behavior.
    :return: (output_image, self_attention_pairs, attentions) where
        output_image is a 3-channel sigmoid map at conv1 resolution,
        self_attention_pairs are (finer_attention, upsampled_coarser_attention)
        tuples for an attention-imitation loss, and attentions are the raw
        per-level channel-summed maps.
    """
    endpoints = backbone_lite(input, is_training)
    conv1 = endpoints['conv_1']
    conv2 = endpoints['conv_2']
    conv3 = endpoints['conv_3']
    conv4 = endpoints['conv_4']
    conv5 = endpoints['conv_5']
    # Channel-summed attention map per level; coarser maps are upsampled to the
    # next finer level's spatial size to form imitation targets.
    attention_2 = tf.reduce_sum(conv2, axis=-1, keep_dims=True)
    attention_3 = tf.reduce_sum(conv3, axis=-1, keep_dims=True)
    imitation_3_for_2 = tf.image.resize(attention_3, size=tuple(attention_2.get_shape().as_list()[1:3]))
    attention_4 = tf.reduce_sum(conv4, axis=-1, keep_dims=True)
    imitation_4_for_3 = tf.image.resize(attention_4, size=tuple(attention_3.get_shape().as_list()[1:3]))
    attention_5 = tf.reduce_sum(conv5, axis=-1, keep_dims=True)
    imitation_5_for_4 = tf.image.resize(attention_5, size=tuple(attention_4.get_shape().as_list()[1:3]))
    ## FPN: project each level to a common channel width, then top-down merge.
    fpn_channel = 128
    up5 = slim.conv2d(conv5, fpn_channel, [1, 1], rate=1, activation_fn=None, scope='fpn_conv5')
    ## expand the height, width, and combine with the conv4
    f_size = conv4.get_shape().as_list()[1:3]
    fpn_feat6 = tf.image.resize(up5, f_size)
    fpn_conv4 = slim.conv2d(conv4, fpn_channel, [1, 1], rate=1, activation_fn=None, scope='fpn_conv4')
    up6 = fpn_feat6 + fpn_conv4
    up6 = group_norm(up6)
    up6 = lrelu(up6)
    # Same upsample-project-add pattern down to conv3, conv2, conv1.
    f_size = conv3.get_shape().as_list()[1:3]
    fpn_feat7 = tf.image.resize(up6, f_size)
    fpn_conv3 = slim.conv2d(conv3, fpn_channel, [1, 1], rate=1, activation_fn=None, scope='fpn_conv3')
    up7 = fpn_feat7 + fpn_conv3
    up7 = group_norm(up7)
    up7 = lrelu(up7)
    f_size = conv2.get_shape().as_list()[1:3]
    fpn_feat8 = tf.image.resize(up7, f_size)
    fpn_conv2 = slim.conv2d(conv2, fpn_channel, [1, 1], rate=1, activation_fn=None, scope='fpn_conv2')
    up8 = fpn_feat8 + fpn_conv2
    up8 = group_norm(up8)
    up8 = lrelu(up8)
    f_size = conv1.get_shape().as_list()[1:3]
    fpn_feat9 = tf.image.resize(up8, f_size)
    fpn_conv1 = slim.conv2d(conv1, fpn_channel, [1, 1], rate=1, activation_fn=None, scope='fpn_conv1')
    up9 = fpn_feat9 + fpn_conv1
    up9 = group_norm(up9)
    up9 = lrelu(up9)
    # Output head: progressively narrow to 3 channels, sigmoid at the end.
    conv9 = slim.conv2d(up9, 128, [1, 1], rate=1, activation_fn=None, scope='g_conv9_1')
    conv9 = group_norm(conv9)
    conv9 = lrelu(conv9)
    conv9 = slim.conv2d(conv9, 128, [3, 3], rate=1, activation_fn=None, scope='g_conv9_2')
    conv9 = group_norm(conv9)
    conv9 = lrelu(conv9)
    conv10 = slim.conv2d(conv9, 32, [1, 1], rate=1, activation_fn=None, scope='g_conv10')
    conv10 = group_norm(conv10)
    conv10 = lrelu(conv10)
    conv11 = slim.conv2d(conv10, 32, [3, 3], rate=1, activation_fn=None, scope='g_conv11')
    conv11 = group_norm(conv11)
    conv11 = lrelu(conv11)
    conv12 = slim.conv2d(conv11, 3, [3, 3], rate=1, activation_fn=None, scope='g_conv12')
    conv12 = group_norm(conv12)
    conv12 = lrelu(conv12)
    conv13 = slim.conv2d(conv12, 3, [1, 1], rate=1, activation_fn=None, scope='g_conv13')
    conv13 = group_norm(conv13)
    conv13 = tf.sigmoid(conv13)
    attentions = [attention_2, attention_3, attention_4, attention_5]
    self_attention_pairs = [(attention_2, imitation_3_for_2), (attention_3, imitation_4_for_3), (attention_4, imitation_5_for_4)]
    return conv13, self_attention_pairs, attentions
def stats_graph(graph):
    """Print total FLOPs and trainable-parameter count for a TF graph.

    The FLOP count also includes the variable-initialization ops.
    """
    builder = tf.profiler.ProfileOptionBuilder
    flops = tf.profiler.profile(graph, options=builder.float_operation())
    params = tf.profiler.profile(graph, options=builder.trainable_variables_parameter())
    print('FLOPs: {}; Trainable params: {}'.format(flops.total_float_ops, params.total_parameters))
if __name__ == '__main__':
    # Smoke test: build the network once on a placeholder input and report
    # its FLOPs / trainable-parameter counts.
    with tf.Graph().as_default() as graph:
        input = tf.placeholder(shape=[None, 139*2, 209*2, 3], dtype=tf.float32)
        out = fpn_net_lite(input, is_training=False)
        stats_graph(graph)
        pass
| 40.229508
| 129
| 0.666259
|
9b26261a914cf28bf9d214d6691011717bcc892e
| 972
|
py
|
Python
|
project/urls.py
|
Kibet1816/Neighbourhood
|
fcfa7e23910f35085201440a537785c8bff363c9
|
[
"MIT"
] | null | null | null |
project/urls.py
|
Kibet1816/Neighbourhood
|
fcfa7e23910f35085201440a537785c8bff363c9
|
[
"MIT"
] | 6
|
2020-06-05T22:19:07.000Z
|
2022-03-11T23:55:52.000Z
|
project/urls.py
|
Kibet1816/Neighbourhood
|
fcfa7e23910f35085201440a537785c8bff363c9
|
[
"MIT"
] | null | null | null |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Main application routes (empty prefix matches everything else).
    url(r'',include('app.urls')),
    # django-registration-redux "simple" backend: signup/login/logout flows.
    url(r'^accounts/', include('registration.backends.simple.urls')),
    # Log out and redirect to the landing page.
    url(r'^logout/$', views.logout, {"next_page": '/'}),
]
| 36
| 79
| 0.693416
|
680a701631db69d3d7e589284845dfe3cdcaf388
| 330
|
py
|
Python
|
leapyear.py
|
SUBHAROOP/Learning-Python
|
83a28a5e71ed3ab33c1510ba90b62559d241039e
|
[
"MIT"
] | null | null | null |
leapyear.py
|
SUBHAROOP/Learning-Python
|
83a28a5e71ed3ab33c1510ba90b62559d241039e
|
[
"MIT"
] | null | null | null |
leapyear.py
|
SUBHAROOP/Learning-Python
|
83a28a5e71ed3ab33c1510ba90b62559d241039e
|
[
"MIT"
] | null | null | null |
a = int(input('Enter the Year:'))
# Gregorian leap-year rule: divisible by 4, except century years, which
# must also be divisible by 400.
# Bug fix: the original printed '{0}Leap year' (missing ' is ') in the
# divisible-by-400 branch; all branches now use consistent wording.
if a % 4 == 0 and (a % 100 != 0 or a % 400 == 0):
    print('{0} is Leap year'.format(a))
else:
    print('{0} is not Leap year'.format(a))
| 25.384615
| 53
| 0.448485
|
f4898e7b9dde1d3b0f675fb1efbac40a3f38500d
| 3,680
|
py
|
Python
|
huggingface/inferentia/question_answering/model.py
|
yeahhhhhhhhhh/djl-demo
|
261b30541a966561fba9a91a3c6f7d5dca1f5e69
|
[
"Apache-2.0"
] | 44
|
2021-04-21T14:22:39.000Z
|
2022-03-28T05:43:43.000Z
|
huggingface/inferentia/question_answering/model.py
|
yeahhhhhhhhhh/djl-demo
|
261b30541a966561fba9a91a3c6f7d5dca1f5e69
|
[
"Apache-2.0"
] | 9
|
2021-04-21T18:23:30.000Z
|
2022-01-17T05:14:42.000Z
|
huggingface/inferentia/question_answering/model.py
|
yeahhhhhhhhhh/djl-demo
|
261b30541a966561fba9a91a3c6f7d5dca1f5e69
|
[
"Apache-2.0"
] | 22
|
2021-04-23T02:33:19.000Z
|
2022-02-23T05:43:38.000Z
|
#!/usr/bin/env python
#
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import logging
import os
import torch
import torch_neuron
from djl_python import Input
from djl_python import Output
from transformers import AutoTokenizer
class QuestionAnswering(object):
    """DJL Serving handler for extractive question answering on Inferentia.

    Lazily loads a TorchScript-traced model and a HuggingFace tokenizer,
    then answers a question from a context paragraph by selecting the
    highest-scoring start/end token span.
    """

    def __init__(self):
        # Model/tokenizer are loaded lazily in initialize().
        self.max_length = 128  # max token length for question + paragraph
        self.device = None
        self.model = None
        self.tokenizer = None
        self.initialized = False

    def initialize(self, properties: dict):
        """Load the traced model and tokenizer onto the configured device.

        :param properties: DJL properties; "device_id" of "-1" selects CPU,
            otherwise the value is used as a CUDA device index.
        """
        visible_cores = os.getenv("NEURON_RT_VISIBLE_CORES")
        # Bug fix: os.getenv returns None when the variable is unset, and
        # "str" + None raises TypeError. Lazy %s formatting renders None safely.
        logging.info("NEURON_RT_VISIBLE_CORES: %s", visible_cores)
        device_id = properties.get("device_id")
        # NOTE(review): assumes "device_id" is always present as a numeric
        # string or "-1"; a missing key would make the concatenation below
        # fail — confirm the DJL properties contract.
        device_id = "cpu" if device_id == "-1" else "cuda:" + device_id
        self.device = torch.device(device_id)
        self.model = torch.jit.load("question_answering.pt").to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(os.getcwd(), do_lower_case=True)
        self.initialized = True

    def inference(self, inputs: Input):
        """Answer {"question": ..., "paragraph": ...} JSON input.

        :param inputs: DJL Input whose JSON body holds the question/paragraph.
        :return: Output with a JSON list of predicted answer strings, or a
            500 Output when inference fails.
        """
        try:
            data = inputs.get_as_json()
            question = data["question"]
            paragraph = data["paragraph"]
            tokens = self.tokenizer.encode_plus(question,
                                                paragraph,
                                                max_length=self.max_length,
                                                truncation=True,
                                                padding='max_length',
                                                add_special_tokens=True,
                                                return_tensors="pt")
            input_ids = tokens["input_ids"].to(self.device)
            attention_mask = tokens["attention_mask"].to(self.device)

            inferences = []
            out = self.model(input_ids, attention_mask)
            answer_start_scores = out[0]
            answer_end_scores = out[1]

            num_rows, num_cols = answer_start_scores.shape
            # Decode the argmax start..end token span of each row back to text.
            for i in range(num_rows):
                answer_start_scores_one_seq = answer_start_scores[i].unsqueeze(0)
                answer_start = torch.argmax(answer_start_scores_one_seq)
                answer_end_scores_one_seq = answer_end_scores[i].unsqueeze(0)
                answer_end = torch.argmax(answer_end_scores_one_seq) + 1
                token_id = self.tokenizer.convert_ids_to_tokens(input_ids[i].tolist()[answer_start:answer_end])
                prediction = self.tokenizer.convert_tokens_to_string(token_id)
                inferences.append(prediction)

            outputs = Output()
            outputs.add_as_json(inferences)
        except Exception as e:
            logging.error(e, exc_info=True)
            # error handling
            outputs = Output(code=500, message=str(e))
            outputs.add("inference failed", key="data")
        return outputs
# Module-level singleton: DJL calls handle() repeatedly on one worker.
_model = QuestionAnswering()


def handle(inputs: Input):
    """Default DJL handler entry point.

    Initializes the singleton on first use; an empty Input is the
    initialization ping and returns None.
    """
    if not _model.initialized:
        _model.initialize(inputs.get_properties())

    return None if inputs.is_empty() else _model.inference(inputs)
| 36.435644
| 111
| 0.61413
|
4095094fbb70cfbac18fcc1432f06dc749deb4ff
| 12,509
|
py
|
Python
|
recognition/scripts/face_detection_node.py
|
aaramirezd/open_ptrack_v2
|
2c4c29ee72450f67a9f4ea777d1f4b8e3dad7358
|
[
"BSD-3-Clause"
] | null | null | null |
recognition/scripts/face_detection_node.py
|
aaramirezd/open_ptrack_v2
|
2c4c29ee72450f67a9f4ea777d1f4b8e3dad7358
|
[
"BSD-3-Clause"
] | null | null | null |
recognition/scripts/face_detection_node.py
|
aaramirezd/open_ptrack_v2
|
2c4c29ee72450f67a9f4ea777d1f4b8e3dad7358
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import sys, math
import dlib
import datetime
import numpy
import cProfile
import multiprocessing
import tf
import rospy
import rospkg
import cv_bridge
import message_filters
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from opt_msgs.msg import *
from dynamic_reconfigure.server import Server
from recognition.cfg import FaceDetectionConfig
import recognition_utils as recutils
def timestampMs():
    """Milliseconds elapsed since the Unix epoch (UTC), as a float."""
    epoch = datetime.datetime(1970, 1, 1)
    elapsed = datetime.datetime.utcnow() - epoch
    return float(elapsed.total_seconds() * 1000)
# this node performs 2D face detection on the ROIs calculated from the people detection result
# it replaces the DetectionArray/detections/box_2D with the detected face regions while do not change the other members of the DetectionArray
# then, it outputs the modified DetectionArray as an array of face detection results
# note that the replaced box_2d is on the color image coordinate
class FaceDetectionNode:
def __init__(self, sensor_name):
self.sensor_name = sensor_name
self.cfg_server = Server(FaceDetectionConfig, self.cfg_callback)
self.cv_bridge = cv_bridge.CvBridge()
self.detector = dlib.fhog_object_detector(self.face_detector_path)
self.pool = multiprocessing.Pool(3)
# get transformation between world, color, and depth images
now = rospy.Time(0)
tf_listener = tf.TransformListener()
print self.sensor_name
self.ir2rgb = recutils.lookupTransform(tf_listener, self.sensor_name + '_ir_optical_frame', self.sensor_name + '_rgb_optical_frame', 10.0, now)
# self.ir2rgb = numpy.eye(4, 4).astype(numpy.float64)
print '--- ir2rgb ---\n', self.ir2rgb
self.world2rgb = recutils.lookupTransform(tf_listener, '/world', self.sensor_name + '_rgb_optical_frame', 10.0, now)
print '--- world2rgb ---\n', self.world2rgb
self.pub = rospy.Publisher('/face_detector/detections', DetectionArray, queue_size=10)
self.pub_local = rospy.Publisher(self.sensor_name + '/face_detector/detections', DetectionArray, queue_size=10)
try:
print 'tryingnsecs_round to listen raw rgb image topic...'
rospy.client.wait_for_message(self.sensor_name + '/rgb/image', Image, 1.0)
img_subscriber = message_filters.Subscriber(self.sensor_name + '/rgb/image', Image)
except rospy.ROSException:
print 'failed, listen compressed rgb image topic'
img_subscriber = message_filters.Subscriber(self.sensor_name + '/rgb/image/compressed', CompressedImage)
self.subscribers = [
img_subscriber,
message_filters.Subscriber(self.sensor_name + '/rgb/camera_info', CameraInfo),
message_filters.Subscriber('/detector/detections', DetectionArray)
]
# TypeSynchronizer doesn't work, the image time and the detection time are slightly different?
# self.ts = message_filters.TimeSynchronizer(self.subscribers, 5)
# self.ts = message_filters.ApproximateTimeSynchronizer(self.subscribers, 5, 0.0001)
self.ts = recutils.TimeSynchronizer(self.subscribers, 60, 1000)
self.ts.registerCallback(self.callback)
self.reset_time_sub = rospy.Subscriber('/reset_time', Empty, self.reset_time)
print("init complete")
# callback for dynamic configure
def cfg_callback(self, config, level):
package_path = rospkg.RosPack().get_path('recognition')
self.face_detector_path = package_path + config.face_detector_path # the path to the face detector model file
self.confidence_thresh = config.confidence_thresh # the threshold for confidence of face detection
self.roi_width = config.roi_width_ # the width of a face detection ROI in the world space [m]
self.calc_roi_from_top = config.calc_roi_from_top # if true, ROIs are calculated from the top positions of detected clusters
self.head_offset_z_top = config.head_offset_z_top # the distance between the top position of a human cluster and the center of the face [m]
self.head_offset_z_centroid = config.head_offset_z_centroid # the distance between the centroid of a human cluster and the center of the face [m]
self.upscale_minsize = config.upscale_minsize # the face detection ROI is upscaled so that its width get larger than #upscale_minsize
self.visualization = config.visualization # if true, the visualization of the detection will be shown
print '--- cfg_callback ---'
print 'confidence_thresh', config.confidence_thresh
print 'roi_width', config.roi_width_
print 'calc_roi_from_top', config.calc_roi_from_top
print 'head_offset_z_top', config.head_offset_z_top
print 'head_offset_z_centroid', config.head_offset_z_centroid
print 'upscale_minsize', config.upscale_minsize
print 'visualization', config.visualization
return config
def reset_time(self, msg):
print 'reset time'
self.ts = message_filters.ApproximateTimeSynchronizer(self.subscribers, 200, 0.00001)
self.ts.registerCallback(self.callback)
# callback
def callback(self, rgb_image_msg, rgb_info_msg, detection_msg):
if detection_msg.header.frame_id != self.sensor_name + '_ir_optical_frame':
print 'frame_ids not matched'
return
t1 = rospy.Time.now()
# read rgb image
if type(rgb_image_msg) is CompressedImage:
rgb_image = recutils.decompress(rgb_image_msg)
else:
rgb_image = self.cv_bridge.imgmsg_to_cv2(rgb_image_msg)
#gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
# calculate ROIs and then run the 2D face detector
rois = self.calc_rois(rgb_info_msg, detection_msg)
faces = map(lambda x: self.detect_face(rgb_image, x), rois)
#print(len(faces))
#print(faces)
# publish the face detection result
for face, detection in zip(faces, detection_msg.detections):
if face is None:
detection.box_2D = BoundingBox2D(x=0, y=0, width=0, height=0)
else:
detection.box_2D = BoundingBox2D(x=face[0], y=face[1], width=face[2]-face[0], height=face[3]-face[1])
self.pub.publish(detection_msg)
self.pub_local.publish(detection_msg)
t2 = rospy.Time.now()
#if self.visualization:
# self.visualize(rgb_image, rois, faces, (t2 - t1).to_sec())
def improve(self, rgb_image):
if numpy.amax(rgb_image) > 1:
info = numpy.iinfo(rgb_image.dtype)
rgb_image = rgb_image.astype(numpy.float) / info.max
"""
sz = rgb_image.shape
B = rgb_image[:,:,0]
G = rgb_image[:,:,1]
R = rgb_image[:,:,2]
B = numpy.pad(B, (10,), 'edge')
G = numpy.pad(G, (10,), 'edge')
R = numpy.pad(R, (10,), 'edge')
rgb_image = numpy.dstack((B, G, R))
"""
w = 0.8
Inv = 1 - rgb_image
B = Inv[:,:,0]
G = Inv[:,:,1]
R = Inv[:,:,2]
B1 = numpy.ravel(B)
G1 = numpy.ravel(G)
R1 = numpy.ravel(R)
I = (B1 + G1 + R1) / 3
n = I.size
N = math.floor(n * 0.002)
Be = cv2.erode(B, numpy.ones((7,7), numpy.uint8), 1)
Ge = cv2.erode(G, numpy.ones((7,7), numpy.uint8), 1)
Re = cv2.erode(R, numpy.ones((7,7), numpy.uint8), 1)
dc = numpy.minimum(numpy.minimum(numpy.ravel(Be), numpy.ravel(Ge)), numpy.ravel(Re))
i = numpy.argsort(-dc)
tmp = I[i[0:int(N)]]
j = numpy.argsort(-tmp)
Ab = B1[i[j[0]]]
Ag = G1[i[j[0]]]
Ar = R1[i[j[0]]]
t = numpy.maximum(1 - w * numpy.minimum(numpy.minimum(Re/Ar, Ge/Ag), Be/Ab), 10**(-7))
lc = t < 0.5
t[lc] = 2 * t[lc]**2
Sb = (B - Ab) / t + Ab
Sg = (G - Ag) / t + Ag
Sr = (R - Ar) / t + Ar
Sb = numpy.clip(Sb, 0, 1)
Sg = numpy.clip(Sg, 0, 1)
Sr = numpy.clip(Sr, 0, 1)
comb = numpy.dstack((Sb, Sg, Sr))
out = numpy.uint8((1 - comb)*255.999) #[11:sz[0]+10, 11:sz[1]+10, :]
#cv2.imwrite('improved.png', out)
return out
# visualizes the detection result
def visualize(self, rgb_image, rois, faces, processing_time):
for roi in rois:
cv2.rectangle(rgb_image, (roi[0], roi[1]), (roi[2], roi[3]), (0, 255, 0), 4)
for face in faces:
if face is None:
continue
cv2.rectangle(rgb_image, (face[0], face[1]), (face[2], face[3]), (0, 0, 255), 4)
factor = rgb_image.shape[1] / 480
rgb_image = cv2.resize(rgb_image, (rgb_image.shape[1]/factor, rgb_image.shape[0]/factor))
text = '%.2fmsec / %.2ffps' % (processing_time * 1000.0, (1.0 / max(processing_time, 0.0001)))
cv2.putText(rgb_image, text, (10, 15), cv2.FONT_HERSHEY_PLAIN, 0.8, (64, 64, 64), 3)
cv2.putText(rgb_image, text, (10, 15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255))
cv2.imshow('rgb_image', rgb_image)
cv2.waitKey(30)
# calculate ROIs from the top positions of human clusters
def calc_rois(self, rgb_info_msg, detection_msg):
# the vector between the top position of a cluster and the center of the face
head_offset_z = self.head_offset_z_top if self.calc_roi_from_top else self.head_offset_z_centroid
head_offset = numpy.dot(self.world2rgb[:3, :3], [0, 0, head_offset_z])
head_offset = numpy.array([head_offset[0], head_offset[1], head_offset[2], 0.0])
# calculate the face positions from the detected clusters
face_positions = []
for detection in detection_msg.detections:
if self.calc_roi_from_top:
top_pt = [detection.top.x, detection.top.y, detection.top.z, 1.0]
else:
top_pt = [detection.centroid.x, detection.centroid.y, detection.centroid.z, 1.0]
top_pt += head_offset
face_positions.append(top_pt)
if len(face_positions) == 0:
return []
# transform the positions from the IR coordinate to the RGB coordinate
face_positions = numpy.transpose(numpy.dot(self.ir2rgb, numpy.transpose(face_positions))[:3, :])
# project the face positions on the image
rvec = numpy.array([0, 0, 0], dtype=numpy.float64)
tvec = numpy.array([0, 0, 0], dtype=numpy.float64)
camera_matrix = numpy.array(rgb_info_msg.K, dtype=numpy.float64).reshape(3, 3)
distortion = numpy.array(rgb_info_msg.D, dtype=numpy.float64)
projected = cv2.projectPoints(face_positions.astype(numpy.float64), rvec, tvec, camera_matrix, distortion)[0]
# calculate the ROIs
rois = []
for i in range(len(detection_msg.detections)):
roi = self.calc_roi(rgb_info_msg, self.roi_width, face_positions[i], projected[i, 0, :])
rois.append(roi)
return rois
# calculate ROI from a 3D position
def calc_roi(self, rgb_info_msg, w, xyz, uv):
# project the roi_width from the world coordinate[m] to the image coordinate[pix]
half_w = w * rgb_info_msg.K[0] / xyz[2]
left = int(uv[0] - half_w)
top = int(uv[1] - half_w)
right = int(uv[0] + half_w)
bottom = int(uv[1] + half_w)
# ROI range check
left = min(rgb_info_msg.width, max(0, left))
top = min(rgb_info_msg.height, max(0, top))
right = min(rgb_info_msg.width, max(0, right))
bottom = min(rgb_info_msg.height, max(0, bottom))
width = max(0, right - left)
height = max(0, bottom - top)
return (left, top, left + width, top + height)
# detect a face on an ROI
# return None if no face is detected
def detect_face(self, gray_image, roi_rect):
# check if the ROI is valid
print(roi_rect)
if roi_rect[2] <= roi_rect[0] or roi_rect[3] <= roi_rect[1]:
return None
# detection
roi = gray_image[roi_rect[1]:roi_rect[3], roi_rect[0]:roi_rect[2], :]
# roi = roi.reshape(roi.shape[0], roi.shape[1]).astype(numpy.uint8)
roi = self.improve(roi)
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
scaling_factor = 1.0
#width = roi_rect[2] - roi_rect[0]
#if width < self.upscale_minsize:
# scaling_factor = float(width) / self.upscale_minsize
# roi = cv2.resize(roi, (self.upscale_minsize, self.upscale_minsize))
detected, scores, idx = self.detector.run(roi, 0, self.confidence_thresh)
if len(detected) <= 0:
return None
return (
int(scaling_factor * detected[0].left()) + roi_rect[0],
int(scaling_factor * detected[0].top()) + roi_rect[1],
int(scaling_factor * detected[0].right()) + roi_rect[0],
int(scaling_factor * detected[0].bottom()) + roi_rect[1]
)
def improve_faster(self, rgb_image): #but worse
B = rgb_image[:,:,0]
G = rgb_image[:,:,1]
R = rgb_image[:,:,2]
im_max = numpy.amax(rgb_image)
sh = rgb_image.shape
mean = numpy.sum(.114*B + .587*G + .299*R) / (sh[0] * sh[1])
p0 = mean * .035 #1.6
if p0 < 2:
p0 = 2
p1 = -0.018
alph = p0 + p1 * mean
B_int = 160.4 / (im_max + 15.81)
B_imp = alph * B_int * Ib
G_int = 179.3 / (im_max + 15.42)
G_imp = alph * G_int * Ig
R_int = 170.7 / (im_max + 15.49)
R_imp = alph * R_int * Ir
out = numpy.dstack((B_imp, G_imp, R_imp))
return out
def main():
sensor_name = '/kinect2_head' if len(sys.argv) < 2 else '/' + sys.argv[1]
print 'sensor_name', sensor_name
rospy.init_node('face_detection_node_' + sensor_name[1:])
node = FaceDetectionNode(sensor_name)
rospy.spin()
if __name__ == '__main__':
main()
| 35.638177
| 148
| 0.706371
|
d93c7a6e502feaef3ac8f8b041dee076c6531e46
| 576
|
py
|
Python
|
webapp/api/api/migrations/0063_auto_20220606_1107.py
|
CogStack/MedCATweb
|
8a0e751769e019502ff0fa4565f21a433a339eb4
|
[
"Apache-2.0"
] | 2
|
2019-07-18T05:48:49.000Z
|
2019-07-19T00:27:26.000Z
|
webapp/api/api/migrations/0063_auto_20220606_1107.py
|
CogStack/MedCATweb
|
8a0e751769e019502ff0fa4565f21a433a339eb4
|
[
"Apache-2.0"
] | null | null | null |
webapp/api/api/migrations/0063_auto_20220606_1107.py
|
CogStack/MedCATweb
|
8a0e751769e019502ff0fa4565f21a433a339eb4
|
[
"Apache-2.0"
] | 2
|
2019-07-16T17:07:00.000Z
|
2019-08-12T14:06:26.000Z
|
# Generated by Django 2.2.24 on 2022-06-06 11:07
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0062_auto_20220426_1558'),
]
operations = [
migrations.AlterField(
model_name='conceptdb',
name='name',
field=models.CharField(blank=True, default='', max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_-]*$', 'Only alpahanumeric characters, -, _ are allowed for CDB names')]),
),
]
| 28.8
| 212
| 0.645833
|
e10b56bb6de8dd0b0934537d2c9fa8097d7fc03d
| 11,636
|
py
|
Python
|
clients/kratos/python/ory_kratos_client/model/inline_response503.py
|
ory/sdk-generator
|
958314d130922ad6f20f439b5230141a832231a5
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/ory_kratos_client/model/inline_response503.py
|
ory/sdk-generator
|
958314d130922ad6f20f439b5230141a832231a5
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/ory_kratos_client/model/inline_response503.py
|
ory/sdk-generator
|
958314d130922ad6f20f439b5230141a832231a5
|
[
"Apache-2.0"
] | null | null | null |
"""
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.10.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_kratos_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_kratos_client.exceptions import ApiAttributeError
class InlineResponse503(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'errors': ({str: (str,)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'errors': 'errors', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, errors, *args, **kwargs): # noqa: E501
"""InlineResponse503 - a model defined in OpenAPI
Args:
errors ({str: (str,)}): Errors contains a list of errors that caused the not ready status.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.errors = errors
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, errors, *args, **kwargs): # noqa: E501
"""InlineResponse503 - a model defined in OpenAPI
Args:
errors ({str: (str,)}): Errors contains a list of errors that caused the not ready status.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.errors = errors
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.243346
| 446
| 0.579151
|
fb933a0f53f9026bcbba5da216d3b8ee11958120
| 7,611
|
py
|
Python
|
tests/core/foundation_tests.py
|
LTD-Beget/cement
|
e885932583c14037599a2aa8a16d4d8a521364bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/foundation_tests.py
|
LTD-Beget/cement
|
e885932583c14037599a2aa8a16d4d8a521364bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/foundation_tests.py
|
LTD-Beget/cement
|
e885932583c14037599a2aa8a16d4d8a521364bd
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests for cement.core.setup."""
import os
import sys
from cement.core import foundation, exc, backend, config, extension, plugin
from cement.core import log, output, handler, hook, arg, controller
from cement.utils import test
from cement.utils.misc import init_defaults
def my_extended_func():
return 'KAPLA'
class DeprecatedApp(foundation.CementApp):
class Meta:
label = 'deprecated'
defaults = None
class TestOutputHandler(output.CementOutputHandler):
file_suffix = None
class Meta:
interface = output.IOutput
label = 'test_output_handler'
def _setup(self, config_obj):
self.config = config_obj
def render(self, data_dict, template=None):
return None
class BogusBaseController(controller.CementBaseController):
class Meta:
label = 'bad_base_controller_label'
def my_hook_one(app):
return 1
def my_hook_two(app):
return 2
def my_hook_three(app):
return 3
class FoundationTestCase(test.CementCoreTestCase):
def setUp(self):
self.app = self.make_app('my_app')
def test_argv_is_none(self):
app = self.make_app('myapp', argv=None)
app.setup()
self.eq(app.argv, list(sys.argv[1:]))
def test_bootstrap(self):
app = self.make_app('my_app', bootstrap='tests.bootstrap')
app.setup()
self.eq(app._loaded_bootstrap.__name__, 'tests.bootstrap')
def test_reload_bootstrap(self):
app = self.make_app('my_app', bootstrap='cement.utils.test')
app._loaded_bootstrap = test
app.setup()
self.eq(app._loaded_bootstrap.__name__, 'cement.utils.test')
def test_argv(self):
app = self.make_app('my_app', argv=['bogus', 'args'])
self.eq(app.argv, ['bogus', 'args'])
@test.raises(exc.FrameworkError)
def test_resolve_handler_bad_handler(self):
class Bogus(object):
pass
try:
self.app._resolve_handler('output', Bogus)
except exc.FrameworkError as e:
self.ok(e.msg.find('resolve'))
raise
def test_default(self):
self.app.setup()
self.app.run()
def test_passed_handlers(self):
from cement.ext import ext_configparser
from cement.ext import ext_logging
from cement.ext import ext_argparse
from cement.ext import ext_plugin
from cement.ext import ext_nulloutput
# forces CementApp._resolve_handler to register the handler
from cement.ext import ext_json
app = self.make_app('my-app-test',
config_handler=ext_configparser.ConfigParserConfigHandler,
log_handler=ext_logging.LoggingLogHandler(),
arg_handler=ext_argparse.ArgParseArgumentHandler(),
extension_handler=extension.CementExtensionHandler(),
plugin_handler=ext_plugin.CementPluginHandler(),
output_handler=ext_json.JsonOutputHandler(),
argv=[__file__, '--debug']
)
app.setup()
def test_debug(self):
app = self.make_app('my-app-test', argv=[__file__])
app.setup()
self.eq(app.debug, False)
self.reset_backend()
app = self.make_app('my-app-test', argv=[__file__, '--debug'])
app.setup()
self.eq(app.debug, True)
self.reset_backend()
defaults = init_defaults('my-app-test')
defaults['my-app-test']['debug'] = True
app = self.make_app('my-app-test', argv=[__file__],
config_defaults=defaults)
app.setup()
self.eq(app.debug, True)
def test_null_out(self):
null = foundation.NullOut()
null.write('nonsense')
def test_render(self):
# Render with default
self.app.setup()
self.app.render(dict(foo='bar'))
# Render with no output_handler... this is hackish, but there are
# circumstances where app.output would be None.
app = self.make_app('test', output_handler=None)
app.setup()
app.output = None
app.render(dict(foo='bar'))
@test.raises(exc.FrameworkError)
def test_bad_label(self):
try:
app = foundation.CementApp(None)
except exc.FrameworkError as e:
# FIX ME: verify error msg
raise
@test.raises(exc.FrameworkError)
def test_bad_label_chars(self):
try:
app = foundation.CementApp('some!bogus()label')
except exc.FrameworkError as e:
self.ok(e.msg.find('alpha-numeric'))
raise
def test_add_arg_shortcut(self):
self.app.setup()
self.app.add_arg('--foo', action='store')
def test_reset_output_handler(self):
app = self.make_app('test', argv=[], output_handler=TestOutputHandler)
app.setup()
app.run()
app.output = None
app._meta.output_handler = None
app._setup_output_handler()
def test_lay_cement(self):
app = self.make_app('test', argv=['--quiet'])
app = self.make_app('test', argv=['--json', '--yaml'])
def test_none_member(self):
class Test(object):
var = None
self.app.setup()
self.app.args.parsed_args = Test()
try:
self.app._parse_args()
except SystemExit:
pass
@test.raises(exc.CaughtSignal)
def test_cement_signal_handler(self):
import signal
try:
foundation.cement_signal_handler(signal.SIGTERM, 5)
except exc.CaughtSignal as e:
self.eq(e.signum, signal.SIGTERM)
self.eq(e.frame, 5)
raise
def test_cement_without_signals(self):
app = self.make_app('test', catch_signals=None)
app.setup()
def test_extend(self):
self.app.extend('kapla', my_extended_func)
self.eq(self.app.kapla(), 'KAPLA')
@test.raises(exc.FrameworkError)
def test_extended_duplicate(self):
self.app.extend('config', my_extended_func)
def test_no_handler(self):
app = self.make_app('myapp')
app._resolve_handler('cache', None, raise_error=False)
def test_config_files_is_none(self):
app = self.make_app('myapp', config_files=None)
app.setup()
label = 'myapp'
user_home = os.path.abspath(os.path.expanduser(os.environ['HOME']))
files = [
os.path.join('/', 'etc', label, '%s.conf' % label),
os.path.join(user_home, '.%s.conf' % label),
os.path.join(user_home, '.%s' % label, 'config'),
]
for f in files:
res = f in app._meta.config_files
self.ok(res)
@test.raises(exc.FrameworkError)
def test_base_controller_label(self):
app = self.make_app('myapp', base_controller=BogusBaseController)
app.setup()
def test_pargs(self):
app = self.make_app(argv=['--debug'])
app.setup()
app.run()
self.eq(app.pargs.debug, True)
def test_last_rendered(self):
self.app.setup()
output_text = self.app.render({'foo':'bar'})
last_data, last_output = self.app.last_rendered
self.eq({'foo':'bar'}, last_data)
self.eq(output_text, last_output)
def test_get_last_rendered(self):
### DEPRECATED - REMOVE AFTER THE FUNCTION IS REMOVED
self.app.setup()
output_text = self.app.render({'foo':'bar'})
last_data, last_output = self.app.get_last_rendered()
self.eq({'foo':'bar'}, last_data)
self.eq(output_text, last_output)
| 30.444
| 78
| 0.615556
|
8d96e3dec5ee1e8132cd8b11597c398e82e3563a
| 1,553
|
py
|
Python
|
geoparsing/product.py
|
sayonkumarsaha/satellite-index-earth-observation-experiement
|
83c853ef3e94fd01194af6f16eb66fb5ac191942
|
[
"MIT"
] | null | null | null |
geoparsing/product.py
|
sayonkumarsaha/satellite-index-earth-observation-experiement
|
83c853ef3e94fd01194af6f16eb66fb5ac191942
|
[
"MIT"
] | null | null | null |
geoparsing/product.py
|
sayonkumarsaha/satellite-index-earth-observation-experiement
|
83c853ef3e94fd01194af6f16eb66fb5ac191942
|
[
"MIT"
] | 1
|
2019-12-09T11:50:54.000Z
|
2019-12-09T11:50:54.000Z
|
from os.path import os, join
from granule import Granule
import time
def _substr(s, n, split_string):
""" Recursively get string between n-th and (n+1)-th occurence. Useful
for getting band information (bandinf) such as polarisation VV, VH, ...
"""
if n == 0:
return s[:s.find(split_string)]
else:
return _substr(s[s.find(split_string)+1:], n-1, split_string)
class Product():
""" Get a product, which is initialized with paths to granules. """
def __init__(self, path):
# file name of product, select only files which contain .SAFE and are n
p_fname = [f for f in os.listdir(path)][0]
# set path of product
self._path = join(path, p_fname)
# path to granules
self._granules_path = path + "/" + p_fname + "/GRANULE"
# file names of granules in granules path
granules_fnames = [g for g in os.listdir(self._granules_path) if g.find(".xml") == -1]
# paths to granules
self._granules_paths = {_substr(g_fname, 9, "_"): os.path.join(\
self._granules_path, g_fname) for g_fname in granules_fnames}
self._granules = {}
@property
def id(self):
return self._id
@property
def path(self):
return self._path
@property
def granules_ids(self):
return self._granules_paths.keys()
@property
def granules_paths(self):
return self._granules_paths
def get_granule(self, granule_id):
return Granule(self, granule_id)
| 27.732143
| 94
| 0.62009
|
31c839943dccdf450e5bb93e24e2938b061ef7ad
| 6,684
|
py
|
Python
|
Tests/Other.py
|
joel-intito/tm1py
|
42e59dcdeb70357577c19e974995936b5dbb1131
|
[
"MIT"
] | null | null | null |
Tests/Other.py
|
joel-intito/tm1py
|
42e59dcdeb70357577c19e974995936b5dbb1131
|
[
"MIT"
] | null | null | null |
Tests/Other.py
|
joel-intito/tm1py
|
42e59dcdeb70357577c19e974995936b5dbb1131
|
[
"MIT"
] | null | null | null |
import configparser
from pathlib import Path
import random
import unittest
from base64 import b64encode
from TM1py.Exceptions import TM1pyRestException, TM1pyException
from TM1py.Objects import MDXView, User
from TM1py.Services import TM1Service
from TM1py.Utils import Utils
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
class TestOtherMethods(unittest.TestCase):
tm1 = None
@classmethod
def setUpClass(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
@unittest.skip("Not deterministic. Needs improvement.")
def test_mdx_from_cubeview(self):
cube_names = self.tm1.cubes.get_all_names()
cube_name = cube_names[random.randrange(0, len(cube_names))]
_, public_views = self.tm1.cubes.views.get_all(cube_name=cube_name)
# if no views on cube. Recursion
if len(public_views) == 0:
self.test_mdx_from_cubeview()
else:
# random public view on random cube
view = public_views[random.randrange(0, len(public_views))]
# if random view is MDXView. Recursion
if isinstance(view, MDXView):
self.test_mdx_from_cubeview()
else:
# if native view has no dimensions on the columns. Recursion
if len(view._columns) == 0:
self.test_mdx_from_cubeview()
else:
# sum up all numeric cells in Native View
data_native_view = self.tm1.cubes.cells.get_view_content(cube_name, view.name, private=False)
sum_native_view = sum(
[float(cell['Value']) for cell in data_native_view.values() if str(cell['Value']).isdigit()])
# get mdx from native view
mdx = view.as_MDX
# sum up all numeric cells in the response of the mdx query
data_mdx = self.tm1.cubes.cells.execute_mdx(mdx)
sum_mdx = sum([float(cell['Value']) for cell in data_mdx.values() if str(cell['Value']).isdigit()])
# test it !
self.assertEqual(sum_mdx, sum_native_view)
def test_get_instances_from_adminhost(self):
servers = Utils.get_all_servers_from_adminhost(config['tm1srv01']['address'])
self.assertGreater(len(servers), 0)
def test_tm1service_with_encrypted_password_decode_b64_as_string(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
with TM1Service(
user=user.name,
password=b64encode(str.encode(user._password)),
decode_b64="True",
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl) as _:
# if no exception. Login was successful
pass
self.tm1.security.delete_user(user.name)
def test_tm1service_without_encrypted_password(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
with TM1Service(
user=user.name,
password=user._password,
decode_b64=False,
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl) as _:
# if no exception. Login was successful
pass
self.tm1.security.delete_user(user.name)
def test_tm1service_with_encrypted_password(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
with TM1Service(
user=user.name,
password=b64encode(str.encode(user._password)),
decode_b64=True,
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl) as _:
# if no exception. Login was successful
pass
self.tm1.security.delete_user(user.name)
def test_tm1service_with_encrypted_password_fail(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
self.assertRaises(TM1pyRestException, TM1Service,
user=user.name,
password=b64encode(str.encode("banana")),
decode_b64=True,
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl)
self.tm1.security.delete_user(user.name)
def test_tm1service_with_plain_password(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
with TM1Service(
user=user.name,
password=user.password,
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl) as _:
# if no exception. Login was successful
pass
self.tm1.security.delete_user(user.name)
def test_tm1service_with_plain_password_fail(self):
user_name = "TM1py user name"
user = User(name=user_name, groups=["ADMIN"], password="apple")
if user.name in self.tm1.security.get_all_user_names():
self.tm1.security.delete_user(user_name)
self.tm1.security.create_user(user)
# test with random (wrong) password
self.assertRaises(TM1pyRestException, TM1Service,
user=user.name,
password="banana",
base_url=self.tm1._tm1_rest._base_url,
ssl=self.tm1._tm1_rest._ssl)
self.tm1.security.delete_user(user.name)
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
| 39.785714
| 119
| 0.613106
|
1452080bb8eb7635db12dfddd182c3413527db9e
| 4,909
|
py
|
Python
|
screens/game.py
|
NoPlagiarism/Vk-Melody-Guess
|
8763a658705df32c1bea8abc71ac18aa45915d70
|
[
"MIT"
] | null | null | null |
screens/game.py
|
NoPlagiarism/Vk-Melody-Guess
|
8763a658705df32c1bea8abc71ac18aa45915d70
|
[
"MIT"
] | null | null | null |
screens/game.py
|
NoPlagiarism/Vk-Melody-Guess
|
8763a658705df32c1bea8abc71ac18aa45915d70
|
[
"MIT"
] | null | null | null |
from colorsys import hsv_to_rgb
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivymd.uix.slider import MDSlider as _MDSlider
from kivy.uix.screenmanager import ScreenManager
from kivymd.app import MDApp
from kivymd.uix.screen import MDScreen
from random import randint, random
from time import sleep
import threading
game_helper = """
ScreenManager:
GameScreen
ResultScreen
OverScreen
<GameScreen>:
id: game
name: "game"
slider: slider
track_field: track_field
author_field: author_field
check_btn: check_btn
play_btn: play_btn
pause_btn: pause_btn
MDIconButton:
id: play_btn
icon: "play"
pos_hint: {"center_x": .1,"center_y": .9}
UnMDSlider:
id: slider
orientation: "horizontal"
pos_hint: {"center_x": .5, "center_y": .85}
size_hint_x: .7
hint: False
min: 0
max: 235
sensitivity: "handle"
disabled: False
cursor_disabled_image: "assets/cursor.png"
value: 1
MDIconButton:
id: pause_btn
icon: "pause"
pos_hint: {"center_x": .9,"center_y": .9}
MDLabel:
text: root.score_str
pos_hint: {"center_x": .5, "center_y": .9}
font_size: "48dp"
halign: "center"
MDTextField:
id: track_field
hint_text: "Введите название песни"
size_hint_x: .8
pos_hint: {"center_x": .5, "center_y": .55}
MDTextField:
id: author_field
hint_text: "Введите автора"
size_hint_x: .8
pos_hint: {"center_x": .5, "center_y": .45}
MDRaisedButton:
id: check_btn
text: "Проверить"
size_hint: (0.75, 0.08)
font_size: 17
pos_hint: {"center_x": .5,"center_y": .3}
<ResultScreen>:
id: result
name: "result"
MDLabel:
text: "0"
pos_hint: {"center_x": .5, "center_y": .9}
font_size: "48dp"
halign: "center"
MDLabel:
text: root.track
halign: "center"
pos_hint: {"center_x": .5, "center_y": .55}
font_size: "48 sp"
color: root.track_color
MDLabel:
text: root.author
halign: "center"
pos_hint: {"center_x": .5, "center_y": .45}
font_size: "32 sp"
color: root.author_color
MDLabel:
text: "+1"
halign: "center"
font_size: "52 sp"
pos_hint: {"center_x": .5, "center_y": .2}
color: (0, .75, .30, 1)
<OverScreen>:
id: over
name: "over"
back_btn: back_btn
MDLabel:
text: "Вы набрали"
pos_hint: {"center_x": .5, "center_y": .7}
font_size: "64 sp"
halign: "center"
MDLabel:
text: root.score_str
color: root.score_color
pos_hint: {"center_x": .5, "center_y": .5}
font_size: "150 sp"
halign: "center"
MDLabel:
text: "баллов"
pos_hint: {"center_x": .5, "center_y": .3}
font_size: "64 sp"
halign: "center"
MDRaisedButton:
id: back_btn
text: "Вернуться в меню"
size_hint: (0.75, 0.08)
font_size: 17
pos_hint: {"center_x": 0.5, "center_y": 0.1}
"""
class UnMDSlider(_MDSlider):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def on_touch_down(self, touch):
pass
def on_touch_move(self, touch):
pass
class GameScreen(MDScreen):
score_str = StringProperty("0")
slider = ObjectProperty(None)
track_field = ObjectProperty(None)
author_field = ObjectProperty(None)
check_btn = ObjectProperty(None)
play_btn = ObjectProperty(None)
pause_btn = ObjectProperty(None)
def __init__(self, **kwargs):
super(GameScreen, self).__init__(**kwargs)
def init_binds(self, btn_handler):
self.slider.cursor_disabled_image = self.slider.cursor_image
self.check_btn.bind(on_press=btn_handler)
class ResultScreen(MDScreen):
score_str = StringProperty("1")
score_delta = StringProperty("+1")
track = StringProperty("")
author = StringProperty("")
track_color = ListProperty(defaultvalue=(0, 0, 0, 1))
author_color = ListProperty(defaultvalue=(0, 0, 0, 1))
COLORS = {True: (0, .75, .30, 1), False: (.93, .26, .26, 1)}
def __call__(self, track: str, author: str, results):
self.track = track
self.author = author
self.track_color = self.COLORS.get(results[0], (0, 0, 0, 1))
self.author_color = self.COLORS.get(results[1], (0, 0, 0, 1))
class OverScreen(MDScreen):
score_str = StringProperty("0")
score_color = ListProperty((0, 0, 0, 1))
back_btn = ObjectProperty(None)
def __call__(self, score: int):
self.score_str = str(score)
if score > 300:
score = 300
self.score_color = (*hsv_to_rgb(score/360, 1, 1), 1)
| 27.892045
| 72
| 0.598085
|
1b02a6d0b82db3637171c293b2f99a0c832b023d
| 23,366
|
py
|
Python
|
catalyst/__main__.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
catalyst/__main__.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
catalyst/__main__.py
|
erlendve/catalyst
|
463575bc23c0abd1287f8ec81c4377baabf2b8b8
|
[
"Apache-2.0"
] | null | null | null |
import errno
import os
from functools import wraps
import click
import sys
import logbook
import pandas as pd
from catalyst.marketplace.marketplace import Marketplace
from six import text_type
from catalyst.data import bundles as bundles_module
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.utils.exchange_utils import delete_algo_folder
from catalyst.utils.cli import Date, Timestamp
from catalyst.utils.run_algo import _run, load_extensions
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
'-e',
'--extension',
multiple=True,
help='File or module path to a catalyst extension to load.',
)
@click.option(
'--strict-extensions/--non-strict-extensions',
is_flag=True,
help='If --strict-extensions is passed then catalyst will not run '
'if it cannot load all of the specified extensions. If this is '
'not passed or --non-strict-extensions is passed then the '
'failure will be logged but execution will continue.',
)
@click.option(
'--default-extension/--no-default-extension',
is_flag=True,
default=True,
help="Don't load the default catalyst extension.py file "
"in $CATALYST_HOME.",
)
@click.version_option()
def main(extension, strict_extensions, default_extension):
"""Top level catalyst entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
def ipython_only(option):
    """Mark that an option should only be exposed in IPython.

    Parameters
    ----------
    option : decorator
        A click.option decorator.

    Returns
    -------
    ipython_only_dec : decorator
        A decorator that correctly applies the argument even when not
        using IPython mode.
    """
    # Inside IPython the option is exposed as-is.
    if __IPYTHON__:
        return option

    # Outside IPython we hide the option from the CLI but still supply the
    # keyword (as None) so the wrapped command signature stays satisfied.
    option_name = extract_option_object(option).name

    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            kwargs[option_name] = None
            return f(*args, **kwargs)
        return wrapper

    return decorator
@main.command()
@click.option(
    '-f',
    '--algofile',
    default=None,
    type=click.File('r'),
    help='The file that contains the algorithm to run.',
)
@click.option(
    '-t',
    '--algotext',
    help='The algorithm script to run.',
)
@click.option(
    '-D',
    '--define',
    multiple=True,
    help="Define a name to be bound in the namespace before executing"
         " the algotext. For example '-Dname=value'. The value may be"
         " any python expression. These are evaluated in order so they"
         " may refer to previously defined names.",
)
@click.option(
    '--data-frequency',
    type=click.Choice({'daily', 'minute'}),
    default='daily',
    show_default=True,
    help='The data frequency of the simulation.',
)
@click.option(
    '--capital-base',
    type=float,
    show_default=True,
    help='The starting capital for the simulation.',
)
@click.option(
    '-b',
    '--bundle',
    default='poloniex',
    metavar='BUNDLE-NAME',
    show_default=True,
    help='The data bundle to use for the simulation.',
)
@click.option(
    '--bundle-timestamp',
    type=Timestamp(),
    default=pd.Timestamp.utcnow(),
    show_default=False,
    help='The date to lookup data on or before.\n'
         '[default: <current-time>]'
)
@click.option(
    '-s',
    '--start',
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the simulation.',
)
@click.option(
    '-e',
    '--end',
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the simulation.',
)
@click.option(
    '-o',
    '--output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the perf data. If this is '-' the perf"
         " will be written to stdout.",
)
@click.option(
    '--print-algo/--no-print-algo',
    is_flag=True,
    default=False,
    help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
    '--local-namespace/--no-local-namespace',
    is_flag=True,
    default=None,
    help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the targeted exchange.',
)
@click.option(
    '-n',
    '--algo-namespace',
    help='A label assigned to the algorithm for data storage purposes.'
)
@click.option(
    '-c',
    '--base-currency',
    help='The base currency used to calculate statistics '
         '(e.g. usd, btc, eth).',
)
@click.pass_context
def run(ctx,
        algofile,
        algotext,
        define,
        data_frequency,
        capital_base,
        bundle,
        bundle_timestamp,
        start,
        end,
        output,
        print_algo,
        local_namespace,
        exchange_name,
        algo_namespace,
        base_currency):
    """Run a backtest for the given algorithm.

    Validates the mutually exclusive algofile/algotext options and the
    required date/exchange/currency/capital arguments, then delegates to
    catalyst.utils.run_algo._run with live=False and simulate_orders=True.
    Returns the performance DataFrame produced by _run.
    """
    # Exactly one of --algofile / --algotext must be supplied.
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )
    # check that the start and end dates are passed correctly
    if start is None and end is None:
        # check both at the same time to avoid the case where a user
        # does not pass either of these and then passes the first only
        # to be told they need to pass the second argument also
        ctx.fail(
            "must specify dates with '-s' / '--start' and '-e' / '--end'"
            " in backtest mode",
        )
    if start is None:
        ctx.fail("must specify a start date with '-s' / '--start'"
                 " in backtest mode")
    if end is None:
        ctx.fail("must specify an end date with '-e' / '--end'"
                 " in backtest mode")

    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")

    if base_currency is None:
        ctx.fail("must specify a base currency with '-c' in backtest mode")

    if capital_base is None:
        ctx.fail("must specify a capital base with '--capital-base'")

    click.echo('Running in backtesting mode.', sys.stdout)

    perf = _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        data=None,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=output,
        print_algo=print_algo,
        local_namespace=local_namespace,
        environ=os.environ,
        live=False,
        exchange=exchange_name,
        algo_namespace=algo_namespace,
        base_currency=base_currency,
        analyze_live=None,
        live_graph=False,
        simulate_orders=True,
        auth_aliases=None,
        stats_output=None,
    )

    # '-' means stdout; os.devnull means discard (used by the IPython magic).
    if output == '-':
        click.echo(str(perf), sys.stdout)
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)

    return perf
def catalyst_magic(line, cell=None):
    """The catalyst IPython cell magic.

    ``line`` holds the magic's argument string; ``cell`` holds the cell body
    when invoked as a cell magic (``%%catalyst``) and is None for the line
    magic (``%catalyst``).
    """
    load_extensions(
        default=True,
        extensions=[],
        strict=True,
        environ=os.environ,
    )
    try:
        return run.main(
            # put our overrides at the start of the parameter list so that
            # users may pass values with higher precedence
            [
                '--algotext', cell,
                '--output', os.devnull,  # don't write the results by default
            ] + ([
                # these options are set when running in line magic mode
                # set a non None algo text to use the ipython user_ns
                # NOTE(review): in line-magic mode '--algotext' appears twice
                # (first with cell=None) -- confirm click resolves the later
                # occurrence as intended.
                '--algotext', '',
                '--local-namespace',
            ] if cell is None else []) + line.split(),
            '%s%%catalyst' % ((cell or '') and '%'),
            # don't use system exit and propogate errors to the caller
            standalone_mode=False,
        )
    except SystemExit as e:
        # https://github.com/mitsuhiko/click/pull/533
        # even in standalone_mode=False `--help` really wants to kill us ;_;
        if e.code:
            raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
    '-f',
    '--algofile',
    default=None,
    type=click.File('r'),
    help='The file that contains the algorithm to run.',
)
@click.option(
    '--capital-base',
    type=float,
    show_default=True,
    help='The amount of capital (in base_currency) allocated to trading.',
)
@click.option(
    '-t',
    '--algotext',
    help='The algorithm script to run.',
)
@click.option(
    '-D',
    '--define',
    multiple=True,
    help="Define a name to be bound in the namespace before executing"
         " the algotext. For example '-Dname=value'. The value may be"
         " any python expression. These are evaluated in order so they"
         " may refer to previously defined names.",
)
@click.option(
    '-o',
    '--output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the perf data. If this is '-' the perf will"
         " be written to stdout.",
)
@click.option(
    '--print-algo/--no-print-algo',
    is_flag=True,
    default=False,
    help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
    '--local-namespace/--no-local-namespace',
    is_flag=True,
    default=None,
    help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the targeted exchange.',
)
@click.option(
    '-n',
    '--algo-namespace',
    help='A label assigned to the algorithm for data storage purposes.'
)
@click.option(
    '-c',
    '--base-currency',
    help='The base currency used to calculate statistics '
         '(e.g. usd, btc, eth).',
)
@click.option(
    '-e',
    '--end',
    type=Date(tz='utc', as_timestamp=True),
    help='An optional end date at which to stop the execution.',
)
@click.option(
    '--live-graph/--no-live-graph',
    is_flag=True,
    default=False,
    help='Display live graph.',
)
@click.option(
    '--simulate-orders/--no-simulate-orders',
    is_flag=True,
    default=True,
    help='Simulating orders enable the paper trading mode. No orders will be '
         'sent to the exchange unless set to false.',
)
@click.option(
    '--auth-aliases',
    default=None,
    help='Authentication file aliases for the specified exchanges. By default,'
         'each exchange uses the "auth.json" file in the exchange folder. '
         'Specifying an "auth2" alias would use "auth2.json". It should be '
         'specified like this: "[exchange_name],[alias],..." For example, '
         '"binance,auth2" or "binance,auth2,bittrex,auth2".',
)
@click.pass_context
def live(ctx,
         algofile,
         capital_base,
         algotext,
         define,
         output,
         print_algo,
         local_namespace,
         exchange_name,
         algo_namespace,
         base_currency,
         end,
         live_graph,
         auth_aliases,
         simulate_orders):
    """Trade live with the given algorithm.

    Validates the required options and delegates to
    catalyst.utils.run_algo._run with live=True; simulate_orders=True
    selects paper trading.  Returns the performance object from _run.
    """
    # Exactly one of --algofile / --algotext must be supplied.
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )

    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")

    if algo_namespace is None:
        ctx.fail("must specify an algorithm name '-n' in live execution mode")

    if base_currency is None:
        ctx.fail("must specify a base currency '-c' in live execution mode")

    if capital_base is None:
        ctx.fail("must specify a capital base with '--capital-base'")

    if simulate_orders:
        click.echo('Running in paper trading mode.', sys.stdout)
    else:
        click.echo('Running in live trading mode.', sys.stdout)

    perf = _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=None,
        capital_base=capital_base,
        data=None,
        bundle=None,
        bundle_timestamp=None,
        start=None,
        end=end,
        output=output,
        print_algo=print_algo,
        local_namespace=local_namespace,
        environ=os.environ,
        live=True,
        exchange=exchange_name,
        algo_namespace=algo_namespace,
        base_currency=base_currency,
        live_graph=live_graph,
        analyze_live=None,
        simulate_orders=simulate_orders,
        auth_aliases=auth_aliases,
        stats_output=None,
    )

    # '-' means stdout; os.devnull means discard (used by the IPython magic).
    if output == '-':
        click.echo(str(perf), sys.stdout)
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)

    return perf
@main.command(name='ingest-exchange')
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the exchange bundle to ingest.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute', 'daily,minute', 'minute,daily'}),
    default='daily',
    show_default=True,
    help='The data frequency of the desired OHLCV bars.',
)
@click.option(
    '-s',
    '--start',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the data range. (default: one year from end date)',
)
@click.option(
    '-e',
    '--end',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the data range. (default: today)',
)
@click.option(
    '-i',
    '--include-symbols',
    default=None,
    help='A list of symbols to ingest (optional comma separated list)',
)
@click.option(
    '--exclude-symbols',
    default=None,
    help='A list of symbols to exclude from the ingestion '
         '(optional comma separated list)',
)
@click.option(
    '--csv',
    default=None,
    help='The path of a CSV file containing the data. If specified, start, '
         'end, include-symbols and exclude-symbols will be ignored. Instead,'
         'all data in the file will be ingested.',
)
@click.option(
    '--show-progress/--no-show-progress',
    default=True,
    help='Print progress information to the terminal.'
)
@click.option(
    '--verbose/--no-verbose`',
    default=False,
    help='Show a progress indicator for every currency pair.'
)
@click.option(
    '--validate/--no-validate`',
    default=False,
    help='Report potential anomalies found in data bundles.'
)
@click.pass_context
def ingest_exchange(ctx, exchange_name, data_frequency, start, end,
                    include_symbols, exclude_symbols, csv, show_progress,
                    verbose, validate):
    """
    Ingest data for the given exchange.

    Requires -x/--exchange-name; all other options are forwarded to
    ExchangeBundle.ingest.
    """
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")

    exchange_bundle = ExchangeBundle(exchange_name)

    click.echo('Trying to ingest exchange bundle {}...'.format(exchange_name),
               sys.stdout)
    exchange_bundle.ingest(
        data_frequency=data_frequency,
        include_symbols=include_symbols,
        exclude_symbols=exclude_symbols,
        start=start,
        end=end,
        show_progress=show_progress,
        show_breakdown=verbose,
        show_report=validate,
        csv=csv
    )
@main.command(name='clean-algo')
@click.option(
    '-n',
    '--algo-namespace',
    help='The label of the algorithm to for which to clean the state.'
)
@click.pass_context
def clean_algo(ctx, algo_namespace):
    """Remove the stored state folder for the given algorithm namespace."""
    click.echo(
        'Cleaning algo state: {}'.format(algo_namespace),
        sys.stdout
    )
    delete_algo_folder(algo_namespace)
    click.echo('Done', sys.stdout)
@main.command(name='clean-exchange')
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the exchange bundle to ingest.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute'}),
    default=None,
    help='The bundle data frequency to remove. If not specified, it will '
         'remove both daily and minute bundles.',
)
@click.pass_context
def clean_exchange(ctx, exchange_name, data_frequency):
    """Clean up bundles from 'ingest-exchange'.
    """
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")

    exchange_bundle = ExchangeBundle(exchange_name)

    click.echo('Cleaning exchange bundle {}...'.format(exchange_name),
               sys.stdout)
    # data_frequency=None asks ExchangeBundle.clean to remove both
    # daily and minute bundles (per the option's help text above).
    exchange_bundle.clean(
        data_frequency=data_frequency,
    )
    click.echo('Done', sys.stdout)
@main.command()
@click.option(
    '-b',
    '--bundle',
    metavar='BUNDLE-NAME',
    default=None,
    show_default=False,
    help='The data bundle to ingest.',
)
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the exchange bundle to ingest.',
)
@click.option(
    '-c',
    '--compile-locally',
    is_flag=True,
    default=False,
    help='Download dataset from source and compile bundle locally.',
)
@click.option(
    '--assets-version',
    type=int,
    multiple=True,
    help='Version of the assets db to which to downgrade.',
)
@click.option(
    '--show-progress/--no-show-progress',
    default=True,
    help='Print progress information to the terminal.'
)
@click.pass_context
def ingest(ctx, bundle, exchange_name, compile_locally, assets_version,
           show_progress):
    """Ingest the data for the given bundle.

    Delegates to catalyst.data.bundles.ingest with the current time as the
    ingestion timestamp.  NOTE(review): exchange_name is accepted but not
    forwarded here -- confirm whether it is intentionally unused.
    """
    bundles_module.ingest(
        bundle,
        os.environ,
        pd.Timestamp.utcnow(),
        assets_version,
        show_progress,
        compile_locally,
    )
@main.command()
@click.option(
    '-b',
    '--bundle',
    default='poloniex',
    metavar='BUNDLE-NAME',
    show_default=True,
    help='The data bundle to clean.',
)
@click.option(
    '-x',
    '--exchange_name',
    metavar='EXCHANGE-NAME',
    show_default=True,
    help='The exchange bundle name to clean.',
)
@click.option(
    '-e',
    '--before',
    type=Timestamp(),
    help='Clear all data before TIMESTAMP.'
         ' This may not be passed with -k / --keep-last',
)
@click.option(
    '-a',
    '--after',
    type=Timestamp(),
    help='Clear all data after TIMESTAMP'
         ' This may not be passed with -k / --keep-last',
)
@click.option(
    '-k',
    '--keep-last',
    type=int,
    metavar='N',
    help='Clear all but the last N downloads.'
         ' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, before, after, keep_last):
    """Clean up bundles from 'ingest'.

    NOTE(review): the '-x' option declared above is not a parameter of
    this function -- confirm whether it is intentionally ignored.
    """
    bundles_module.clean(
        bundle,
        before,
        after,
        keep_last,
    )
@main.command()
def bundles():
    """List all of the available data bundles.

    Prints one "<bundle> <ingestion timestamp>" line per known ingestion,
    skipping hidden (test) bundles.
    """
    for bundle in sorted(bundles_module.bundles.keys()):
        if bundle.startswith('.'):
            # hide the test data
            continue
        try:
            ingestions = list(
                map(text_type, bundles_module.ingestions_for_bundle(bundle))
            )
        except OSError as e:
            # NOTE(review): 'errno' is not imported in this chunk -- confirm
            # an 'import errno' exists at the top of the file.
            if e.errno != errno.ENOENT:
                raise
            ingestions = []

        # If we got no ingestions, either because the directory didn't exist or
        # because there were no entries, print a single message indicating that
        # no ingestions have yet been made.
        for timestamp in ingestions or ["<no ingestions>"]:
            click.echo("%s %s" % (bundle, timestamp), sys.stdout)
@main.group()
@click.pass_context
def marketplace(ctx):
    """Access the Enigma Data Marketplace to:\n
    - Register and Publish new datasets (seller-side)\n
    - Subscribe and Ingest premium datasets (buyer-side)\n
    """
    # Group container only; subcommands do the actual work.
    pass
@marketplace.command()
@click.pass_context
def ls(ctx):
    """List all available datasets.
    """
    click.echo('Listing of available data sources on the marketplace:',
               sys.stdout)
    marketplace = Marketplace()
    marketplace.list()
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the dataset to ingest from the Data Marketplace.',
)
@click.pass_context
def subscribe(ctx, dataset):
    """Subscribe to an existing dataset.
    """
    marketplace = Marketplace()
    marketplace.subscribe(dataset)
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the dataset to ingest from the Data Marketplace.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute', 'daily,minute', 'minute,daily'}),
    default='daily',
    show_default=True,
    help='The data frequency of the desired OHLCV bars.',
)
@click.option(
    '-s',
    '--start',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the data range. (default: one year from end date)',
)
@click.option(
    '-e',
    '--end',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the data range. (default: today)',
)
@click.pass_context
def ingest(ctx, dataset, data_frequency, start, end):
    """Ingest a dataset (requires subscription).
    """
    marketplace = Marketplace()
    marketplace.ingest(dataset, data_frequency, start, end)
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the dataset to ingest from the Data Marketplace.',
)
@click.pass_context
def clean(ctx, dataset):
    """Clean/Remove local data for a given dataset.
    """
    marketplace = Marketplace()
    marketplace.clean(dataset)
@marketplace.command()
@click.pass_context
def register(ctx):
    """Register a new dataset.
    """
    marketplace = Marketplace()
    marketplace.register()
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the Marketplace dataset to publish data for.',
)
@click.option(
    '--datadir',
    default=None,
    help='The folder that contains the CSV data files to publish.',
)
@click.option(
    '--watch/--no-watch',
    is_flag=True,
    default=False,
    help='Whether to watch the datadir for live data.',
)
@click.pass_context
def publish(ctx, dataset, datadir, watch):
    """Publish data for a registered dataset.

    Requires both --dataset and --datadir; --watch keeps watching the data
    directory for new files.
    """
    # Validate required options before constructing the Marketplace client,
    # so bad invocations fail fast without any client-side setup.
    if dataset is None:
        ctx.fail("must specify a dataset to publish data for "
                 " with '--dataset'\n")
    if datadir is None:
        ctx.fail("must specify a datadir where to find the files to publish "
                 " with '--datadir'\n")
    marketplace = Marketplace()
    marketplace.publish(dataset, datadir, watch)
if __name__ == '__main__':
main()
| 26.136465
| 79
| 0.617222
|
52d5399498951b0563f3c6d2105b8a3f42842c3c
| 3,718
|
py
|
Python
|
electrum/gui/qt/qrcodewidget.py
|
zcoinofficial/electrum
|
5fdc234f817aed1d7d4bcd894fafd99972423314
|
[
"MIT"
] | 23
|
2018-02-19T16:59:23.000Z
|
2020-09-10T16:58:44.000Z
|
electrum/gui/qt/qrcodewidget.py
|
zcoinofficial/electrum
|
5fdc234f817aed1d7d4bcd894fafd99972423314
|
[
"MIT"
] | 37
|
2018-03-04T11:59:44.000Z
|
2020-09-27T19:58:21.000Z
|
electrum/gui/qt/qrcodewidget.py
|
zcoinofficial/electrum
|
5fdc234f817aed1d7d4bcd894fafd99972423314
|
[
"MIT"
] | 12
|
2018-02-20T01:37:48.000Z
|
2020-08-06T06:18:42.000Z
|
import os
import qrcode
from PyQt5.QtGui import QColor
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import (
QApplication, QVBoxLayout, QTextEdit, QHBoxLayout, QPushButton, QWidget)
import electrum_xzc
from electrum_xzc.i18n import _
from .util import WindowModalDialog
class QRCodeWidget(QWidget):
    """Widget that renders its ``data`` string as a QR code."""

    def __init__(self, data=None, fixedSize=False):
        QWidget.__init__(self)
        self.data = None
        self.qr = None
        # fixedSize doubles as a flag and a pixel size (falsy = resizable).
        self.fixedSize = fixedSize
        if fixedSize:
            self.setFixedSize(fixedSize, fixedSize)
        self.setData(data)

    def setData(self, data):
        """Set the payload to encode; rebuilds the QR matrix on change."""
        if self.data != data:
            self.data = data
        if self.data:
            self.qr = qrcode.QRCode(
                error_correction=qrcode.constants.ERROR_CORRECT_L,
                box_size=10,
                border=0,
            )
            self.qr.add_data(self.data)
            if not self.fixedSize:
                # Reserve at least 5px per module so the code stays legible.
                k = len(self.qr.get_matrix())
                self.setMinimumSize(k * 5, k * 5)
        else:
            self.qr = None

        self.update()

    def paintEvent(self, e):
        if not self.data:
            return

        black = QColor(0, 0, 0, 255)
        white = QColor(255, 255, 255, 255)

        if not self.qr:
            # No QR matrix yet: just clear the viewport to white.
            qp = QtGui.QPainter()
            qp.begin(self)
            qp.setBrush(white)
            qp.setPen(white)
            r = qp.viewport()
            qp.drawRect(0, 0, r.width(), r.height())
            qp.end()
            return

        matrix = self.qr.get_matrix()
        k = len(matrix)
        qp = QtGui.QPainter()
        qp.begin(self)
        r = qp.viewport()

        margin = 10
        framesize = min(r.width(), r.height())
        boxsize = int((framesize - 2 * margin) / k)
        size = k * boxsize
        # BUGFIX: use floor division -- true division yields floats in
        # Python 3 and QPainter.drawRect requires integer coordinates.
        left = (framesize - size) // 2
        top = (framesize - size) // 2

        # Draw white background with margin
        qp.setBrush(white)
        qp.setPen(white)
        qp.drawRect(0, 0, framesize, framesize)

        # Draw qr code
        qp.setBrush(black)
        qp.setPen(black)
        for row in range(k):
            for c in range(k):
                if matrix[row][c]:
                    qp.drawRect(left + c * boxsize, top + row * boxsize,
                                boxsize - 1, boxsize - 1)
        qp.end()
class QRDialog(WindowModalDialog):
    """Modal dialog showing a QR code with Copy / Save / Close buttons."""

    def __init__(self, data, parent=None, title = "", show_text=False):
        WindowModalDialog.__init__(self, parent, title)

        vbox = QVBoxLayout()
        qrw = QRCodeWidget(data)
        vbox.addWidget(qrw, 1)
        if show_text:
            # Optionally show the raw payload beneath the QR code.
            text = QTextEdit()
            text.setText(data)
            text.setReadOnly(True)
            vbox.addWidget(text)
        hbox = QHBoxLayout()
        hbox.addStretch(1)

        config = electrum_xzc.get_config()
        if config:
            filename = os.path.join(config.path, "qrcode.png")

            # Despite the name, this saves the QR code to a PNG file.
            def print_qr():
                p = qrw.grab()  # FIXME also grabs neutral colored padding
                p.save(filename, 'png')
                self.show_message(_("QR code saved to file") + " " + filename)

            def copy_to_clipboard():
                p = qrw.grab()
                QApplication.clipboard().setPixmap(p)
                self.show_message(_("QR code copied to clipboard"))

            b = QPushButton(_("Copy"))
            hbox.addWidget(b)
            b.clicked.connect(copy_to_clipboard)

            b = QPushButton(_("Save"))
            hbox.addWidget(b)
            b.clicked.connect(print_qr)

        b = QPushButton(_("Close"))
        hbox.addWidget(b)
        b.clicked.connect(self.accept)
        b.setDefault(True)

        vbox.addLayout(hbox)
        self.setLayout(vbox)
| 27.746269
| 88
| 0.541689
|
8cb2f26c5e31b23df06aebf888e591ddb087f953
| 1,362
|
py
|
Python
|
src/rastervision/run.py
|
anuragreddygv323/raster-vision
|
db2bc35f21968618a333cee2f5e86f29e7d56483
|
[
"Apache-2.0"
] | 12
|
2018-07-31T01:52:00.000Z
|
2021-04-22T12:43:28.000Z
|
src/rastervision/run.py
|
anuragreddygv323/raster-vision
|
db2bc35f21968618a333cee2f5e86f29e7d56483
|
[
"Apache-2.0"
] | 1
|
2020-02-03T13:46:43.000Z
|
2022-03-29T16:53:24.000Z
|
src/rastervision/run.py
|
yoninachmany/raster-vision-deepglobe-semseg
|
14a6495f23bbef0bf7f7c47fb37b856a559b272f
|
[
"Apache-2.0"
] | 11
|
2018-05-03T10:43:08.000Z
|
2021-08-16T05:45:29.000Z
|
"""
Execute a sequence of tasks for a run, given a json file with options for that
run. Example usage: `python run.py options.json train_model`
"""
import argparse
import json
from rastervision.semseg.settings import SEMSEG
from rastervision.semseg.run import SemsegRunner
from rastervision.tagging.settings import TAGGING
from rastervision.tagging.run import TaggingRunner
def parse_args():
    """Parse the options-file path and task list from sys.argv."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('file_path', nargs='?',
                            help='path to the options json file')
    arg_parser.add_argument('tasks', nargs='*',
                            help='list of tasks to perform')
    return arg_parser.parse_args()
def run_tasks():
    """Run tasks specified on command line.

    This creates the RunOptions object from the json file specified on the
    command line, creates a data generator, and then runs the tasks.

    Raises
    ------
    ValueError
        If the options file's 'problem_type' is neither SEMSEG nor TAGGING.
    """
    args = parse_args()

    # The problem_type field selects which task runner handles the file.
    with open(args.file_path) as options_file:
        problem_type = json.load(options_file)['problem_type']
        if problem_type == SEMSEG:
            runner = SemsegRunner()
        elif problem_type == TAGGING:
            runner = TaggingRunner()
        else:
            raise ValueError('{} is not a valid problem_type'.format(
                problem_type))
    runner.run_tasks(args.file_path, args.tasks)
if __name__ == '__main__':
run_tasks()
| 30.954545
| 78
| 0.684288
|
f3757c47631c2439507177dc5abad52118784868
| 275
|
py
|
Python
|
mypy-debugging/mypy_minimal_example.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | 4
|
2018-06-01T15:59:17.000Z
|
2021-03-16T13:40:52.000Z
|
mypy-debugging/mypy_minimal_example.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | 10
|
2017-10-05T18:19:36.000Z
|
2019-06-26T18:11:35.000Z
|
mypy-debugging/mypy_minimal_example.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | null | null | null |
import torch

# Minimal reproduction for a mypy typing question about torch tensor types.
i = torch.IntTensor()
f = torch.FloatTensor()

# reveal_type is interpreted by mypy only; it is not defined at runtime,
# so executing this script raises NameError (intentional for type checking).
reveal_type(i)
reveal_type(f)


def foo(i: torch.IntTensor, f: torch.FloatTensor) -> None:
    pass


# these behave as expected
foo(i, f)
foo(1, 2)

# these typecheck OK, but I think should error
foo(i, i)
foo(f, f)
| 15.277778
| 58
| 0.690909
|
ccd47eb91f9fdf1c7a2e1acb6c30bdd483db5207
| 10,916
|
py
|
Python
|
example/distill/mnist_distill/train_with_fleet.py
|
wangxicoding/edl
|
75d651e72e5297aba2e597588cf958ea336deb4e
|
[
"Apache-2.0"
] | 90
|
2020-04-21T01:46:10.000Z
|
2022-02-10T09:09:34.000Z
|
example/distill/mnist_distill/train_with_fleet.py
|
wangxicoding/edl
|
75d651e72e5297aba2e597588cf958ea336deb4e
|
[
"Apache-2.0"
] | 37
|
2018-03-02T22:41:15.000Z
|
2020-04-22T16:48:36.000Z
|
example/distill/mnist_distill/train_with_fleet.py
|
wangxicoding/edl
|
75d651e72e5297aba2e597588cf958ea336deb4e
|
[
"Apache-2.0"
] | 34
|
2018-03-02T23:28:25.000Z
|
2020-03-25T08:50:29.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
if os.environ.get('PADDLE_TRAINER_ENDPOINTS') is None:
os.environ['PADDLE_TRAINER_ENDPOINTS'] = '127.0.0.1:0'
from paddle_edl.distill.distill_reader import DistillReader
import argparse
import ast
from PIL import Image
import numpy
import paddle
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.base import role_maker
def parse_args():
    """Parse command-line options for the MNIST distillation example."""
    parser = argparse.ArgumentParser("mnist")
    # ast.literal_eval turns the literal strings 'True'/'False' into bools.
    bool_type = ast.literal_eval
    parser.add_argument(
        '--use_gpu', type=bool_type, default=True,
        help="Whether to use GPU or not. 'True' or 'False'")
    parser.add_argument(
        '--num_epochs', type=int, default=5,
        help="number of epochs.")
    parser.add_argument(
        '--use_distill_service', type=bool_type, default=False,
        help="Whether to use distill service train. 'True' or 'False'")
    parser.add_argument(
        '--save_serving_model', type=bool_type, default=False,
        help="Whether to save paddle serving model. 'True' or 'False'")
    parser.add_argument(
        '--distill_teachers', type=str, default='127.0.0.1:9292',
        help="teachers of distill train. such as '127.0.0.1:9292,127.0.0.1:9293'"
    )
    return parser.parse_args()
def loss_net(hidden, label):
    """Attach a 10-way softmax head plus loss/accuracy to `hidden`.

    Returns (prediction, avg_loss, acc) fluid variables.
    """
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc
def multilayer_perceptron(img, label):
    """Two tanh FC layers (200 units each) followed by the shared loss head."""
    img = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden = fluid.layers.fc(input=img, size=200, act='tanh')
    return loss_net(hidden, label)
def softmax_regression(img, label):
    """Plain softmax regression: feed the image straight into the loss head."""
    return loss_net(img, label)
def convolutional_neural_network(img, label):
    """LeNet-style CNN: two conv+pool stages, then the shared loss head."""
    # Stage 1: 20 filters of 5x5, 2x2 max pooling, with batch norm.
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    # Stage 2: 50 filters of 5x5, 2x2 max pooling.
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(conv_pool_2, label)
def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Train an MNIST model (optionally with teacher distillation) via fleet.

    nn_type selects the network builder; when args.use_distill_service is
    set, training minimizes the soft-label cross entropy against teacher
    predictions instead of the hard-label loss.  Relies on module globals
    args, BATCH_SIZE and NUM_EPOCHS set in the __main__ block.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        # Requested GPU training but this paddle build has no CUDA: bail out.
        return
    startup_program = fluid.default_startup_program()
    main_program = fluid.default_main_program()

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)
    test_reader = paddle.batch(
        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)

    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        net_conf = convolutional_neural_network

    prediction, avg_loss, acc = net_conf(img, label)

    # Clone before adding the distill loss so evaluation uses hard labels only.
    test_program = main_program.clone(for_test=True)

    inputs = [img, label]
    test_inputs = [img, label]
    if args.use_distill_service:
        # The DistillReader augments each sample with the teacher's logits
        # (fetched from the configured teacher endpoints).
        dr = DistillReader(ins=['img', 'label'], predicts=['fc_0.tmp_2'])
        dr.set_fixed_teacher(args.distill_teachers)
        train_reader = dr.set_sample_list_generator(train_reader)

        soft_label = fluid.data(
            name='soft_label', shape=[None, 10], dtype='float32')
        inputs.append(soft_label)

        distill_loss = fluid.layers.cross_entropy(
            input=prediction, label=soft_label, soft_label=True)
        distill_loss = fluid.layers.mean(distill_loss)
        loss = distill_loss
    else:
        loss = avg_loss

    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)
    train_rank = fleet.worker_index()
    train_nranks = fleet.worker_num()

    optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
    # Distributed optimizer wrapping only applies to the GPU/collective path.
    if use_cuda:
        optimizer = fleet.distributed_optimizer(optimizer)
    optimizer.minimize(loss)
    main_program = fleet.main_program if use_cuda else main_program

    gpu_id = int(os.getenv("FLAGS_selected_gpus", "0"))

    def train_test(train_test_program, train_test_reader):
        # Evaluate the hard-label loss/accuracy over the whole test set.
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(program=train_test_program,
                                          feed=test_data,
                                          fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        return avg_loss_val_mean, acc_val_mean

    place = fluid.CUDAPlace(gpu_id) if use_cuda else fluid.CPUPlace()
    reader_places = fluid.cuda_places() if use_cuda else fluid.CPUPlace()

    py_train_reader = fluid.io.DataLoader.from_generator(
        feed_list=inputs, capacity=64)
    py_train_reader.set_sample_list_generator(train_reader, reader_places)

    py_test_reader = fluid.io.DataLoader.from_generator(
        feed_list=test_inputs, capacity=64)
    py_test_reader.set_sample_list_generator(test_reader, place)

    exe = fluid.Executor(place)
    exe.run(startup_program)
    epochs = [epoch_id for epoch_id in range(NUM_EPOCHS)]

    lists = []
    step = 0
    for epoch_id in epochs:
        for step_id, data in enumerate(py_train_reader()):
            metrics = exe.run(main_program, feed=data, fetch_list=[loss, acc])
            if step % 100 == 0:
                print("Pass {}, Step {}, Cost {}".format(epoch_id, step,
                                                         metrics[0].mean()))
            step += 1

        # Only worker 0 evaluates and saves models.
        if train_rank == 0:
            # test for epoch
            avg_loss_val, acc_val = train_test(
                train_test_program=test_program,
                train_test_reader=py_test_reader)

            print("Test with Pass %d, avg_cost: %s, acc: %s" %
                  (epoch_id, avg_loss_val, acc_val))
            lists.append((epoch_id, avg_loss_val, acc_val))

            if save_dirname is not None:
                fluid.io.save_inference_model(
                    save_dirname, ["img"], [prediction],
                    exe,
                    model_filename=model_filename,
                    params_filename=params_filename)

    if train_rank == 0:
        if args.save_serving_model:
            import paddle_serving_client.io as serving_io
            if not os.path.isdir('output'):
                os.mkdir('output')
            serving_io.save_model("output/mnist_cnn_model",
                                  "output/serving_conf", {img.name: img},
                                  {prediction.name: prediction}, test_program)
            print('save serving model, feed_names={}, fetch_names={}'.format(
                [img.name], [prediction.name]))

        # find the best pass (lowest average test cost)
        best = sorted(lists, key=lambda list: float(list[1]))[0]
        print('Best pass is %s, testing Avg cost is %s' % (best[0], best[1]))
        print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))
def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Load the saved inference model and classify a bundled sample image."""
    if save_dirname is None:
        return

    gpu_id = int(os.getenv("FLAGS_selected_gpus", "0"))
    place = fluid.CUDAPlace(gpu_id) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    def load_image(file):
        # Grayscale 28x28, scaled to [-1, 1] to match the training input.
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape((1, 1, 28, 28)).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    tensor_img = load_image(cur_dir + '/image/infer_3.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)
        # Highest-probability class = last index after an ascending argsort.
        lab = numpy.argsort(results)
        print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1])
def main(use_cuda, nn_type):
    """Train the selected network, persist it, then run a demo inference.

    Args:
        use_cuda: forwarded to train()/infer() to pick GPU or CPU execution.
        nn_type: network architecture key (e.g. 'softmax_regression',
            'multilayer_perceptron', 'convolutional_neural_network'); also
            used to name the saved inference-model directory.
    """
    model_filename = None
    params_filename = None
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"
    # call train() with is_local argument to run distributed train
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
if __name__ == '__main__':
    # Script entry point: parse CLI flags and train/infer the chosen network.
    args = parse_args()
    BATCH_SIZE = 64
    NUM_EPOCHS = args.num_epochs
    use_cuda = args.use_gpu
    # predict = 'softmax_regression' # uncomment for Softmax
    #predict = 'multilayer_perceptron' # uncomment for MLP
    predict = 'convolutional_neural_network' # uncomment for LeNet5
    main(use_cuda=use_cuda, nn_type=predict)
| 36.265781
| 81
| 0.649872
|
ca24a7060a3a1a862d878ae424b194fa092b02ad
| 3,741
|
py
|
Python
|
code/evaluate_correction/evaluate.py
|
JeyDi/Mispelling
|
e2a91178707532f4b00949bbf37fcd17671f1a08
|
[
"MIT"
] | 1
|
2019-03-19T23:08:58.000Z
|
2019-03-19T23:08:58.000Z
|
code/evaluate_correction/evaluate.py
|
JeyDi/Mispelling
|
e2a91178707532f4b00949bbf37fcd17671f1a08
|
[
"MIT"
] | null | null | null |
code/evaluate_correction/evaluate.py
|
JeyDi/Mispelling
|
e2a91178707532f4b00949bbf37fcd17671f1a08
|
[
"MIT"
] | null | null | null |
from os import path, listdir
from itertools import product
from evaluate_correction import evaluate_utility
def check(truth, perturbed, corrected):
    """Return the (was_perturbed, was_corrected, matches_truth) flag triple
    for one (ground truth, perturbed, corrected) text sample."""
    perturbed_flag = is_perturbed(perturbed, truth)
    corrected_flag = is_corrected(perturbed, corrected)
    truth_flag = is_truth(truth, corrected)
    return (perturbed_flag, corrected_flag, truth_flag)


def is_perturbed(truth, perturbed):
    """1 when the perturbed text differs from the truth, else 0."""
    differs = perturbed != truth
    return int(differs)


def is_corrected(perturbed, corrected):
    """1 when the corrector changed the perturbed text, else 0."""
    changed = perturbed != corrected
    return int(changed)


def is_truth(truth, corrected):
    """1 when the corrected text equals the ground truth, else 0."""
    restored = truth == corrected
    return int(restored)
##METRICS
def perturbed_corrected_ratio(scores):
    """Fraction of perturbed words that the corrector restored to the truth.

    *scores* maps (perturbed, corrected, truth) flag triples to counts.
    Returns the original error string when the required keys are missing or
    no perturbed words were counted (zero denominator).
    """
    try:
        result = scores[(1, 1, 1)] / (scores[(1, 1, 1)] + scores[(1, 1, 0)] + scores[(1, 0, 0)])
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    except (KeyError, ZeroDivisionError, TypeError):
        result = "perturbed corrected ratio error"
    return result
def not_perturbed_not_corrected_ratio(scores):
    """Fraction of untouched (not perturbed) words the corrector left correct.

    *scores* maps (perturbed, corrected, truth) flag triples to counts.
    Returns the original error string when keys are missing or the
    denominator is zero.
    """
    try:
        result = scores[(0, 0, 1)] / (scores[(0, 0, 1)] + scores[(0, 1, 0)])
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    except (KeyError, ZeroDivisionError, TypeError):
        result = "not perturbed, not corrected ratio error"
    return result
def precision(scores):
    """Precision of the corrector over a flag-triple count table.

    Numerator (1,1,1): perturbed words corrected back to the truth;
    denominator adds (1,0,0): perturbed words left untouched and wrong.
    Returns the original error string on missing keys or zero denominator.
    """
    try:
        result = scores[(1, 1, 1)] / (scores[(1, 1, 1)] + scores[(1, 0, 0)])
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    except (KeyError, ZeroDivisionError, TypeError):
        result = "precision calculation error"
    return result
def recall(scores):
    """Recall of the corrector over a flag-triple count table.

    Numerator (1,1,1): perturbed words corrected back to the truth;
    denominator adds (0,1,0) and (1,1,0): words the corrector changed
    without restoring the truth.
    Returns the original error string on missing keys or zero denominator.
    """
    try:
        result = scores[(1, 1, 1)] / (scores[(1, 1, 1)] + scores[(0, 1, 0)] + scores[(1, 1, 0)])
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    except (KeyError, ZeroDivisionError, TypeError):
        result = "recall error"
    return result
def accuracy(scores):
    """Accuracy: words ending up correct — (1,1,1) and (0,0,1) — over all
    counted outcomes in the flag-triple table.

    Returns the original error string on missing keys or zero denominator.
    """
    try:
        result = ((scores[(1, 1, 1)] + scores[(0, 0, 1)]) /
                  (scores[(1, 1, 1)] + scores[(1, 0, 0)] + scores[(0, 1, 0)] +
                   scores[(1, 1, 0)] + scores[(0, 0, 1)]))
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    except (KeyError, ZeroDivisionError, TypeError):
        result = "accuracy error"
    return result
def F1_measure(scores, precision, recall):
    """Harmonic mean of *precision* and *recall*.

    *scores* is unused but kept for interface compatibility with the other
    metric functions. Returns the original error string when precision and
    recall sum to zero, or when an upstream metric propagated its error
    string (which makes the arithmetic raise TypeError).
    """
    try:
        result = 2 * ((precision * recall) / (precision + recall))
    # Narrowed from a bare `except:`; TypeError covers error-string inputs.
    except (ZeroDivisionError, TypeError):
        result = "F1 measure error"
    return result
def count_indexes(tweets_evals):
    """Count how often each (perturbed, corrected, truth) flag triple occurs.

    Args:
        tweets_evals: dict mapping an id to a list of flag triples.
    Returns:
        dict mapping every triple in {0,1}^3 to its total count (triples that
        never occur are present with count 0).
    """
    # Initialise all 8 possible triples so absent outcomes read as 0.
    indexes = {triple: 0 for triple in product((0, 1), repeat=3)}
    # NOTE: the original also accumulated a `total_element` counter that was
    # never used or returned; that dead code has been removed.
    for scores in tweets_evals.values():
        for score in scores:
            indexes[score] += 1
    return indexes
# Main entry point for evaluating a spelling-correction run against the
# ground truth and the perturbed input.
def evaluate(dict_file, perturbed_file, corrected_file, out_path=None):
    """Compare ground-truth, perturbed, and corrected text files and write
    per-word evaluation results plus aggregated flag-triple counts.

    Args:
        dict_file: ground-truth text file path/handle (passed to
            evaluate_utility.load_file).
        perturbed_file: file with the artificially perturbed text.
        corrected_file: file with the corrector's output.
        out_path: output directory; defaults to "./results".
            NOTE(review): the directory is not created here — presumably it
            must already exist; verify against the caller.
    Returns:
        The dict of flag-triple counts from count_indexes().
    """
    tweets_evals = {}
    words_evals = {}
    # file_name, _ = path.splitext(path.basename(dict_path))
    # assumes load_file returns position-aligned sequences of tweet strings
    # for all three files — TODO confirm in evaluate_utility.
    start_tweets = evaluate_utility.load_file(dict_file)
    perturbed_tweets = evaluate_utility.load_file(perturbed_file)
    corrected_tweets = evaluate_utility.load_file(corrected_file)
    for word_id in range(len(start_tweets)):
        start_words = start_tweets[word_id]
        perturbed_words = perturbed_tweets[word_id]
        corrected_words = corrected_tweets[word_id]
        # check if the tweet is consistent with the ground truth
        tweets_evals[word_id] = [check(start_words, perturbed_words, corrected_words)]
        # check if every word of the tweet is consistent with the ground truth
        words_check = []
        for start_single, perturbed_single, corrected_single in zip(start_words.split(), perturbed_words.split(),
                                                                    corrected_words.split()):
            words_check += [check(start_single, perturbed_single, corrected_single)]
        words_evals[word_id] = words_check
    if(out_path is None):
        out_path = "./results"
    out_path_evaluation = path.join(out_path, "word_evaluation.txt")
    out_path_index = path.join(out_path, "word_evaluation_index.txt")
    # out_path_correct_ratio = path.join(out_path, "word_evaluation_correct_ratio.txt")
    evaluate_utility.write_tweets(out_path_evaluation, words_evals)
    words_index = count_indexes(words_evals)
    evaluate_utility.write_tweets(out_path_index, words_index)
    return words_index
| 31.70339
| 134
| 0.662657
|
673c1874fcf9d63729d953574ddd5c23f00f256b
| 6,138
|
py
|
Python
|
mmtbx/suitename/suites.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/suitename/suites.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/suitename/suites.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import nested_scopes, generators, division, absolute_import
from __future__ import with_statement, print_function
import sys, os
# Copyright 2021 Richardson Lab at Duke University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from suitenamedefs import globals
from iotbx.data_manager import DataManager # Load in the DataManager
from libtbx import phil
from libtbx.utils import Sorry
# from mmtbx.validation import utils
# from cctbx import geometry_restraints
# from collections import defaultdict
from diangle import getResidueDihedrals
# IMPORT TO EXPORT:
from mmtbx.suitename.suitename import compute, write, \
finalStats, clearStats
# The following are the options available, in Phil format,
# for human and computer comprehension.
philOptions = """
suitename {
# input
infile=""
.type=str
.help="the file to process"
anglefields = 9
.type=int
.help="number of angle fields provided, for textual input only"
pointidfields = 7
.type=int
.help="number of point id fields before the angle fields"
ptid=0
.type=int
.help="number of point id fields before the angle fields"
residuein=false
.type=bool
.help="expect dangle format giving residues"
suitein=false
.type=bool
.help="expect kinemage format giving suites directly"
# output
string=False
.type=bool
.help="output in string format, 3 characters per suite"
kinemage=False
.type=bool
.help="output in kinemage format, useful for visualization"
report=true
.type=bool
.help="output as a report, giving statistical details"
chart=False
.type=bool
.help="modifier to standard report, output without statistical summary"
nosequence = False
.type=bool
.help="modifier to string format, do not include base letters"
causes=False
.type=bool
.help="output extra details concerning the causes of each assignment made"
test=False
.type=bool
.help="display a lat of additional information about program internals"
# compute
satellites=False
.type=bool
.help="use the special satelliteWidths values for satellites"
nowannabe=False
.type=bool
.help="do not consider 'wannabe' clusters"
noinc=False
.type=bool
.help="do not display incomplete suites"
etatheta=False
.type=bool
altid="A"
.type=str
.help="which alternate conformer to use (A, B, etc)"
altidfield = 6
.type=int
.help="which field (1-based) gives the alternate conformer code"
version=false
.type=bool
.help="give the version number of suite name"
# deprecated and automatically true:
oneline=false
.type=bool
}
"""
def main(options, outFile=None, errorFile=None):
    """Handle PDB and CIF input formats: parse the model hierarchy, extract
    dihedral angles, build suites, classify them, and write the report.

    Args:
        options: options object from parseOptions()/argparse; options.infile
            names the model file to process.
        outFile: destination stream for the suite report (default stdout).
        errorFile: destination stream for diagnostics (default stderr).
    """
    setOptions(options)
    import suiteninput  # must be AFTER setOptions
    if not outFile: outFile = sys.stdout
    if not errorFile: errorFile = sys.stderr
    inFile = options.infile
    model = loadModel(inFile)
    residues = getResidueDihedrals(model, options.altid,
                                   name=os.path.splitext(inFile)[0],
                                   errorFile=errorFile)
    ### to print mp_geo-like output:
    # for r in residues:
    #     print(residueString(r))
    # useful for seeing what suites were generated
    if residues is not None and len(residues) > 0:
        # the final suite is incomplete by construction, so drop it
        suiteList = suiteninput.buildSuites(residues)
        suiteList = suiteList[:-1]
        suiteList = compute(suiteList)
        finalStats()
        write(outFile, suiteList)
        clearStats()
def parseOptions(optionString, errorFile=None):
    """Use *optionString* to modify the defaults given in philOptions above.

    Returns a Python object that has an attribute for every option listed
    in philOptions. Example: "chart=true noinc=true causes=true".
    The values in optionString are case insensitive. Unparseable input is
    reported to *errorFile* (default: stderr) and the master defaults are
    returned instead.
    """
    master_phil = phil.parse(philOptions)
    interp = master_phil.command_line_argument_interpreter()
    optionList = optionString.split()
    user_phil = None
    try:
        user_phil = interp.process(args=optionList)
    except Sorry as e:
        if errorFile is None:
            errorFile = sys.stderr
        print(e, file=errorFile)
    # BUG FIX: the original read user_phil after the handler, so a parse
    # failure crashed with NameError; fall back to the master defaults.
    if user_phil is None:
        working_phil = master_phil
    else:
        working_phil = master_phil.fetch(sources=user_phil)
    full_options = working_phil.extract()
    return full_options.suitename
def setOptions(optionsIn):
    """Install the option set used by the suitename machinery.

    *optionsIn* may be the result of parseOptions above or the result of an
    argparse parse_args operation.
    """
    from mmtbx.suitename.suitename import loadOptions
    # `globals` here is the object imported from suitenamedefs at the top of
    # this file, not the Python builtin.
    globals.options = optionsIn
    loadOptions(optionsIn)
def loadModel(filename):
    """Read a structure file via the cctbx DataManager and return the model."""
    dm = DataManager()      # Initialize the DataManager and call it dm
    dm.set_overwrite(True)  # tell the DataManager to overwrite files with the same name
    #print("Reading file")
    model = dm.get_model(filename)
    #print("Processing model")
    #model.process_input_model(make_restraints=True)
    # removed because Restraints Manager will not operate
    # on unfamiliar residues KPB 6/10/2021
    return model
def testResidues(model):
    """Debug helper: print the point IDs and dihedral angles of every residue
    extracted from *model* (uses default altid/name arguments)."""
    #print("computing dihedrals")
    residues = getResidueDihedrals(model)
    for r in residues:
        print(r.pointIDs, " : ", r.angle)
| 32.823529
| 95
| 0.69143
|
2d87bebab843cb4cf84b592e1ac9db7edf284b79
| 9,408
|
py
|
Python
|
Server/integrations/duo/DuoExternalAuthenticator.py
|
rkondratenko/oxAuth
|
a083722fcefddfc31895b89900ee21999077cf52
|
[
"MIT"
] | 380
|
2015-01-08T23:28:43.000Z
|
2022-03-07T20:19:51.000Z
|
Server/integrations/duo/DuoExternalAuthenticator.py
|
rkondratenko/oxAuth
|
a083722fcefddfc31895b89900ee21999077cf52
|
[
"MIT"
] | 1,489
|
2020-11-06T14:04:47.000Z
|
2020-11-06T14:30:43.000Z
|
Server/integrations/duo/DuoExternalAuthenticator.py
|
HomeMeCEO/oxAuth
|
e391298e55e34f0f732c7ecd199bffe42289992b
|
[
"MIT"
] | 172
|
2015-01-10T09:48:21.000Z
|
2022-02-24T03:01:59.000Z
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import AuthenticationService
from org.gluu.oxauth.service.common import UserService
from org.gluu.service import MailService
from org.gluu.util import ArrayHelper
from org.gluu.util import StringHelper
from java.util import Arrays
import duo_web
import json
class PersonAuthentication(PersonAuthenticationType):
    # oxAuth person-authentication script implementing Duo two-factor login.
    # Jython / Python 2 syntax (print statements) executed inside Gluu oxAuth:
    # step 1 = username/password, step 2 = Duo iframe verification.

    def __init__(self, currentTimeMillis):
        self.currentTimeMillis = currentTimeMillis

    def init(self, customScript, configurationAttributes):
        # Called once when the script is (re)loaded. Loads the Duo integration
        # (ikey), secret (skey), and application (akey) keys from the JSON
        # file named by the "duo_creds_file" property.
        print "Duo. Initialization"
        duo_creds_file = configurationAttributes.get("duo_creds_file").getValue2()
        # Load credentials from file
        f = open(duo_creds_file, 'r')
        try:
            creds = json.loads(f.read())
        except:
            print "Duo. Initialization. Failed to load creds from file:", duo_creds_file
            return False
        finally:
            f.close()
        self.ikey = str(creds["ikey"])
        self.skey = str(creds["skey"])
        self.akey = str(creds["akey"])
        # Optional: require the Duo second factor only for members of a group.
        self.use_duo_group = False
        if (configurationAttributes.containsKey("duo_group")):
            self.duo_group = configurationAttributes.get("duo_group").getValue2()
            self.use_duo_group = True
            print "Duo. Initialization. Using Duo only if user belong to group:", self.duo_group
        # Optional: e-mail an administrator whenever an audit-group member
        # logs in; requires "audit_group_email" as well.
        self.use_audit_group = False
        if (configurationAttributes.containsKey("audit_group")):
            self.audit_group = configurationAttributes.get("audit_group").getValue2()
            if (not configurationAttributes.containsKey("audit_group_email")):
                print "Duo. Initialization. Property audit_group_email is not specified"
                return False
            self.audit_email = configurationAttributes.get("audit_group_email").getValue2()
            self.use_audit_group = True
            print "Duo. Initialization. Using audito group:", self.audit_group
        # Either group feature needs the attribute that stores membership.
        if (self.use_duo_group or self.use_audit_group):
            if (not configurationAttributes.containsKey("audit_attribute")):
                print "Duo. Initialization. Property audit_attribute is not specified"
                return False
            else:
                self.audit_attribute = configurationAttributes.get("audit_attribute").getValue2()
        print "Duo. Initialized successfully"
        return True

    def destroy(self, configurationAttributes):
        print "Duo. Destroy"
        print "Duo. Destroyed successfully"
        return True

    def getApiVersion(self):
        # Custom-script API version implemented by this script.
        return 11

    def getAuthenticationMethodClaims(self, requestParameters):
        return None

    def isValidAuthenticationMethod(self, usageType, configurationAttributes):
        return True

    def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
        return None

    def authenticate(self, configurationAttributes, requestParameters, step):
        # Step 1 validates primary credentials (unless another script already
        # authenticated the user); step 2 verifies the signed Duo response.
        duo_host = configurationAttributes.get("duo_host").getValue2()
        authenticationService = CdiUtil.bean(AuthenticationService)
        identity = CdiUtil.bean(Identity)
        if (step == 1):
            print "Duo. Authenticate for step 1"
            # Check if user authenticated already in another custom script
            user = authenticationService.getAuthenticatedUser()
            if user == None:
                credentials = identity.getCredentials()
                user_name = credentials.getUsername()
                user_password = credentials.getPassword()
                logged_in = False
                if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
                    userService = CdiUtil.bean(UserService)
                    logged_in = authenticationService.authenticate(user_name, user_password)
                if (not logged_in):
                    return False
                user = authenticationService.getAuthenticatedUser()
            if (self.use_duo_group):
                print "Duo. Authenticate for step 1. Checking if user belong to Duo group"
                is_member_duo_group = self.isUserMemberOfGroup(user, self.audit_attribute, self.duo_group)
                if (is_member_duo_group):
                    print "Duo. Authenticate for step 1. User '" + user.getUserId() + "' member of Duo group"
                    duo_count_login_steps = 2
                else:
                    # Not in the Duo group: skip the second factor but still
                    # apply the audit notification if configured.
                    self.processAuditGroup(user)
                    duo_count_login_steps = 1
                identity.setWorkingParameter("duo_count_login_steps", duo_count_login_steps)
            return True
        elif (step == 2):
            print "Duo. Authenticate for step 2"
            user = authenticationService.getAuthenticatedUser()
            if user == None:
                print "Duo. Authenticate for step 2. Failed to determine user name"
                return False
            user_name = user.getUserId()
            sig_response_array = requestParameters.get("sig_response")
            if ArrayHelper.isEmpty(sig_response_array):
                print "Duo. Authenticate for step 2. sig_response is empty"
                return False
            duo_sig_response = sig_response_array[0]
            print "Duo. Authenticate for step 2. duo_sig_response: " + duo_sig_response
            # Verify the signature produced by the Duo widget; it encodes the
            # username that completed the second factor.
            authenticated_username = duo_web.verify_response(self.ikey, self.skey, self.akey, duo_sig_response)
            print "Duo. Authenticate for step 2. authenticated_username: " + authenticated_username + ", expected user_name: " + user_name
            if (not StringHelper.equals(user_name, authenticated_username)):
                return False
            self.processAuditGroup(user)
            return True
        else:
            return False

    def prepareForStep(self, configurationAttributes, requestParameters, step):
        # Before step 2, sign a Duo request for the login page to embed.
        identity = CdiUtil.bean(Identity)
        authenticationService = CdiUtil.bean(AuthenticationService)
        duo_host = configurationAttributes.get("duo_host").getValue2()
        if (step == 1):
            print "Duo. Prepare for step 1"
            return True
        elif (step == 2):
            print "Duo. Prepare for step 2"
            user = authenticationService.getAuthenticatedUser()
            if (user == None):
                print "Duo. Prepare for step 2. Failed to determine user name"
                return False
            user_name = user.getUserId()
            duo_sig_request = duo_web.sign_request(self.ikey, self.skey, self.akey, user_name)
            print "Duo. Prepare for step 2. duo_sig_request: " + duo_sig_request
            # Exposed to duologin.xhtml via the working parameters.
            identity.setWorkingParameter("duo_host", duo_host)
            identity.setWorkingParameter("duo_sig_request", duo_sig_request)
            return True
        else:
            return False

    def getExtraParametersForStep(self, configurationAttributes, step):
        if step == 2:
            return Arrays.asList("duo_count_login_steps", "cas2_user_uid")
        return None

    def getCountAuthenticationSteps(self, configurationAttributes):
        # 1 when the user is exempt from Duo (see authenticate step 1),
        # otherwise the default of 2.
        identity = CdiUtil.bean(Identity)
        if (identity.isSetWorkingParameter("duo_count_login_steps")):
            return int(identity.getWorkingParameter("duo_count_login_steps"))
        return 2

    def getPageForStep(self, configurationAttributes, step):
        if (step == 2):
            return "/auth/duo/duologin.xhtml"
        return ""

    def getNextStep(self, configurationAttributes, requestParameters, step):
        return -1

    def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
        print "Get external logout URL call"
        return None

    def logout(self, configurationAttributes, requestParameters):
        return True

    def isUserMemberOfGroup(self, user, attribute, group):
        # True when any value of *attribute* equals *group* (case-insensitive)
        # or ends with it (matches a group DN suffix).
        is_member = False
        member_of_list = user.getAttributeValues(attribute)
        if (member_of_list != None):
            for member_of in member_of_list:
                if StringHelper.equalsIgnoreCase(group, member_of) or member_of.endswith(group):
                    is_member = True
                    break
        return is_member

    def processAuditGroup(self, user):
        # Notify the configured administrator address when an audit-group
        # member logs in; no-op unless audit_group was configured in init().
        if (self.use_audit_group):
            is_member = self.isUserMemberOfGroup(user, self.audit_attribute, self.audit_group)
            if (is_member):
                print "Duo. Authenticate for processAuditGroup. User '" + user.getUserId() + "' member of audit group"
                print "Duo. Authenticate for processAuditGroup. Sending e-mail about user '" + user.getUserId() + "' login to", self.audit_email
                # Send e-mail to administrator
                user_id = user.getUserId()
                mailService = CdiUtil.bean(MailService)
                subject = "User log in: " + user_id
                body = "User log in: " + user_id
                mailService.sendMail(self.audit_email, subject, body)
| 39.2
| 144
| 0.640731
|
f9aa976e3d82009bb9b99c50cbbb4e2a1b43b9af
| 2,813
|
py
|
Python
|
lib/core/evaluation.py
|
lucasstna/HRNet-Facial-Landmark-Detection
|
0af2745339524ca179f6df277ca8fd5c1ded0586
|
[
"MIT"
] | null | null | null |
lib/core/evaluation.py
|
lucasstna/HRNet-Facial-Landmark-Detection
|
0af2745339524ca179f6df277ca8fd5c1ded0586
|
[
"MIT"
] | null | null | null |
lib/core/evaluation.py
|
lucasstna/HRNet-Facial-Landmark-Detection
|
0af2745339524ca179f6df277ca8fd5c1ded0586
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Created by Tianheng Cheng(tianhengcheng@gmail.com), Yang Zhao
# ------------------------------------------------------------------------------
import math
import torch
import numpy as np
from ..utils.transforms import transform_preds
def get_preds(scores):
    """Extract 1-based (x, y) argmax coordinates from heatmaps.

    scores: 4-dim tensor (batch, channels, height, width).
    Returns a float tensor of shape (batch, channels, 2); locations whose
    maximum value is not positive are zeroed out.
    """
    assert scores.dim() == 4, 'Score maps should be 4-dim'
    batch, channels = scores.size(0), scores.size(1)
    width = scores.size(3)

    flat = scores.view(batch, channels, -1)
    maxval, idx = torch.max(flat, 2)
    maxval = maxval.view(batch, channels, 1)
    idx = idx.view(batch, channels, 1) + 1  # switch to 1-based flat index

    coords = idx.repeat(1, 1, 2).float()
    coords[:, :, 0] = (coords[:, :, 0] - 1) % width + 1           # x column
    coords[:, :, 1] = torch.floor((coords[:, :, 1] - 1) / width) + 1  # y row

    # Suppress predictions where the peak value is <= 0.
    valid = maxval.gt(0).repeat(1, 1, 2).float()
    coords *= valid
    return coords
def compute_nme(preds, meta):
    """Normalized mean error per sample for facial-landmark predictions.

    preds: tensor (N, L, 2) of predicted landmarks.
    meta: dict with ground truth under 'pts' (and 'box_size' for AFLW).
    The normalization distance depends on the landmark count L.
    Returns a numpy array of N per-sample errors.
    """
    pred_np = preds.numpy()
    gt_np = meta['pts'].cpu().numpy()

    n_samples = pred_np.shape[0]
    n_points = pred_np.shape[1]
    errors = np.zeros(n_samples)

    for idx in range(n_samples):
        pred_pts = pred_np[idx, ]
        gt_pts = gt_np[idx, ]
        if n_points == 19:  # aflw: normalize by the face box size
            norm_dist = meta['box_size'][idx]
        elif n_points == 29:  # cofw
            norm_dist = np.linalg.norm(gt_pts[8, ] - gt_pts[9, ])
        elif n_points == 68:  # 300w: inter-ocular distance
            norm_dist = np.linalg.norm(gt_pts[36, ] - gt_pts[45, ])
        elif n_points == 98:
            norm_dist = np.linalg.norm(gt_pts[60, ] - gt_pts[72, ])
        elif n_points == 4:  # BK
            norm_dist = np.linalg.norm(gt_pts[0, ] - gt_pts[3, ])
        else:
            raise ValueError('Number of landmarks is wrong')
        errors[idx] = np.sum(np.linalg.norm(pred_pts - gt_pts, axis=1)) / (norm_dist * n_points)
    return errors
def decode_preds(output, center, scale, res):
    """Convert heatmap outputs to landmark coordinates in the original image.

    output: heatmap tensor (N, L, H, W); center/scale: per-sample crop
    parameters consumed by transform_preds; res: (width, height) of the
    heatmap resolution.
    Returns a tensor (N, L, 2) of image-space coordinates.
    """
    coords = get_preds(output)  # 1-based float (x, y) maxima
    coords = coords.cpu()
    # pose-processing: nudge each maximum a quarter pixel toward the
    # higher-valued neighbour for sub-pixel accuracy
    for n in range(coords.size(0)):
        for p in range(coords.size(1)):
            hm = output[n][p]
            px = int(math.floor(coords[n][p][0]))
            py = int(math.floor(coords[n][p][1]))
            if (px > 1) and (px < res[0]) and (py > 1) and (py < res[1]):
                diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2],
                                     hm[py][px - 1] - hm[py - 2][px - 1]])
                coords[n][p] += diff.sign() * .25
    coords += 0.5
    preds = coords.clone()

    # Transform back to the original image frame
    for i in range(coords.size(0)):
        preds[i] = transform_preds(coords[i], center[i], scale[i], res)

    if preds.dim() < 3:
        # BUG FIX: the original called preds.view(1, preds.size()), passing a
        # torch.Size object as a dimension and raising TypeError; unpack it.
        preds = preds.view(1, *preds.size())
    return preds
| 31.255556
| 109
| 0.525062
|
f2659b46bce9fea62ef2840e73249b2256de04d3
| 2,032
|
py
|
Python
|
python/grasp_proxy_matlab.py
|
mgualti/DeepRLManip
|
6b982a319edae51d9c7c740c3a83fe8ce3a97ca7
|
[
"MIT"
] | 23
|
2018-10-30T02:34:39.000Z
|
2022-01-13T09:58:00.000Z
|
python/grasp_proxy_matlab.py
|
mgualti/DeepRLManip
|
6b982a319edae51d9c7c740c3a83fe8ce3a97ca7
|
[
"MIT"
] | null | null | null |
python/grasp_proxy_matlab.py
|
mgualti/DeepRLManip
|
6b982a319edae51d9c7c740c3a83fe8ce3a97ca7
|
[
"MIT"
] | 5
|
2018-11-03T19:56:59.000Z
|
2020-03-02T08:28:31.000Z
|
'''Provides an interface to the Matlab grasp detector.'''
# python
import os
# scipy
from numpy import array, ascontiguousarray, fromstring, reshape, rollaxis
# matlab
import matlab
import matlab.engine
# self
import hand_descriptor
from hand_descriptor import HandDescriptor
class GraspProxyMatlab:
    '''A class for interfacing with grasp detection.

    Wraps a MATLAB engine session running the GPD2 grasp-detection scripts
    and converts its results back into Python HandDescriptor objects.
    '''

    def __init__(self):
        '''Starts Matlab engine and adds the required script directories.'''
        # Both attributes point at the same local ./matlab directory.
        self.matlabDir = self.caffeDir = os.getcwd() + "/matlab/"
        print("Starting Matlab...")
        self.eng = matlab.engine.start_matlab()
        # add all of the required directories to the MATLAB path
        self.eng.addpath("/home/mgualti/Programs/caffe/matlab")
        self.eng.addpath(self.matlabDir + "gpd2")
        self.eng.addpath(self.matlabDir)
        # start a MATLAB parallel pool up front so detection calls are fast
        self.eng.parpool()

    def DetectGrasps(self, cloud, viewPoints, viewPointIndices, nSamples, scoreThresh, gpuId):
        '''Calls the DetectGrasps Matlab script.

        cloud: 3xN (transposed to Nx3 for MATLAB) point cloud;
        viewPoints/viewPointIndices: camera poses and per-point view indices
        (indices are converted to MATLAB 1-based); nSamples/scoreThresh/gpuId
        are forwarded to the MATLAB detector.
        Returns a list of HandDescriptor objects.
        '''
        viewPointIndices = viewPointIndices + 1  # convert to Matlab 1-indexing
        mCloud = matlab.double(cloud.T.tolist())
        mViewPoints = matlab.double(viewPoints.T.tolist())
        mViewPointIndices = matlab.int32(viewPointIndices.tolist(), size=(len(viewPointIndices), 1))
        # all three debug-plot stages disabled
        plotBitmap = matlab.logical([False, False, False])
        mGrasps = self.eng.DetectGrasps(
            mCloud, mViewPoints, mViewPointIndices, nSamples, scoreThresh, plotBitmap, gpuId)
        return self.UnpackGrasps(mGrasps)

    def UnpackGrasps(self, mGrasps):
        '''Extracts the list of grasps in Matlab format and returns a list in Python format.'''
        grasps = []
        for mGrasp in mGrasps:
            top = array(mGrasp["top"]).flatten()
            bottom = array(mGrasp["bottom"]).flatten()
            axis = array(mGrasp["axis"]).flatten()
            approach = array(mGrasp["approach"]).flatten()
            score = mGrasp["score"]
            # create grasp object centered between the fingers' top and bottom
            T = hand_descriptor.PoseFromApproachAxisCenter(approach, axis, 0.5*bottom + 0.5*top)
            grasp = HandDescriptor(T)
            grasp.score = score
            grasps.append(grasp)
        return grasps
| 31.261538
| 96
| 0.702756
|
6052a6a8497ff000c25bfbbbf15b1ef4ca35cc6b
| 1,569
|
py
|
Python
|
wdreconcile/typematcher.py
|
rpatil524/openrefine-wikibase
|
bfdbcf3dccc586c6a4fc5103162d7cfe0f69c15b
|
[
"MIT"
] | null | null | null |
wdreconcile/typematcher.py
|
rpatil524/openrefine-wikibase
|
bfdbcf3dccc586c6a4fc5103162d7cfe0f69c15b
|
[
"MIT"
] | null | null | null |
wdreconcile/typematcher.py
|
rpatil524/openrefine-wikibase
|
bfdbcf3dccc586c6a4fc5103162d7cfe0f69c15b
|
[
"MIT"
] | null | null | null |
from .utils import to_q
from .sparqlwikidata import sparql_wikidata
import config
from string import Template
class TypeMatcher(object):
    """
    Interface that caches the subclasses of parent classes.
    Cached using Redis sets, with expiration.
    """

    def __init__(self, redis_client):
        self.r = redis_client
        # Redis key namespace for the per-class children sets.
        self.prefix = config.redis_key_prefix+':children'
        self.ttl = 24*60*60 # 1 day

    def is_subclass(self, qid_1, qid_2):
        """
        Checks if the Wikidata item designated by
        the first QID is a subclass of the second.
        Equivalent SPARQL query:
        ?qid_1 wdt:P279* ?qid_2
        This is done by caching the children of
        the class via the "subclass of" (P279)
        relation.
        """
        self.prefetch_children(qid_2)
        return self.r.sismember(self._key_name(qid_2), qid_1)

    def prefetch_children(self, qid, force=False):
        """
        Prefetches (in Redis) all the children of a given class.

        NOTE(review): the *force* parameter is accepted but never used — a
        cached set is always reused until its TTL expires.
        """
        key_name = self._key_name(qid)
        if self.r.exists(key_name):
            return # children are already prefetched
        sparql_query = Template(config.sparql_query_to_fetch_subclasses).substitute(qid=qid)
        results = sparql_wikidata(sparql_query)
        for result in results["bindings"]:
            child_qid = to_q(result["child"]["value"])
            self.r.sadd(key_name, child_qid)
        # set expiration
        self.r.expire(key_name, self.ttl)

    def _key_name(self, qid):
        # e.g. "<prefix>:children:Q5"
        return ':'.join([self.prefix, qid])
| 29.055556
| 92
| 0.636711
|
cb3daa5d04943181caf328209a3294d59d23ac48
| 13,463
|
py
|
Python
|
pySPACE/tools/progressbar.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 32
|
2015-02-20T09:03:09.000Z
|
2022-02-25T22:32:52.000Z
|
pySPACE/tools/progressbar.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 5
|
2015-05-18T15:08:40.000Z
|
2020-03-05T19:18:01.000Z
|
pySPACE/tools/progressbar.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 18
|
2015-09-28T07:16:38.000Z
|
2021-01-20T13:52:19.000Z
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
"""Text progressbar library for Python
This library provides a text mode progressbar. This is typically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display differently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a different value every time
it's update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. And
automatically supports features like auto-resizing when available.
Changes in comparison to original code:
- added day display in ETA class
- small style improvements in documentation
- coding style adaptions
Original LGPL 2.1+ license::
Copyright (c) 2005 Nilton Volpato
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys, time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
class ProgressBarWidget(object):
    """Element of ProgressBar formatting.

    The ProgressBar object will call it's update value when an update
    is needed. It's size may change between call, but the results will
    not be good if the size changes drastically and repeatedly.
    """

    def update(self, pbar):
        """Returns the string representing the widget

        The parameter *pbar* is a reference to the calling ProgressBar,
        where one can access attributes of the class for knowing how
        the update must be made.

        At least this function must be overridden.
        """
        # Abstract: subclasses return the rendered widget text.
        pass
class ProgressBarWidgetHFill(object):
    """Variable width element of ProgressBar formatting

    The ProgressBar object will call it's update value, informing the
    width this object must the made. This is like TeX ``\\hfill``, it will
    expand to fill the line. You can use more than one in the same
    line, and they will all have the same width, and together will
    fill the line.
    """

    def update(self, pbar, width):
        """Returns the string representing the widget

        The parameter *pbar* is a reference to the calling ProgressBar,
        where one can access attributes of the class for knowing how
        the update must be made. The parameter width is the total
        horizontal width the widget must have.

        At least this function must be overridden.
        """
        # Abstract: subclasses render exactly *width* characters.
        pass
class ETA(ProgressBarWidget):
    """Widget for the Estimated Time of Arrival (or total time when done).

    Shows 'ETA: --:--:--' before any progress, 'Time: ...' once finished,
    and a projected 'ETA: ...' otherwise; runs of a day or more get a
    '<n>d ' prefix.
    """

    def format_time(self, seconds):
        """Render *seconds* as HH:MM:SS (wraps at 24 hours)."""
        return time.strftime('%H:%M:%S', time.gmtime(seconds))

    def update(self, pbar):
        if pbar.currval == 0:
            # No data yet — nothing to extrapolate from.
            return 'ETA: --:--:--'
        if pbar.finished:
            total = pbar.seconds_elapsed
            whole_days = total // 86400  # one day has 86400 seconds
            if whole_days > 0.0:
                return 'Time: %dd %s' % (int(whole_days), self.format_time(total))
            return 'Time: %s' % self.format_time(total)
        # Linear extrapolation from progress so far.
        elapsed = pbar.seconds_elapsed
        remaining = elapsed * pbar.maxval / pbar.currval - elapsed
        whole_days = remaining // 86400
        if whole_days > 0.0:
            return 'ETA: %dd %s' % (int(whole_days), self.format_time(remaining))
        return 'ETA: %s' % self.format_time(remaining)
class FileTransferSpeed(ProgressBarWidget):
    """Widget showing the transfer speed (useful for file transfers).

    Renders the current rate scaled to B/K/M/G/T/P units per second.
    """

    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B', 'K', 'M', 'G', 'T', 'P']

    def update(self, pbar):
        # Guard against a zero or near-zero elapsed time just after start.
        if pbar.seconds_elapsed < 2e-6:
            rate = 0.0
        else:
            rate = float(pbar.currval) / pbar.seconds_elapsed
        scaled = rate
        for unit in self.units:
            if scaled < 1000:
                break
            scaled /= 1000
        return self.fmt % (scaled, unit + '/s')
class RotatingMarker(ProgressBarWidget):
    """A rotating marker for filling the bar of progress"""

    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1  # advanced to 0 on the first update()

    def update(self, pbar):
        if pbar.finished:
            # Settle on the first marker once the bar completes.
            return self.markers[0]
        self.curmark = (self.curmark + 1)%len(self.markers)
        return self.markers[self.curmark]
class Percentage(ProgressBarWidget):
    """Just the percentage done, e.g. ' 42%'."""

    def update(self, pbar):
        return '%3d%%' % pbar.percentage()
class Bar(ProgressBarWidgetHFill):
    """The bar of progress. It will stretch to fill the line"""

    def __init__(self, marker='#', left='|', right='|'):
        # marker may be a plain string or a widget with an update() method
        # (e.g. RotatingMarker).
        self.marker = marker
        self.left = left
        self.right = right

    def _format_marker(self, pbar):
        # NOTE(review): `unicode` makes this Python-2-only; under Python 3
        # this line raises NameError.
        if isinstance(self.marker, (str, unicode)):
            return self.marker
        else:
            return self.marker.update(pbar)

    def update(self, pbar, width):
        # Fill the left portion proportionally to the completed percentage.
        percent = pbar.percentage()
        cwidth = width - len(self.left) - len(self.right)
        marked_width = int(percent * cwidth / 100)
        m = self._format_marker(pbar)
        bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
        return bar
class ReverseBar(Bar):
    """The reverse bar of progress, or bar of regress (fills right-to-left)."""
    def update(self, pbar, width):
        inner = width - len(self.left) - len(self.right)
        filled = int(pbar.percentage() * inner / 100)
        mark = self._format_marker(pbar)
        # Same as Bar.update but right-justified, so the fill grows leftwards.
        return self.left + (mark * filled).rjust(inner) + self.right
# Widgets used when a ProgressBar is created without an explicit widget list.
# NOTE(review): this list object is shared by every ProgressBar that relies on
# the default — fine as long as callers never mutate pbar.widgets in place.
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
    """Updates and prints a textual progress bar.

    The term_width parameter may be an integer, or None, in which case
    the terminal width is queried (via SIGWINCH/ioctl); if that fails it
    defaults to 79 columns.

    Simple use::

        >>> pbar = ProgressBar().start()
        >>> for i in xrange(100):
        ...     # do something
        ...     pbar.update(i+1)
        ...
        >>> pbar.finish()

    You can supply different widgets of any type in any order, and you can
    write your own. When implementing a widget's update method you may
    access any attribute or method of the ProgressBar object passed to it.
    The most useful attributes are:

    - currval: current value of the progress, 0 <= currval <= maxval
    - maxval: maximum (and final) value of the progress
    - finished: True once the bar has reached 100%, False otherwise
    - start_time: time of the first update() call
    - seconds_elapsed: seconds elapsed since start_time
    - percentage(): percentage of the progress (this is a method)
    """
    def __init__(self, maxval=100, widgets=default_widgets, term_width=None, fd=None):
        # Clamp maxval so percentage() never divides by zero.
        if maxval <= 0:
            maxval = 1
        self.maxval = maxval
        # NOTE(review): `widgets` defaults to the shared module-level list;
        # callers must not mutate it in place.
        self.widgets = widgets
        self.fd = fd if fd is not None else sys.stdout
        self.signal_set = False
        if term_width is None:
            try:
                # Probe the terminal size once, then track resizes via SIGWINCH.
                self.handle_resize(None,None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except:
                # Not a tty / no SIGWINCH (e.g. Windows, non-main thread):
                # fall back to a fixed width.
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        # -1 forces the very first update() to render.
        self.prev_percentage = -1
        self.start_time = None
        self.seconds_elapsed = 0
    def handle_resize(self, signum, frame):
        # SIGWINCH handler: re-query the terminal size.
        # Relies on module-level `array`, `ioctl` and `termios` imports.
        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
        self.term_width = w
    def percentage(self):
        """Returns the percentage of the progress"""
        return self.currval*100.0 / self.maxval
    def _format_widgets(self):
        # Render all widgets; horizontally-filling widgets (HFill) are
        # rendered last so they can share the leftover line width equally.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for i, w in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)
                hfill_inds.append(i)
                num_hfill += 1
            elif isinstance(w, (str, unicode)):
                # Literal strings are passed through verbatim.
                r.append(w)
                currwidth += len(w)
            else:
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
        for iw in hfill_inds:
            # NOTE(review): Python-2 integer division assumed here; under
            # Python 3 this yields a float width — confirm before porting.
            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
        return r
    def _format_line(self):
        # Pad to full width so a shrinking line overwrites the previous one.
        return ''.join(self._format_widgets()).ljust(self.term_width)
    def _need_update(self):
        # Redraw only when the integer percentage changes (throttles output).
        return int(self.percentage()) != int(self.prev_percentage)
    def update(self, value):
        """Updates the progress bar to a new value"""
        # NOTE(review): assert is stripped under `python -O`; input validation
        # by exception would be more robust.
        assert (0.0 <= value <= self.maxval)
        self.currval = value
        if not self._need_update() or self.finished:
            return
        if not self.start_time:
            self.start_time = time.time()
        self.seconds_elapsed = time.time() - self.start_time
        self.prev_percentage = self.percentage()
        if value != self.maxval:
            # '\r' keeps the cursor on the same line for in-place redraw.
            self.fd.write(self._format_line() + '\r')
        else:
            self.finished = True
            self.fd.write(self._format_line() + '\n')
        # Make sure the results are written out
        self.fd.flush()
    def start(self):
        """Start measuring time, and prints the bar at 0%.

        It returns self so you can use it like this::

            >>> pbar = ProgressBar().start()
            >>> for i in xrange(100):
            ...     # do something
            ...     pbar.update(i+1)
            ...
            >>> pbar.finish()
        """
        self.update(0)
        return self
    def finish(self):
        """Used to tell the progress is finished"""
        self.update(self.maxval)
        if self.signal_set:
            # Restore the default SIGWINCH handler we replaced in __init__.
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
if __name__=='__main__':
    import os
    # Demo suite exercising the widgets above.
    # NOTE(review): the bare `print` lines below are Python-2 print
    # statements; under Python 3 they evaluate to the print function and
    # emit nothing.
    def example1():
        # All stock widgets together, with a rotating marker inside the bar.
        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example2():
        # A custom widget subclassing a stock one.
        class CrazyFileTransferSpeed(FileTransferSpeed):
            """It's bigger between 45 and 80 percent"""
            def update(self, pbar):
                if 45 < pbar.percentage() < 80:
                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
                else:
                    return FileTransferSpeed.update(self,pbar)
        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000)
        # maybe do something
        pbar.start()
        for i in range(2000000):
            # do something
            pbar.update(5*i+1)
        pbar.finish()
        print
    def example3():
        # Two horizontally-filling widgets share the line width.
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example4():
        # Custom bar delimiters and a slow, sparse update pattern.
        widgets = ['Test: ', Percentage(), ' ',
                   Bar(marker='0',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=500)
        pbar.start()
        for i in range(100,500+1,50):
            time.sleep(0.2)
            pbar.update(i)
        pbar.finish()
        print
    example1()
    example2()
    example3()
    example4()
| 33.573566
| 91
| 0.605511
|
22927fe9b48d1a397dee5837531530424087612d
| 501
|
py
|
Python
|
3_advanced/chapter18/practice/logarithm.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 4
|
2021-03-01T00:32:45.000Z
|
2021-05-21T22:01:52.000Z
|
3_advanced/chapter18/practice/logarithm.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 29
|
2020-09-12T22:56:04.000Z
|
2021-09-25T17:08:42.000Z
|
3_advanced/chapter18/practice/logarithm.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 7
|
2021-02-25T01:50:55.000Z
|
2022-02-28T00:00:42.000Z
|
"""
Create a recursive method that mirrors how a logarithm works in math.
You can have the base by default by ten. You do not have to deal
with decimals, just worry about returning integers.
Note: Logarithms return the power that you raise a base number to
in order to get a number.
Ex: logarithm of 9 to base 3 = 2; In this example, since 3 to the
2nd power gives you 9, the logarithm of 9 to base 3 is equal to 2.
"""
def logarithm(n, base=10):
    """Return the integer (floor) logarithm of ``n`` to ``base``, recursively.

    Mirrors how a logarithm works in math: the result is the power the base
    must be raised to in order to reach ``n``, truncated to an integer
    (no decimals, per the exercise statement). Example: logarithm(9, 3) == 2
    because 3 ** 2 == 9.

    :param n: positive integer to take the logarithm of.
    :param base: integer base >= 2 (default 10, as the exercise suggests).
    :raises ValueError: if ``n`` < 1 or ``base`` < 2.
    """
    if base < 2:
        raise ValueError("base must be >= 2")
    if n < 1:
        raise ValueError("n must be a positive integer")
    # Base case: base**0 == 1 covers everything smaller than the base.
    if n < base:
        return 0
    # Each recursion step strips one factor of `base` off n.
    return 1 + logarithm(n // base, base)
| 29.470588
| 69
| 0.728543
|
8e2035638701ed4823e9583e319a0cacf11853e6
| 575
|
py
|
Python
|
yap.py
|
pythonista-blitz/unrailed-ai
|
bd511871234fe50dd8e0c9ab7aaa93d73df37cb0
|
[
"MIT"
] | 1
|
2021-09-06T00:54:29.000Z
|
2021-09-06T00:54:29.000Z
|
yap.py
|
pythonista-blitz/unrailed-ai
|
bd511871234fe50dd8e0c9ab7aaa93d73df37cb0
|
[
"MIT"
] | null | null | null |
yap.py
|
pythonista-blitz/unrailed-ai
|
bd511871234fe50dd8e0c9ab7aaa93d73df37cb0
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
from yapf.yapflib.yapf_api import FormatFile

# Reformat every Python file in a directory in place with yapf.
my_parser = argparse.ArgumentParser(description='List the content of a folder')
my_parser.add_argument('Path',
                       metavar='path',
                       type=str,
                       help='the path to list')
args = my_parser.parse_args()
input_path = args.Path
# Top-level files only (first triple yielded by os.walk).
_, _, filenames = next(os.walk(input_path))
print(*filenames)
for file in filenames:
    # BUG FIX: os.path.splitext() returns the extension WITH its leading dot
    # (".py"), so the old comparison against "py" never matched and no file
    # was ever reformatted.
    if os.path.splitext(file)[1] == ".py":
        FormatFile(os.path.join(input_path, file), in_place=True)
| 27.380952
| 79
| 0.64
|
6568b05465221d9776a6583dcae32729b8857807
| 1,163
|
py
|
Python
|
jawa/attributes/local_variable_type.py
|
4577/Jawa
|
23f93020ef6687567e45a9afa09bfd6e0faf6f0a
|
[
"MIT"
] | 1
|
2021-12-30T10:53:57.000Z
|
2021-12-30T10:53:57.000Z
|
jawa/attributes/local_variable_type.py
|
GiantTreeLP/jawa-fixed
|
dbdd1cb6ef2439bf6496adef8732775c792036ec
|
[
"MIT"
] | null | null | null |
jawa/attributes/local_variable_type.py
|
GiantTreeLP/jawa-fixed
|
dbdd1cb6ef2439bf6496adef8732775c792036ec
|
[
"MIT"
] | 1
|
2021-01-21T12:17:39.000Z
|
2021-01-21T12:17:39.000Z
|
from struct import pack
from collections import namedtuple
from jawa.attribute import Attribute
# One row of a LocalVariableTypeTable: the bytecode range
# [start_pc, start_pc + length) in which the variable is live, constant-pool
# indexes of its name and generic type signature, and its local-variable slot.
local_variable_type_entry = namedtuple('local_variable_type_entry', [
    'start_pc',
    'length',
    'name_index',
    'signature_index',
    'index'
])
class LocalVariableTypeTableAttribute(Attribute):
    """The ``LocalVariableTypeTable`` class-file attribute: generic-signature
    information for local variables (JVM spec §4.7.14)."""
    ADDED_IN = '1.0.2'
    MINIMUM_CLASS_VERSION = (45, 3)

    def __init__(self, table, name_index=None):
        super().__init__(
            table,
            name_index or table.cf.constants.create_utf8(
                'LocalVariableTypeTable'
            ).index
        )
        self.local_variables = []

    def unpack(self, info):
        # u2 row count, then five u2 fields per row.
        count = info.u2()
        raw = info.unpack('>{0}H'.format(count * 5))
        self.local_variables = [
            local_variable_type_entry(*raw[row:row + 5])
            for row in range(0, count * 5, 5)
        ]

    def pack(self):
        # Inverse of unpack: row count followed by the flattened rows.
        count = len(self.local_variables)
        flat = sum(self.local_variables, ())
        return pack('>H{0}H'.format(count * 5), count, *flat)

    def __repr__(self):
        return f'<LocalVariableTypeTableAttribute({self.local_variables!r})>'
| 24.744681
| 77
| 0.599312
|
b5a8d3e701f86470810edf670a2fe0b2b71b8f41
| 19,825
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/batch/v20181201/pool.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/batch/v20181201/pool.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/batch/v20181201/pool.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Pool']
class Pool(pulumi.CustomResource):
    # NOTE(review): generated by the Pulumi SDK Generator (see file header);
    # keep hand edits to a minimum — regenerate instead of patching logic.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 application_licenses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 application_packages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationPackageReferenceArgs']]]]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CertificateReferenceArgs']]]]] = None,
                 deployment_configuration: Optional[pulumi.Input[pulumi.InputType['DeploymentConfigurationArgs']]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 inter_node_communication: Optional[pulumi.Input[str]] = None,
                 max_tasks_per_node: Optional[pulumi.Input[int]] = None,
                 metadata: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetadataItemArgs']]]]] = None,
                 network_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkConfigurationArgs']]] = None,
                 pool_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 scale_settings: Optional[pulumi.Input[pulumi.InputType['ScaleSettingsArgs']]] = None,
                 start_task: Optional[pulumi.Input[pulumi.InputType['StartTaskArgs']]] = None,
                 task_scheduling_policy: Optional[pulumi.Input[pulumi.InputType['TaskSchedulingPolicyArgs']]] = None,
                 user_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccountArgs']]]]] = None,
                 vm_size: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Contains information about a pool.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the Batch account.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] application_licenses: The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationPackageReferenceArgs']]]] application_packages: Changes to application packages affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CertificateReferenceArgs']]]] certificates: For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
        :param pulumi.Input[pulumi.InputType['DeploymentConfigurationArgs']] deployment_configuration: Using CloudServiceConfiguration specifies that the nodes should be creating using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses Azure Virtual Machines (IaaS).
        :param pulumi.Input[str] display_name: The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
        :param pulumi.Input[str] inter_node_communication: This imposes restrictions on which nodes can be assigned to the pool. Enabling this value can reduce the chance of the requested number of nodes to be allocated in the pool. If not specified, this value defaults to 'Disabled'.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetadataItemArgs']]]] metadata: The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
        :param pulumi.Input[pulumi.InputType['NetworkConfigurationArgs']] network_configuration: The network configuration for a pool.
        :param pulumi.Input[str] pool_name: The pool name. This must be unique within the account.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
        :param pulumi.Input[pulumi.InputType['ScaleSettingsArgs']] scale_settings: Defines the desired size of the pool. This can either be 'fixedScale' where the requested targetDedicatedNodes is specified, or 'autoScale' which defines a formula which is periodically reevaluated. If this property is not specified, the pool will have a fixed scale with 0 targetDedicatedNodes.
        :param pulumi.Input[pulumi.InputType['StartTaskArgs']] start_task: In an PATCH (update) operation, this property can be set to an empty object to remove the start task from the pool.
        :param pulumi.Input[str] vm_size: For information about available sizes of virtual machines for Cloud Services pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall. For information about available VM sizes for pools using images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
        """
        # Back-compat shims for the deprecated __name__/__opts__ call style.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set => looking up an existing resource; no inputs allowed.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            # Required inputs are validated here; the rest pass through as-is.
            if account_name is None:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            __props__['application_licenses'] = application_licenses
            __props__['application_packages'] = application_packages
            __props__['certificates'] = certificates
            __props__['deployment_configuration'] = deployment_configuration
            __props__['display_name'] = display_name
            __props__['inter_node_communication'] = inter_node_communication
            __props__['max_tasks_per_node'] = max_tasks_per_node
            __props__['metadata'] = metadata
            __props__['network_configuration'] = network_configuration
            if pool_name is None:
                raise TypeError("Missing required property 'pool_name'")
            __props__['pool_name'] = pool_name
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['scale_settings'] = scale_settings
            __props__['start_task'] = start_task
            __props__['task_scheduling_policy'] = task_scheduling_policy
            __props__['user_accounts'] = user_accounts
            __props__['vm_size'] = vm_size
            # Output-only properties, populated by the provider after creation.
            __props__['allocation_state'] = None
            __props__['allocation_state_transition_time'] = None
            __props__['auto_scale_run'] = None
            __props__['creation_time'] = None
            __props__['current_dedicated_nodes'] = None
            __props__['current_low_priority_nodes'] = None
            __props__['etag'] = None
            __props__['last_modified'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['provisioning_state_transition_time'] = None
            __props__['resize_operation_status'] = None
            __props__['type'] = None
        # Aliases let state recorded under other API versions resolve to this type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:batch/latest:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20170901:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20190401:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20190801:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20200301:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20200501:Pool"), pulumi.Alias(type_="azure-nextgen:batch/v20200901:Pool")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Pool, __self__).__init__(
            'azure-nextgen:batch/v20181201:Pool',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Pool':
        """
        Get an existing Pool resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Empty __props__: all state is read back from the provider by id.
        __props__ = dict()
        return Pool(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="allocationState")
    def allocation_state(self) -> pulumi.Output[str]:
        return pulumi.get(self, "allocation_state")
    @property
    @pulumi.getter(name="allocationStateTransitionTime")
    def allocation_state_transition_time(self) -> pulumi.Output[str]:
        return pulumi.get(self, "allocation_state_transition_time")
    @property
    @pulumi.getter(name="applicationLicenses")
    def application_licenses(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail.
        """
        return pulumi.get(self, "application_licenses")
    @property
    @pulumi.getter(name="applicationPackages")
    def application_packages(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationPackageReferenceResponse']]]:
        """
        Changes to application packages affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged.
        """
        return pulumi.get(self, "application_packages")
    @property
    @pulumi.getter(name="autoScaleRun")
    def auto_scale_run(self) -> pulumi.Output['outputs.AutoScaleRunResponse']:
        """
        This property is set only if the pool automatically scales, i.e. autoScaleSettings are used.
        """
        return pulumi.get(self, "auto_scale_run")
    @property
    @pulumi.getter
    def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.CertificateReferenceResponse']]]:
        """
        For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
        """
        return pulumi.get(self, "certificates")
    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> pulumi.Output[str]:
        return pulumi.get(self, "creation_time")
    @property
    @pulumi.getter(name="currentDedicatedNodes")
    def current_dedicated_nodes(self) -> pulumi.Output[int]:
        return pulumi.get(self, "current_dedicated_nodes")
    @property
    @pulumi.getter(name="currentLowPriorityNodes")
    def current_low_priority_nodes(self) -> pulumi.Output[int]:
        return pulumi.get(self, "current_low_priority_nodes")
    @property
    @pulumi.getter(name="deploymentConfiguration")
    def deployment_configuration(self) -> pulumi.Output[Optional['outputs.DeploymentConfigurationResponse']]:
        """
        Using CloudServiceConfiguration specifies that the nodes should be creating using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses Azure Virtual Machines (IaaS).
        """
        return pulumi.get(self, "deployment_configuration")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[Optional[str]]:
        """
        The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        The ETag of the resource, used for concurrency statements.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="interNodeCommunication")
    def inter_node_communication(self) -> pulumi.Output[Optional[str]]:
        """
        This imposes restrictions on which nodes can be assigned to the pool. Enabling this value can reduce the chance of the requested number of nodes to be allocated in the pool. If not specified, this value defaults to 'Disabled'.
        """
        return pulumi.get(self, "inter_node_communication")
    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> pulumi.Output[str]:
        """
        This is the last time at which the pool level data, such as the targetDedicatedNodes or autoScaleSettings, changed. It does not factor in node-level changes such as a compute node changing state.
        """
        return pulumi.get(self, "last_modified")
    @property
    @pulumi.getter(name="maxTasksPerNode")
    def max_tasks_per_node(self) -> pulumi.Output[Optional[int]]:
        return pulumi.get(self, "max_tasks_per_node")
    @property
    @pulumi.getter
    def metadata(self) -> pulumi.Output[Optional[Sequence['outputs.MetadataItemResponse']]]:
        """
        The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
        """
        return pulumi.get(self, "metadata")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkConfiguration")
    def network_configuration(self) -> pulumi.Output[Optional['outputs.NetworkConfigurationResponse']]:
        """
        The network configuration for a pool.
        """
        return pulumi.get(self, "network_configuration")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="provisioningStateTransitionTime")
    def provisioning_state_transition_time(self) -> pulumi.Output[str]:
        return pulumi.get(self, "provisioning_state_transition_time")
    @property
    @pulumi.getter(name="resizeOperationStatus")
    def resize_operation_status(self) -> pulumi.Output['outputs.ResizeOperationStatusResponse']:
        """
        Describes either the current operation (if the pool AllocationState is Resizing) or the previously completed operation (if the AllocationState is Steady).
        """
        return pulumi.get(self, "resize_operation_status")
    @property
    @pulumi.getter(name="scaleSettings")
    def scale_settings(self) -> pulumi.Output[Optional['outputs.ScaleSettingsResponse']]:
        """
        Defines the desired size of the pool. This can either be 'fixedScale' where the requested targetDedicatedNodes is specified, or 'autoScale' which defines a formula which is periodically reevaluated. If this property is not specified, the pool will have a fixed scale with 0 targetDedicatedNodes.
        """
        return pulumi.get(self, "scale_settings")
    @property
    @pulumi.getter(name="startTask")
    def start_task(self) -> pulumi.Output[Optional['outputs.StartTaskResponse']]:
        """
        In an PATCH (update) operation, this property can be set to an empty object to remove the start task from the pool.
        """
        return pulumi.get(self, "start_task")
    @property
    @pulumi.getter(name="taskSchedulingPolicy")
    def task_scheduling_policy(self) -> pulumi.Output[Optional['outputs.TaskSchedulingPolicyResponse']]:
        return pulumi.get(self, "task_scheduling_policy")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userAccounts")
    def user_accounts(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccountResponse']]]:
        return pulumi.get(self, "user_accounts")
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Output[Optional[str]]:
        """
        For information about available sizes of virtual machines for Cloud Services pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall. For information about available VM sizes for pools using images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
        """
        return pulumi.get(self, "vm_size")
    # Map between the provider's camelCase names and Python snake_case.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 59.89426
| 882
| 0.70628
|
328b13e7ae9e98077002fa13aa2707b1720e91fb
| 1,281
|
py
|
Python
|
cvs_trans.py
|
SL-RU/Under
|
31291588e92351c068919e3487ec814f1a1f40e2
|
[
"MIT"
] | null | null | null |
cvs_trans.py
|
SL-RU/Under
|
31291588e92351c068919e3487ec814f1a1f40e2
|
[
"MIT"
] | null | null | null |
cvs_trans.py
|
SL-RU/Under
|
31291588e92351c068919e3487ec814f1a1f40e2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Merge a UTF-16 translation CSV ("original"\t"translated" pairs) into
# STRG.txt, writing translate.txt with translated lines where a match exists.
import os, sys
lns = list()
i = 0
# Phase 1: parse ImportText.csv into (original, translated) pairs.
with open("ImportText.csv", encoding='utf-16') as f:
    for l in f:
        q = ()
        i+=1
        # Fields are quoted and separated by either a tab or a single space.
        if "\t" in l:
            q = l.split("\"\t\"")
        else:
            q = l.split("\" \"")
        if len(q) == 1:
            # No separator found: report the unparseable line.
            print(str(i) + ": " + l)
        else:
            # Strip the leading quote of the first field and the trailing
            # quote + newline of the second.
            q[0] = q[0][1:]
            q[1] = q[1][:len(q[1]) - 2]
            lns.append((q[0], q[1]))
        # NOTE(review): this debug print runs even for unparsed lines, where
        # q has a single element — q[1] would raise IndexError; confirm input
        # always parses within the first 30 lines.
        if i < 30:
            print(str(i) + ": " + q[0] + "><" + q[1] + "|||")
print("csv ok")
las = 0
j = 0
i = 0
strg = list()
# Phase 2: read the source string table, one entry per line.
with open("STRG.txt", encoding='utf-8') as f:
    for l in f:
        l = l.replace("\n", "")
        #l = l[:len(l) - 2]
        strg.append(l)
        i += 1
        if i < 30:
            print(l)
        #print(l)
i = 0
truse = 0
# Phase 3: emit each source string, substituting its translation when found.
# NOTE(review): linear scan per string — O(len(strg) * len(lns)); a dict
# lookup would be faster, but output order/behavior is identical.
with open("translate.txt", 'w', encoding='utf-16') as f:
    for l in strg:
        ok = 0
        for g in lns:
            if l == g[0]:
                f.write(g[1] + "\n")
                truse += 1
                ok = 1
                break
        if ok == 0:
            f.write(l + "\n")
        i += 1
        # Progress report every 500 strings.
        if i%500 == 0:
            print(str(i) + "/" + str(len(strg)))
print("DONE")
print("Used trans: " + str(truse) + " from " + str(len(lns)))
| 19.119403
| 65
| 0.360656
|
d3829f6a2bb888220705b7c273f8e1fea774dfa9
| 17,484
|
py
|
Python
|
musket_text/preprocessors.py
|
petrochenko-pavel-a/musket_text
|
9571b9d554ed66496c911222d319e42242351eb6
|
[
"MIT"
] | null | null | null |
musket_text/preprocessors.py
|
petrochenko-pavel-a/musket_text
|
9571b9d554ed66496c911222d319e42242351eb6
|
[
"MIT"
] | null | null | null |
musket_text/preprocessors.py
|
petrochenko-pavel-a/musket_text
|
9571b9d554ed66496c911222d319e42242351eb6
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from musket_core import utils,preprocessing,context,model,datasets
from musket_core.context import get_current_project_data_path
from nltk.tokenize import casual_tokenize
from musket_core.datasets import DataSet, PredictionItem
from musket_core import configloader
from musket_core import caches,metrics
from collections import Counter
import tqdm
import keras
from keras import backend as K
from future.types import no
from musket_core.caches import cache_name
from builtins import int
from musket_core.preprocessing import PreproccedPredictionItem
# In-process cache of parsed embedding files, keyed by embedding file name.
_loaded={}
def get_coefs(word, *arr):
    """Pair an embedding-file token with its float32 coefficient vector."""
    vector = np.asarray(arr, dtype='float32')
    return word, vector
def embeddings(EMBEDDING_FILE:str):
    """Load a word-embedding file from the project data folder.

    Returns a dict mapping word -> numpy float32 vector. Results are cached
    both in memory (module-level ``_loaded``) and on disk (``*.embcache``).
    Text formats are parsed line-by-line; ``*.bin`` files are loaded as
    word2vec binaries via gensim.

    :param EMBEDDING_FILE: file name relative to the project data path.
    """
    path=context.get_current_project_data_path()
    emb=os.path.join(path,EMBEDDING_FILE)
    if EMBEDDING_FILE in _loaded:
        return _loaded[EMBEDDING_FILE]
    cache=path
    utils.ensure(cache)
    # NOTE(review): the cache path is built by plain string concatenation
    # (no os.path.join); kept as-is so existing caches stay discoverable.
    if os.path.exists(cache+EMBEDDING_FILE+".embcache"):
        result=utils.load(cache+EMBEDDING_FILE+".embcache")
        _loaded[EMBEDDING_FILE]=result
        return result
    if not EMBEDDING_FILE.endswith(".bin"):
        # len(o)>100 skips the header line and truncated/garbage lines.
        result= dict(get_coefs(*o.strip().split(" ")) for o in open(emb,encoding="utf8",errors="ignore") if len(o)>100)
    else:
        import gensim
        vectors=gensim.models.KeyedVectors.load_word2vec_format(emb, binary=True)
        # BUG FIX: the previous code set attributes on a plain dict
        # (result.dict = ..., result.vectors = ...), which raises
        # AttributeError. Build the same word -> vector mapping that the
        # text branch produces.
        result={word: vectors[word] for word in vectors.vocab}
    _loaded[EMBEDDING_FILE]=result
    utils.save(cache+EMBEDDING_FILE+".embcache", result)
    return result
@preprocessing.dataset_transformer
def pad_sequence_labeling(inp:datasets.DataSet,maxLen=-1)->datasets.DataSet:
    """Pad or truncate every sequence-labeling sample to exactly ``maxLen``.

    Tokens are padded with the literal "eos" string and labels with the
    dataset's no-token class (the class mapped from "O" when present,
    otherwise the dataset's declared default).
    """
    lm=inp
    # For composite datasets, read the class mapping from the first component.
    if isinstance(inp, datasets.CompositeDataSet):
        lm=inp.components[0]
    # num2Class[clazzColumn] appears to be (name->id dict, ..., default id) —
    # TODO(review): confirm against musket_core.datasets.
    no_token_class=lm.num2Class[lm.clazzColumn][2]
    if "O" in lm.num2Class[lm.clazzColumn][0]:
        no_token_class=lm.num2Class[lm.clazzColumn][0]['O']
    def pad_sequence_label(x):
        # Per-sample transform: right-pad, then truncate to maxLen.
        tokenText=list(x.x)
        tokenClazz=list(x.y)
        while len(tokenClazz)<maxLen:
            tokenClazz.append(no_token_class)
            tokenText.append("eos")
        if len(tokenClazz)>maxLen:
            tokenClazz=tokenClazz[0:maxLen]
            tokenText=tokenText[0:maxLen]
        return preprocessing.PreproccedPredictionItem(x.id,np.array(tokenText),np.array(tokenClazz),x)
    rs= preprocessing.PreprocessedDataSet(inp,pad_sequence_label,True)
    return rs
@preprocessing.dataset_preprocessor
def tokens_to_case(inp):
    """Map each token to four boolean surface-form features:
    [is lowercase, is uppercase, is digit, is alphabetic]."""
    features = [
        [token.lower() == token, token.upper() == token,
         token.isdigit(), token.isalpha()]
        for token in inp
    ]
    return np.array(features)
@preprocessing.dataset_preprocessor
def lowercase(inp:str):
    """Lower-case a single string, or element-wise for a token sequence."""
    if isinstance(inp, str):
        return inp.lower()
    lowered = [token.lower() for token in inp]
    return np.array(lowered)
class CropFirst1(keras.layers.Layer):
    """Keras layer that keeps only the first timestep of a 3-D tensor.

    Input shape (batch, time, features) -> output shape (batch, features).
    """
    def build(self, input_shape):
        keras.layers.Layer.build(self, input_shape)
    def call(self, inp):
        # Select timestep 0 for every sample in the batch.
        return inp[:,0,:]
    def compute_output_shape(self, input_shape):
        # BUG FIX: the output keeps the batch dimension (input_shape[0]);
        # the previous code returned (time, features) instead of
        # (batch, features), which misreports shapes downstream.
        return (input_shape[0],input_shape[2])
@model.block
class bert():
    """Model block wrapping a pretrained Google BERT encoder.

    TensorFlow backend only; the heavy checkpoint load is deferred to the
    first call.
    """
    def __init__(self, v):
        # `v` is the block's config value; only the backend check runs here.
        if K.backend() != 'tensorflow':
            raise RuntimeError('BERT is only available '
                               'with the TensorFlow backend.')
        self.g_bert = None
    def __call__(self,inp:list):
        # inp: [input_ids, input_type_ids, token_pos]; the first input also
        # carries the BertConfig contribution (path + max sequence length).
        if self.g_bert is None:
            from musket_text.bert.load import load_google_bert
            cfg=inp[0].contribution
            path=cfg.path
            max_len=cfg.len
            self.g_bert, cfg = load_google_bert(get_current_project_data_path()+path + '/', max_len=max_len, use_attn_mask=False)
            self.outputs = self.g_bert.outputs
        result = self.g_bert([inp[0],inp[1],inp[2]])
        # Only the first model output (sequence representation) is exposed.
        return result[0]
def bertDeployHandler(p1,cfg,p2):
    """Deploy handler: copy the BERT model assets referenced by preprocessor
    *p1* into the deployment folder *p2* under "assets/".

    Best-effort: failures are logged to stderr and otherwise ignored so that
    deployment of the rest of the model can proceed.
    """
    try:
        contrib=p1.contribution
        # Normalize a leading slash so os.path.join does not discard p2.
        if contrib.path[0]=='/':
            contrib.path=contrib.path[1:]
        nm=os.path.join(p2,"assets",contrib.path)
        import shutil
        shutil.copytree(get_current_project_data_path()+'/'+contrib.path + '/', nm)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; asset copying remains best-effort.
        import traceback
        traceback.print_exc()
@preprocessing.dataset_transformer
def text_to_bert_input(inp,path,max_len):
    """Transform raw text items into BERT inputs (ids, type ids, positions).

    `path` is the BERT model directory relative to the project data path; the
    tokenizer is created once and reused for every item.  The resulting dataset
    carries a BertConfig contribution so the bert block and the deploy handler
    can locate the model assets.
    """
    from musket_text.bert.bert_encoder import create_tokenizer
    from musket_text.bert.input_constructor import prepare_input
    bertTokenizer = create_tokenizer(get_current_project_data_path()+path)
    @preprocessing.deployHandler(bertDeployHandler)
    def transform2index(x):
        bInput = prepare_input(x, max_len, bertTokenizer, False)
        # prepare_input returns batched arrays; unwrap the single element of each.
        if bInput.attn_mask is not None:
            return [x[0] for x in [bInput.input_ids, bInput.input_type_ids, bInput.token_pos, bInput.attn_mask]]
        else:
            return [x[0] for x in [bInput.input_ids, bInput.input_type_ids, bInput.token_pos]]
    rs= preprocessing.PreprocessedDataSet(inp,transform2index,False)
    rs.path=path
    rs.contribution=BertConfig(path,max_len)
    return rs
@model.block
def takeFirstToken(inp):
    """Model block: select the first timestep (e.g. BERT's [CLS] position) of a sequence tensor."""
    return CropFirst1()(inp)
class BertConfig:
    """Value object describing a deployed BERT model: its asset path (relative
    to the project data path) and the maximum input sequence length."""
    def __init__(self, path, ln):
        self.path, self.len = path, ln
@preprocessing.dataset_preprocessor
def tokenize(inp):
    """Tokenize one text with NLTK's casual_tokenize; return [] on failure.

    Failures are logged to stdout so bad rows are visible but do not abort
    the preprocessing pipeline.
    """
    try:
        return casual_tokenize(inp)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt is not swallowed.
        print('Error tokenizing: ' + str(inp))
        return []
@preprocessing.dataset_preprocessor
def tokenize_xy(inp:PredictionItem):
    """Tokenize both the input (x) and label (y) text of a prediction item.

    Returns a new PreproccedPredictionItem wrapping the original item, or []
    if either side fails to tokenize (logged, pipeline continues).
    """
    try:
        new_x = casual_tokenize(inp.x)
        new_y = casual_tokenize(inp.y)
        return PreproccedPredictionItem(inp.id, new_x, new_y, inp)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt is not swallowed.
        print('Error tokenizing prediction item: x = ' + str(inp.x) + ' y = ' + str(inp.y))
        return []
class Vocabulary:
    """Bidirectional word <-> index mapping.

    `dict` maps word -> index, `i2w` maps index -> word, and `unknown` is the
    index reserved for out-of-vocabulary words (len of the distinct word set).
    """
    def __init__(self, words):
        self.dict = {}
        self.i2w = {}
        for index, word in enumerate(words):
            self.dict[word] = index
            self.i2w[index] = word
        self.unknown = len(self.dict)
def buildVocabulary(inp:DataSet,max_words=None, use_y=False):
    """Count tokens over a dataset and build a Vocabulary of the most common ones.

    Args:
        inp: dataset whose items expose token sequences as .x (or .y).
        max_words: keep only this many most-frequent tokens; None or -1 keeps all.
        use_y: count the label (y) tokens instead of the input (x) tokens.

    Returns:
        A Vocabulary over the stripped string forms of the selected tokens,
        ordered by descending frequency.
    """
    counter=Counter()
    if max_words==-1:
        max_words=None
    for i in tqdm.tqdm(range(len(inp)),desc="Building vocabulary for: " + str(inp) + ", " + ("Y" if use_y else "X") + "axis"):
        p=inp[i]
        # Count either the label tokens (y) or the input tokens (x).
        tokens = p.y if use_y else p.x
        for c in tokens:
            counter[c]+=1
    # (Removed dead locals word2Index/indexToWord/num -- they were never used;
    # Vocabulary builds both maps itself.)
    words=counter.most_common(max_words)
    return Vocabulary([str(x[0]).strip() for x in words])
# Process-wide cache of loaded Vocabulary objects, keyed by vocabulary file name.
_vocabs={}
def vocabularyDeployHandler(p1,cfg,p2):
    """Deploy handler: persist the vocabulary of preprocessor *p1* into the
    deployment folder *p2* under "assets/".  Best-effort: failures are logged
    and otherwise ignored.
    """
    try:
        # NOTE(review): this file name omits the "_x"/"_y" axis suffix used by
        # get_vocabulary_name -- confirm the deploy-time loader expects this.
        nm="data"+ ("." + str(p1.max_words) if p1.max_words > 0 else "")+".vocab"
        nm=os.path.join(p2,"assets",nm)
        utils.save(nm,p1.vocabulary)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; vocabulary deployment remains best-effort.
        import traceback
        traceback.print_exc()
def get_vocabulary_name(cache_name:str, max_words:int, use_y):
    """Compose the vocabulary cache file name for a dataset.

    The name encodes the axis ("_x" for inputs, "_y" for labels) and, when a
    positive cap is in effect, the vocabulary size limit.
    """
    axis_suffix = "_y" if use_y else "_x"
    size_suffix = "." + str(max_words) if max_words > 0 else ""
    return cache_name + axis_suffix + size_suffix + ".vocab"
@preprocessing.dataset_transformer
def y_tokens_to_indexes(inp:DataSet,max_words=-1,maxLen=-1,file_name = None)->DataSet:
    """Same as tokens_to_indexes, but indexes the label (y) tokens instead of the inputs."""
    return tokens_to_indexes(inp, max_words, maxLen, file_name, True)
@preprocessing.dataset_transformer
def tokens_to_indexes(inp:DataSet,max_words=-1,maxLen=-1, file_name = None, use_y=False)->DataSet:
    """Replace token sequences with integer vocabulary indexes.

    The vocabulary is resolved in this order:
      1. an explicit file under the project data path (file_name), or the
         dataset-specific vocab under the project "assets" folder;
      2. the cached vocabulary of the *training* dataset, so validation/test
         data is always encoded with the train-time vocabulary;
      3. built from scratch over `inp` and cached.

    Args:
        inp: dataset whose items carry token sequences in x (or y).
        max_words: cap on vocabulary size; -1 means unlimited.
        maxLen: fixed output length; -1 keeps each item's own length.
        file_name: optional explicit vocabulary file name.
        use_y: index the label tokens instead of the input tokens.
    """
    voc=caches.get_cache_dir()
    if file_name is not None:
        name = file_name
    else:
        name=get_vocabulary_name(caches.cache_name(inp),max_words, use_y)
    # WE SHOULD USE TRAIN VOCABULARY IN ALL CASES
    if file_name is not None:
        file_path=os.path.join(context.get_current_project_data_path(), file_name)
    else:
        file_path=os.path.join(context.get_current_project_path(),"assets",get_vocabulary_name(caches.cache_name(inp),max_words, use_y))
    vocabulary=None
    if os.path.exists(file_path):
        if name in _vocabs:
            vocabulary= _vocabs[name]
        else:
            vocabulary=utils.load(file_path)
            _vocabs[name]=vocabulary
    if vocabulary is None:
        try:
            # When processing a dataset other than the training one, reuse the
            # vocabulary name recorded by the training run's contribution.
            trainName=str(inp.root().cfg.dataset)
            curName=inp.root().get_name()
            if trainName!=curName:
                name=utils.load(inp.root().cfg.path+".contribution")
                if isinstance(name , dict):
                    name=name["x" if not use_y else "y"]
                elif isinstance(name , list):
                    name=name[0 if not use_y else 1]
        except Exception:
            # Best-effort: fall back to this dataset's own vocabulary name.
            # (Narrowed from a bare `except:`.)
            pass
        file_path = os.path.join(voc,name)
        if os.path.exists(file_path):
            if name in _vocabs:
                vocabulary= _vocabs[name]
            else:
                vocabulary=utils.load(file_path)
                print("Loaded vocabulary " + name + " size: " + str(len(vocabulary.dict)))
                _vocabs[name]=vocabulary
        else:
            vocabulary=buildVocabulary(inp,max_words, use_y)
            utils.save(file_path,vocabulary)
            print("Build and saved vocabulary " + name + " size: " + str(len(vocabulary.dict)))
            _vocabs[name]=vocabulary
    @preprocessing.deployHandler(vocabularyDeployHandler)
    def transform2index(item:PredictionItem):
        # Choose the axis being indexed up front so its length is available.
        data = item.y if use_y else item.x
        ml=maxLen
        if ml==-1:
            # BUG FIX: this previously read `len(x)` where `x` is undefined,
            # raising NameError whenever maxLen was left at its default (-1).
            ml=len(data)
        res=np.zeros((ml,),dtype=np.int32)
        num=0
        for v in data:
            if v in vocabulary.dict:
                res[num]=(vocabulary.dict[v])
            else:
                res[num]=(vocabulary.unknown)
            num=num+1
            if num==ml:
                break
        res_item = PreproccedPredictionItem(item.id, item.x, res, item) if use_y else PreproccedPredictionItem(item.id, res, item.y, item)
        return res_item
    rs= preprocessing.PreprocessedDataSet(inp,transform2index,True)
    rs.vocabulary=vocabulary
    rs.maxWords=max_words
    rs.maxLen=maxLen
    rs.use_y = use_y
    if not hasattr(rs, "contribution"):
        rs.contribution = {}
    rs.contribution["x" if not use_y else "y"]=name
    return rs
def get_vocab(nm)->Vocabulary:
    """Return the vocabulary stored at *nm*, loading and caching it on first use."""
    if nm not in _vocabs:
        _vocabs[nm] = utils.load(nm)
    return _vocabs[nm]
@preprocessing.dataset_transformer
def vectorize_indexes(inp,path,maxLen=-1):
    """Map vocabulary indexes back to 300-d embedding vectors.

    Walks up the preprocessing chain to find the dataset carrying the
    vocabulary (produced by tokens_to_indexes), then replaces every index with
    its embedding.  Index 0 terminates the sequence (padding); in-vocabulary
    words without a pre-trained vector get one shared random vector.
    """
    embs=embeddings(path)
    orig=inp
    # Walk up to the transformer that recorded the vocabulary.
    while not hasattr(orig, "vocabulary"):
        orig=orig.parent
    voc=orig.vocabulary
    # One shared random vector for unknown words, fixed for this transformer's
    # lifetime so the mapping is deterministic within a run.
    unknown=np.random.randn(300)
    def index2Vector(inp):
        ml=maxLen
        if ml==-1:
            ml=len(inp)
        ln=min(ml,len(inp))
        # NOTE(review): 300 hard-codes the embedding dimensionality -- confirm
        # all supported embedding files are 300-d.
        result=np.zeros((ml,300),dtype=np.float32)
        for i in range(ln):
            ind=inp[i]
            if ind==0:
                # 0 is the padding index -- stop at the first pad.
                break
            if ind in voc.i2w:
                w=voc.i2w[ind]
                if w in embs:
                    result[i]=embs[w]
                    continue
            result[i]=unknown
        return result
    rs= preprocessing.PreprocessedDataSet(inp,index2Vector,False)
    return rs
@preprocessing.dataset_preprocessor
class vectorize:
    """Map word tokens directly to 300-d embedding vectors.

    Words missing from the embeddings are retried lower-cased; words still
    missing are left as all-zero rows.
    """
    def __init__(self,path,maxLen=-1):
        self.embeddings=embeddings(path)
        self.maxLen=maxLen
        pass
    def __call__(self,inp):
        ml=self.maxLen
        if ml==-1:
            ml=len(inp)
        ln=min(ml,len(inp))
        # NOTE(review): 300 hard-codes the embedding dimensionality -- confirm.
        result=np.zeros((ml,300),dtype=np.float32)
        for i in range(ln):
            w=inp[i]
            if w in self.embeddings:
                result[i]=self.embeddings[w]
            else:
                # Fall back to the lower-cased form before giving up.
                w=w.lower()
                if w in self.embeddings:
                    result[i]=self.embeddings[w]
        return result
@preprocessing.dataset_preprocessor
class string_to_chars:
    """Encode a string as a fixed-length uint8 byte array.

    The string is encoded with the configured codec, zero-padded to maxLen, or
    truncated to maxLen if longer.
    """
    def __init__(self, maxLen, encoding="utf8", errors='strict'):
        self.maxLen = maxLen
        self.encoding = encoding
        self.errors = errors
    def __call__(self, inp: str):
        raw = inp.encode(self.encoding, errors=self.errors)
        codes = np.frombuffer(raw, dtype=np.uint8)
        missing = self.maxLen - codes.shape[0]
        if missing > 0:
            return np.pad(codes, (0, missing), mode="constant")
        return codes[:self.maxLen]
@preprocessing.dataset_preprocessor
def remove_random_words(inp,probability):
    """Noise augmentation: drop each token with the given probability and pad
    with 0 at the end so the output length matches the input length."""
    draws = np.random.rand(len(inp))
    kept = [token for token, draw in zip(inp, draws) if draw >= probability]
    padding = [0] * (len(inp) - len(kept))
    return np.array(kept + padding)
@preprocessing.dataset_preprocessor
def swap_random_words(inp,probability):
    """Noise augmentation: with the given probability swap each token with its
    right neighbour; output is padded with 0 to the original length."""
    rr=np.random.rand(len(inp))
    result=[]
    continueNext=False
    for i in range(len(inp)-1):
        if continueNext:
            # The second half of a swapped pair was already emitted.
            continueNext=False
            continue
        if rr[i]<probability:
            # Emit the pair in swapped order and skip the next position.
            result.append(inp[i+1])
            result.append(inp[i])
            continueNext=True
            continue
        result.append(inp[i])
    # NOTE(review): the loop stops at len(inp)-1, so unless the final pair was
    # swapped the last token is dropped and replaced by the 0 padding below --
    # confirm whether that is intended.
    while len(result)<len(inp):
        result.append(0)
    if len(result)!=len(inp):
        raise ValueError()
    return np.array(result)
@preprocessing.dataset_preprocessor
def add_random_words(inp,probability):
    """Noise augmentation: with the given probability insert a random word id
    (1..1999) before each token, then truncate back to the original length."""
    draws = np.random.rand(len(inp))
    augmented = []
    for draw, token in zip(draws, inp):
        if draw < probability:
            augmented.append(np.random.randint(1, 2000))
        augmented.append(token)
    return np.array(augmented[:len(inp)])
@model.block
def word_indexes_embedding(inp,path):
    """Model block: frozen Embedding layer initialised from pre-trained vectors.

    At training time the embedding matrix is built from the vocabulary recorded
    in the input dataset's contribution and the embedding file at `path`; its
    shape is stashed in the train settings so that at inference time the layer
    can be rebuilt (its weights then come from the saved model).
    """
    embedding_matrix = None
    try:
        if context.isTrainMode():
            embs=embeddings(path)
            vocab_name = inp.contribution
            # The contribution may be a plain name, {"x":..., "y":...} or [x, y].
            if isinstance(vocab_name, dict):
                vocab_name = vocab_name["x"]
            elif isinstance(vocab_name, list):
                vocab_name = vocab_name[0]
            v=get_vocab(vocab_name);
            for word, i in tqdm.tqdm(v.dict.items()):
                if embedding_matrix is None:
                    # NOTE(review): sizing uses embs[word] for the first word of
                    # the vocabulary -- this raises KeyError (silently swallowed
                    # below) if that word has no pre-trained vector; confirm.
                    embedding_matrix=np.random.randn(len(v.dict)+1, len(embs[word]))
                    context.addTrainSetting((len(v.dict)+1, len(embs[word])))
                if word in embs:
                    embedding_matrix[i]=embs[word]
            return keras.layers.Embedding(len(v.dict)+1,embedding_matrix.shape[1],weights=[embedding_matrix],trainable=False)(inp)
        else:
            # Inference: recreate the layer with the recorded shape.
            s,z = context.popTrainSetting()
            return keras.layers.Embedding(s,z,trainable=False)(inp)
    except:
        # NOTE(review): this bare except returns None on any failure, which
        # surfaces later as a confusing model-building error -- consider
        # narrowing to Exception and re-raising.
        import traceback
        traceback.print_exc()
        return None
from seqeval import metrics as sem
class connll2003_entity_level_f1(metrics.ByOneMetric):
    """Entity-level F1 in the CoNLL-2003 sense, computed with seqeval.

    Accumulates decoded tag sequences over the whole prediction set and reports
    a single score on commit.  (The "connll" spelling is part of the registered
    metric name and must not be changed.)
    """
    def __init__(self):
        self.gt=[]
        self.pr=[]
        self.name="connll2003_entity_level_f1"
        pass
    def onItem(self,outputs,labels):
        vl=self.dataset
        if isinstance(vl, datasets.CompositeDataSet):
            vl=vl.components[0]
        # Decode numeric predictions/labels back into tag strings for seqeval.
        labels=vl.decode(labels)
        gt=vl.decode(outputs,len(labels));
        # NOTE(review): decoded model outputs go into self.gt and ground-truth
        # labels into self.pr -- the names look swapped.  F1 is symmetric, but
        # the precision/recall subclasses inherit this; verify.
        self.pr=self.pr+labels
        self.gt=self.gt+gt
        pass
    def eval(self,predictions):
        # Remember the root dataset so onItem can decode numeric tags.
        self.dataset=predictions.root()
        return super().eval(predictions)
    def commit(self,dict):
        dict[self.name]=sem.f1_score(self.gt,self.pr)
        return dict
class connll2003_entity_level_precision(connll2003_entity_level_f1):
    """Entity-level precision (seqeval), sharing accumulation with the F1 metric."""
    def __init__(self):
        self.gt=[]
        self.pr=[]
        self.name="connll2003_entity_level_precision"
        pass
    def eval(self,predictions):
        self.dataset=predictions.root()
        return super().eval(predictions)
    def commit(self,dict):
        # NOTE(review): per the base class's onItem, gt/pr appear to hold
        # predictions/ground-truth respectively, which would swap precision
        # and recall; verify against a known corpus.
        dict[self.name]=sem.precision_score(self.gt,self.pr)
        return dict
class connll2003_entity_level_recall(connll2003_entity_level_f1):
    """Entity-level recall (seqeval), sharing accumulation with the F1 metric."""
    def __init__(self):
        self.gt=[]
        self.pr=[]
        self.name="connll2003_entity_level_recall"
        pass
    def eval(self,predictions):
        self.dataset=predictions.root()
        return super().eval(predictions)
    def commit(self,dict):
        # NOTE(review): per the base class's onItem, gt/pr may be swapped,
        # which would exchange precision and recall; verify.
        dict[self.name]=sem.recall_score(self.gt,self.pr)
        return dict
# Register the CoNLL-2003 entity-level metrics in the global layer catalog so
# they can be referenced by name from experiment configs.  (The "connll"
# spelling is part of each metric's public name.)
configloader.load("layers").catalog[connll2003_entity_level_f1().name]=connll2003_entity_level_f1()
configloader.load("layers").catalog[connll2003_entity_level_precision().name]=connll2003_entity_level_precision()
configloader.load("layers").catalog[connll2003_entity_level_recall().name]=connll2003_entity_level_recall()
| 34.015564
| 147
| 0.594544
|
3b0071c71ea85640e2697176ba236b9468ffe6ed
| 4,428
|
py
|
Python
|
server/board.py
|
dadrian/blinken-board
|
994131a9dcda20b9a6af47d0349d55ce8d023ddb
|
[
"Apache-2.0"
] | null | null | null |
server/board.py
|
dadrian/blinken-board
|
994131a9dcda20b9a6af47d0349d55ce8d023ddb
|
[
"Apache-2.0"
] | null | null | null |
server/board.py
|
dadrian/blinken-board
|
994131a9dcda20b9a6af47d0349d55ce8d023ddb
|
[
"Apache-2.0"
] | null | null | null |
try:
import pygame
except:
pass
import psu
import socket
import struct
import logger
import time
try:
from websocket import create_connection
except:
pass
import base64
class Board(object):
    """Model of a width x height grid of RGB lights.

    Keeps the current color of every light in memory, optionally mirrors the
    board in a local pygame window, and pushes frames to the physical lights
    over TCP/UDP or to web clients over a websocket.
    """

    def __init__(self, size=(600,490), host=('127.0.0.1', 1337), width=57, height=44, use_pygame=True, reconnect_interval=30, create_ws=False):
        """Create the board model and open the outgoing connections.

        Args:
            size: pixel size of the optional pygame preview window.
            host: (ip, port) of the TCP light server.
            width, height: board dimensions in lights.
            use_pygame: when True, render a local preview window.
            reconnect_interval: minimum seconds between TCP reconnect attempts.
            create_ws: when True, also open a websocket to the local web server.
        """
        self.screen = None
        if use_pygame:
            self.screen = pygame.display.set_mode(size)
            self.screen.fill((0, 0, 0))
        # lights[x][y] holds the current (r, g, b) color of one light.
        self.lights = []
        for x in range(width):
            self.lights.append([])
            for y in range(height):
                self.lights[x].append((0, 0, 0))
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        self.tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.tcp_sock.settimeout(5)
        self.last_buf = b''
        self.host = host
        self.last_reconnect = time.time()
        self.reconnect_interval = reconnect_interval
        try:
            self.tcp_sock.connect(host)
            logger.debug('Connected to %s' % (str(host)))
        except socket.error:
            logger.warn('Could not connect to %s' % (str(host)))
            self.tcp_sock = None
        try:
            if create_ws:
                # NOTE: self.ws only exists when this succeeds; send_board_ws
                # may only be used in the create_ws configuration.
                self.ws = create_connection('ws://localhost:8765/raw_board')
                logger.trace('created websocket for board')
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
            logger.warn('failed to create websocket')

    def reconnect_tcp(self):
        """Try to re-open the TCP connection, rate-limited to reconnect_interval seconds."""
        now = time.time()
        if (now - self.last_reconnect) < self.reconnect_interval:
            return
        self.last_reconnect = now
        logger.debug('reconnecting to %s' % (str(self.host)))
        self.tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.tcp_sock.settimeout(5)
        try:
            self.tcp_sock.connect(self.host)
            logger.info('Reconnected to lights %s' % (str(self.host)))
        except socket.error:
            self.tcp_sock = None
            logger.debug('Failed to reconnect to %s' % (str(self.host)))

    def set_light(self, x, y, color):
        """Set light (x, y) to an (r, g, b) color, updating the preview if shown."""
        self.lights[x][y] = color
        if self.screen is not None:
            pygame.draw.circle(self.screen, color, (x*10+20, y*10+20), 4)

    def display(self):
        """Flush the pygame preview window, if one exists."""
        if self.screen is not None:
            pygame.display.flip()

    def get_last_buf(self):
        """Return the last frame buffer handed to send_buf_tcp."""
        return self.last_buf

    def serialize_board(self):
        """Serialize the board to bytes: 3 bytes (R, G, B) per light, column-major."""
        buf = b''
        for x in range(len(self.lights)):
            for y in range(len(self.lights[x])):
                r, g, b = self.lights[x][y]
                buf += struct.pack('>BBB', r, g, b)
        return buf

    def send_board(self):
        """Send the current frame over TCP and refresh the local preview."""
        buf = self.serialize_board()
        self.send_buf_tcp(buf)
        self.display()

    def send_board_ws(self):
        """Send the current frame, base64-encoded, over the websocket."""
        buf = self.serialize_board()
        if len(buf) == 0:
            return
        self.ws.send(base64.b64encode(buf))

    def send_buf_tcp(self, buf):
        """Send a raw frame buffer over TCP, scheduling a reconnect on failure."""
        self.last_buf = buf
        try:
            if self.tcp_sock is not None:
                self.tcp_sock.send(buf)
            else:
                self.reconnect_tcp()
        except Exception as e:
            logger.warn("Lights TCP connection died: %s" % (str(e)))
            self.tcp_sock = None
            self.reconnect_tcp()

    def send_board_udp(self):
        """Send each strand of lights to its power supply over UDP (KiNET-style)."""
        for x in range(len(self.lights)):
            # Strand layout: x = (psu_id*7 + 8) - strand_id, inverted here.
            # BUG FIX: use integer division -- on Python 3 `/` yields a float,
            # which breaks the psu.extra_buf / psu.dest_ips indexing below.
            psu_id = (x-1) // 7
            strand_id = (psu_id*7 + 8) - x
            if x == 0:
                psu_id = 0
                strand_id = 8
            # BUG FIX: str.decode('hex') is Python 2 only; bytes.fromhex is the
            # Python 3 equivalent.
            data = bytes.fromhex('0401dc4a0100080100') + bytes.fromhex('00000000')
            data += struct.pack('>I', strand_id)
            # BUG FIX: the payload must be bytes ('' + packed bytes raises TypeError).
            buf = b''
            for y in range(len(self.lights[x])):
                r, g, b = self.lights[x][y]
                buf += struct.pack('>BBB', r, g, b)
            data += struct.pack('>I', len(buf))
            data += psu.extra_buf[psu_id] # weird extra 3 bytes, not sure what it's for yet, seems psu-specific but not an ID?
            data += buf
            self.socket.sendto(data, (psu.dest_ips[psu_id], 6038))
| 30.328767
| 143
| 0.544941
|
bfb91d86b3567156505d6eda13201950baecbdd1
| 526
|
py
|
Python
|
src/hri/src/stt.py
|
APMMonteiro/european_robotic_league
|
1a7345bdbdf4a57c434c6fda44b0714c277877a7
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/hri/src/stt.py
|
APMMonteiro/european_robotic_league
|
1a7345bdbdf4a57c434c6fda44b0714c277877a7
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/hri/src/stt.py
|
APMMonteiro/european_robotic_league
|
1a7345bdbdf4a57c434c6fda44b0714c277877a7
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# Minimal speech-to-text demo: capture one utterance from a microphone and
# transcribe it with the Google Web Speech API (British English).
import sounddevice as sd  # NOTE(review): unused here -- presumably imported for its audio-stack side effects; confirm or remove
import speech_recognition as sr
# initialize the recognizer
r = sr.Recognizer()
print("Make sure you select the right device (0,1,2...)")
print(sr.Microphone.list_microphone_names())
# NOTE(review): device_index=2 is machine-specific -- pick the index printed above.
with sr.Microphone(device_index=2,sample_rate = 48000) as source:
    # read the audio data from the default microphone
    print("Listening to audio..")
    # Calibrate for background noise before listening.
    r.adjust_for_ambient_noise(source)
    # Raises WaitTimeoutError if speech does not start within 2 seconds.
    audio = r.listen(source,timeout=2)
# show_all=True returns the full recognition candidates, not just the best one.
text =r.recognize_google(audio,language='en-GB',show_all=True)
print(text)
| 30.941176
| 66
| 0.745247
|
3bf9e252915de05c15f517fec1ec69d62c6b11fd
| 7,381
|
py
|
Python
|
qiskit/chemistry/drivers/gaussiand/__init__.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
qiskit/chemistry/drivers/gaussiand/__init__.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 4
|
2020-11-27T09:34:13.000Z
|
2021-04-30T21:13:41.000Z
|
qiskit/chemistry/drivers/gaussiand/__init__.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Gaussian™ 16 Installation
=========================
`Gaussian™ 16 <http://gaussian.com/gaussian16/>`__ is a commercial program for
computational chemistry. This chemistry driver accesses electronic structure information
from Gaussian™ 16 via the Gaussian-supplied open-source
`interfacing code <http://www.gaussian.com/interfacing/>`__.
You should follow the installation instructions that come with your Gaussian™ 16 product.
Installation instructions can also be found online in
`Gaussian product installation support <http://gaussian.com/techsupport/#install>`__.
Following the installation make sure the Gaussian™ 16 executable, `g16`, can be run from the
command line environment where you will be running Python and Qiskit. For example
verifying that the `g16` executable is reachable via the system environment path,
and appropriate exports, such as `GAUSS_EXEDIR`, have been configured as per
`Gaussian product installation support <http://gaussian.com/techsupport/#install>`__.
Gaussian™ 16 Interfacing Code
-----------------------------
In the :mod:`gauopen` folder the Python part of the above interfacing code,
as needed by Qiskit's chemistry modules, has been made available. It is licensed under a
`Gaussian Open-Source Public License
<https://github.com/Qiskit/qiskit-aqua/blob/master/qiskit/chemistry/drivers/gaussiand/gauopen/LICENSE.txt>`_.
Part of this interfacing code --- specifically, the Fortran file `qcmatrixio.F` --- requires
compilation to a Python native extension. However, Qiskit comes with pre-built binaries
for most common platforms. If there is no pre-built binary matching your platform, then it will be
necessary to compile this file as per the instructions below.
Compiling the Fortran Interfacing Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If no prebuilt native extension binary, as supplied with Qiskit, works for your
platform, then to use the Gaussian™ 16 driver on your machine, the Fortran file `qcmatrixio.F`
must be compiled into object code that can be used by Python. This is accomplished using the
`Fortran to Python Interface Generator (F2PY) <https://docs.scipy.org/doc/numpy/f2py/>`__,
which is part of the `NumPy <http://www.numpy.org/>`__ Python library.
Specifically, on your command prompt window, change directory to the
`qiskit/chemistry/drivers/gaussiand/gauopen` directory inside the Qiskit
installation directory, and while in the Python environment created for Aqua and the chemistry
module, invoke `f2py` on `qcmatrixio.F` as explained below.
Apple macOS and Linux
~~~~~~~~~~~~~~~~~~~~~
The full syntax of the `f2py` command on macOS and Linux is as follows:
.. code:: sh
f2py -c -m qcmatrixio qcmatrixio.F
This command will generate a file with name prefix `qcmatrixio` and extension `.so`, for example
`qcmatrixio.cpython-36m-x86_64-linux-gnu.so`.
In order for the command above to work and such file to be generated, you will need a supported
Fortran compiler installed. On macOS, you may have to download the
`GNU Compiler Collection (GCC) <https://gcc.gnu.org/>`__ and, in particular, the
`GFortran Compiler <https://gcc.gnu.org/fortran/>`__ source and compile it first,
if you do not have a suitable Fortran compiler already installed.
On Linux you may be able to download and install a supported Fortran compiler via your
distribution's installer.
.. topic:: Special Notes for macOS X
If your account is using the bash shell on a macOS X machine, you can edit the
`.bash_profile` file in your home directory and add the following lines:
.. code:: sh
export GAUSS_SCRDIR=~/.gaussian
export g16root=/Applications
alias enable_gaussian='. $g16root/g16/bsd/g16.profile'
The above assumes that the application Gaussian™ 16 was placed in the `/Applications` folder
and that `~/.gaussian` is the full path to
the selected scratch folder, where Gaussian™ 16 stores its temporary files.
Now, before Qiskit can properly interface Gaussian™ 16, you will have to run the
`enable_gaussian` command defined above. This, however, may generate the following error:
.. code:: sh
bash: ulimit: open files: cannot modify limit: Invalid argument
While this error is not harmful, you might want to suppress it, which can be done by entering
the following sequence of commands on the command line:
.. code:: sh
echo kern.maxfiles=65536 | sudo tee -a /etc/sysctl.conf
echo kern.maxfilesperproc=65536 | sudo tee -a /etc/sysctl.conf
sudo sysctl -w kern.maxfiles=65536
sudo sysctl -w kern.maxfilesperproc=65536
ulimit -n 65536 65536
as well as finally adding the following line to the `.bash_profile` file in your account's
home directory:
.. code:: sh
ulimit -n 65536 65536
At the end of this configuration, the `.bash_profile` in your account's home directory
should have a section in it like in the following script snippet:
.. code:: sh
# Gaussian 16
export GAUSS_SCRDIR=~/.gaussian
export g16root=/Applications
alias enable_gaussian='. $g16root/g16/bsd/g16.profile'
ulimit -n 65536 65536
Microsoft Windows
~~~~~~~~~~~~~~~~~
The following steps can be used with the Intel Fortran compiler on the Microsoft Windows platform:
1. Set up the environment by running the Intel Fortran compiler batch program `ifortvars.bat`
as follows:
.. code:: sh
ifortvars -arch intel64
2. Then, in this environment, issue the following command from within the `gauopen` directory:
.. code:: sh
f2py -c --fcompiler=intelvem -m qcmatrixio qcmatrixio.F
Upon successful execution, the `f2py` command above will generate a file with name prefix
`qcmatrixio` and extension `.so`, for example `qcmatrixio.cp36-win_amd64.pyd`. However,
in order for the `f2py` command above to work, `#ifdef` may need to be manually edited if it
is not recognized or supported during the processing of the `f2py` command above. For
example, with `f2py` from Intel Visual Fortran Compiler with Microsoft Visual Studio, the
following code snippet originally shows two occurrences of the line
`Parameter (Len12D=8,Len4D=8)`, as shown next:
.. code::
`#ifdef` USE_I8
Parameter (Len12D=8,Len4D=8)
`#else`
Parameter (Len12D=4,Len4D=4)
`#endif`
This may need to be simplified by deleting the first three lines and the last line,
leaving just the fourth line, as follows:
.. code::
Parameter (Len12D=4,Len4D=4)
"""
from .gaussiandriver import GaussianDriver
from .gaussian_forces_driver import GaussianForcesDriver
from .gaussian_log_driver import GaussianLogDriver
from .gaussian_log_result import GaussianLogResult
__all__ = ['GaussianDriver',
'GaussianForcesDriver',
'GaussianLogDriver',
'GaussianLogResult']
| 40.333333
| 109
| 0.732286
|
19f8e2a49e57190bb385e735affe5a52d0388dc8
| 4,833
|
py
|
Python
|
frustum_pointnet/configs/dan/parallel/__init__.py
|
anurag1paul/pseudo_lidar
|
02faf327efd43c986629d0ea797b058e464c05aa
|
[
"MIT"
] | 1
|
2020-03-19T21:30:57.000Z
|
2020-03-19T21:30:57.000Z
|
frustum_pointnet/configs/dan/parallel/__init__.py
|
anurag1paul/pseudo_lidar
|
02faf327efd43c986629d0ea797b058e464c05aa
|
[
"MIT"
] | null | null | null |
frustum_pointnet/configs/dan/parallel/__init__.py
|
anurag1paul/pseudo_lidar
|
02faf327efd43c986629d0ea797b058e464c05aa
|
[
"MIT"
] | 1
|
2022-01-26T03:36:42.000Z
|
2022-01-26T03:36:42.000Z
|
import numpy as np
import torch
import torch.optim as optim
from datasets.kitti import FrustumKitti
from datasets.vkitti.attributes import vkitti_attributes as vkitti
from datasets.vkitti import FrustumVkitti
from meters.kitti import MeterFrustumKitti
from modules.frustum import FrustumPointDanParallelLoss
from evaluate.kitti.frustum.eval import evaluate
from utils.config import Config, configs
# data configs
configs.data.num_points_per_object = 512
configs.data.num_heading_angle_bins = 12
configs.data.size_template_names = vkitti.class_names
configs.data.num_size_templates = len(configs.data.size_template_names)
# Bidirectional maps between class name and size-template id.
configs.data.class_name_to_size_template_id = {
    cat: cls for cls, cat in enumerate(configs.data.size_template_names)
}
configs.data.size_template_id_to_class_name = {
    v: k for k, v in configs.data.class_name_to_size_template_id.items()
}
# One mean-size prior per class, converted to a float32 torch tensor.
configs.data.size_templates = np.zeros((configs.data.num_size_templates, 3))
for i in range(configs.data.num_size_templates):
    configs.data.size_templates[i, :] = vkitti.class_name_to_size_template[
        configs.data.size_template_id_to_class_name[i]]
configs.data.size_templates = torch.from_numpy(configs.data.size_templates.astype(np.float32))
# dataset configs
# Domain-adaptation source: synthetic Virtual KITTI frustums.
# NOTE(review): configs.data.classes is read here but never assigned in this
# file -- presumably set by a base config that is loaded first; confirm.
configs.source_dataset = Config(FrustumVkitti)
configs.source_dataset.root = 'data/vkitti/frustum/frustum_data'
configs.source_dataset.num_points = 1024
configs.source_dataset.classes = configs.data.classes
configs.source_dataset.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.source_dataset.class_name_to_size_template_id = configs.data.class_name_to_size_template_id
configs.source_dataset.random_flip = True
configs.source_dataset.random_shift = True
configs.source_dataset.frustum_rotate = True
configs.source_dataset.from_rgb_detection = False
# Adaptation target: real KITTI frustums, same augmentation settings.
configs.target_dataset = Config(FrustumKitti)
configs.target_dataset.root = 'data/kitti/frustum/frustum_data'
configs.target_dataset.num_points = 1024
configs.target_dataset.classes = configs.data.classes
configs.target_dataset.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.target_dataset.class_name_to_size_template_id = configs.data.class_name_to_size_template_id
configs.target_dataset.random_flip = True
configs.target_dataset.random_shift = True
configs.target_dataset.frustum_rotate = True
configs.target_dataset.from_rgb_detection = False
# evaluate configs
# Evaluation runs on the real-KITTI val split using RGB-detection frustums.
configs.evaluate.fn = evaluate
configs.evaluate.batch_size = 32
configs.evaluate.dataset = Config(FrustumKitti)
configs.evaluate.dataset.root = 'data/kitti/frustum/frustum_data'
configs.evaluate.dataset.split = "val"
configs.evaluate.dataset.from_rgb_detection = True
configs.evaluate.dataset.frustum_rotate = True
configs.evaluate.dataset.num_points = 1024
configs.evaluate.dataset.classes = configs.data.classes
configs.evaluate.dataset.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.evaluate.dataset.class_name_to_size_template_id = configs.data.class_name_to_size_template_id
# train configs
configs.train = Config()
configs.train.num_epochs = 100
configs.train.batch_size = 32
# train: meters
# One meter per metric; the "{}" in each name is presumably filled with the
# split suffix (see the "_val" names below) -- confirm where it is formatted.
configs.train.meters = Config()
for name, metric in [
    ('acc/iou_3d_{}', 'iou_3d'), ('acc/acc_{}', 'accuracy'),
    ('acc/iou_3d_acc_{}', 'iou_3d_accuracy'), ('acc/iou_3d_class_acc_{}', 'iou_3d_class_accuracy')
]:
    configs.train.meters[name] = Config(
        MeterFrustumKitti, metric=metric, num_heading_angle_bins=configs.data.num_heading_angle_bins,
        num_size_templates=configs.data.num_size_templates, size_templates=configs.data.size_templates,
        class_name_to_class_id={cat: cls for cls, cat in enumerate(configs.data.classes)}
    )
# train: metric for save best checkpoint
configs.train.metrics = ('acc/iou_3d_class_acc_val', 'acc/iou_3d_acc_val')
# train: criterion
configs.train.criterion = Config(FrustumPointDanParallelLoss)
configs.train.criterion.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.train.criterion.num_size_templates = configs.data.num_size_templates
configs.train.criterion.size_templates = configs.data.size_templates
configs.train.criterion.box_loss_weight = 1.0
configs.train.criterion.corners_loss_weight = 10.0
configs.train.criterion.heading_residual_loss_weight = 20.0
configs.train.criterion.size_residual_loss_weight = 20.0
# train: optimizer
# Separate Adam optimizers for the generator, the classifier (2x base lr) and
# the discriminator, matching the adversarial (DAN) training setup.
configs.train.base_lr = 5e-4
configs.train.optimizer_g = Config(optim.Adam)
configs.train.optimizer_g.weight_decay = 5e-4
configs.train.optimizer_g.lr = configs.train.base_lr
configs.train.optimizer_cls = Config(optim.Adam)
configs.train.optimizer_cls.lr = 2 * configs.train.base_lr
configs.train.optimizer_cls.weight_decay = 5e-4
configs.train.optimizer_dis = Config(optim.Adam)
configs.train.optimizer_dis.lr = configs.train.base_lr
configs.train.optimizer_dis.weight_decay = 5e-4
| 44.33945
| 103
| 0.827023
|
f679cf4f4c74b5acdba643cf7891714e201c35b0
| 5,166
|
py
|
Python
|
credit_integration/migrations/0001_initial.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 1
|
2021-01-12T08:14:10.000Z
|
2021-01-12T08:14:10.000Z
|
credit_integration/migrations/0001_initial.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 249
|
2017-04-18T14:00:13.000Z
|
2022-03-30T12:18:03.000Z
|
credit_integration/migrations/0001_initial.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 7
|
2017-04-18T08:43:54.000Z
|
2021-07-28T07:29:30.000Z
|
import credit_integration.enums
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import enumfields.fields
class Migration(migrations.Migration):
    """Initial schema for the credit_integration app (auto-generated by Django).

    Creates CreditDecisionReason (a unique 3-char code plus free text) and
    CreditDecision, which links a claimant user, an optional leasing.Contact
    customer and many reasons.  Prefer regenerating over hand-editing.
    """

    initial = True

    dependencies = [
        ("leasing", "0045_move_plotsearch_to_plotsearch_app"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="CreditDecisionReason",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, verbose_name="Time created"
                    ),
                ),
                (
                    "modified_at",
                    models.DateTimeField(auto_now=True, verbose_name="Time modified"),
                ),
                (
                    "reason_code",
                    models.CharField(
                        max_length=3, unique=True, verbose_name="Reason code"
                    ),
                ),
                ("reason", models.TextField(verbose_name="Reason")),
            ],
            options={"abstract": False,},
        ),
        migrations.CreateModel(
            name="CreditDecision",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, verbose_name="Time created"
                    ),
                ),
                (
                    "modified_at",
                    models.DateTimeField(auto_now=True, verbose_name="Time modified"),
                ),
                (
                    "status",
                    enumfields.fields.EnumField(
                        enum=credit_integration.enums.CreditDecisionStatus,
                        max_length=30,
                        verbose_name="Status",
                    ),
                ),
                (
                    "business_id",
                    models.CharField(
                        blank=True, max_length=9, verbose_name="Business ID"
                    ),
                ),
                (
                    "official_name",
                    models.CharField(
                        blank=True, max_length=255, verbose_name="Official name"
                    ),
                ),
                (
                    "address",
                    models.CharField(
                        blank=True, max_length=255, verbose_name="Address"
                    ),
                ),
                (
                    "phone_number",
                    models.CharField(
                        blank=True, max_length=50, verbose_name="Phone number"
                    ),
                ),
                (
                    "business_entity",
                    models.CharField(
                        blank=True, max_length=50, verbose_name="Business entity"
                    ),
                ),
                (
                    "operation_start_date",
                    # NOTE(review): blank=True without null=True on a DateField
                    # leaves the column NOT NULL while the form allows it to be
                    # empty -- confirm the source model is intentional.
                    models.DateField(
                        blank=True, verbose_name="Date of commencement of operations"
                    ),
                ),
                (
                    "industry_code",
                    models.CharField(
                        blank=True, max_length=10, verbose_name="Industry code"
                    ),
                ),
                (
                    "claimant",
                    # PROTECT: a user with credit decisions cannot be deleted.
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="credit_decisions",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="Claimant",
                    ),
                ),
                (
                    "customer",
                    # Optional link; decisions are removed with their contact.
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="credit_decisions",
                        to="leasing.Contact",
                        verbose_name="Customer",
                    ),
                ),
                (
                    "reasons",
                    models.ManyToManyField(
                        to="credit_integration.CreditDecisionReason",
                        verbose_name="Reasons",
                    ),
                ),
            ],
            options={"abstract": False,},
        ),
    ]
| 33.764706
| 86
| 0.378242
|
10a93524c4481be2f255813c9db4b958bd763ec5
| 966
|
py
|
Python
|
test/exp/build.py
|
xiyie/yolox
|
3916c492b987b60e44bb64057696ba945e5557f3
|
[
"Apache-2.0"
] | 1
|
2021-09-09T07:50:23.000Z
|
2021-09-09T07:50:23.000Z
|
test/exp/build.py
|
xiyie/yolox
|
3916c492b987b60e44bb64057696ba945e5557f3
|
[
"Apache-2.0"
] | null | null | null |
test/exp/build.py
|
xiyie/yolox
|
3916c492b987b60e44bb64057696ba945e5557f3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import importlib
import os
import sys
def get_exp_by_file(exp_file):
    """Load and instantiate the ``Exp`` class defined in *exp_file*.

    The directory containing *exp_file* is appended to ``sys.path`` so the
    file can be imported as a module by its basename.

    Args:
        exp_file (str): path to a python file that defines a class ``Exp``.

    Returns:
        An instance of the file's ``Exp`` class.

    Raises:
        ImportError: if the file cannot be imported or does not define ``Exp``.
    """
    try:
        sys.path.append(os.path.dirname(exp_file))
        # splitext is more robust than split(".")[0]: it keeps dotted module
        # names like "yolox.nano.py" -> "yolox.nano" intact.
        module_name = os.path.splitext(os.path.basename(exp_file))[0]
        current_exp = importlib.import_module(module_name)
        exp = current_exp.Exp()
    except Exception as e:
        # chain the original exception so the real cause is not masked
        raise ImportError("{} doesn't contain a class named 'Exp'".format(exp_file)) from e
    return exp
def get_exp(exp_file, exp_name):
    """
    get Exp object by file or name. If exp_file and exp_name
    are both provided, get Exp by exp_file.
    Args:
        exp_file (str): file path of experiment.
        exp_name (str): name of experiment. "yolo-s",
    """
    # At least one way of identifying the experiment must be supplied.
    assert (
        exp_file is not None or exp_name is not None
    ), "plz provide exp file or exp name."
    # A file path takes precedence over a name.
    if exp_file is not None:
        return get_exp_by_file(exp_file)
    # NOTE(review): the exp_name lookup branch appears to be truncated in this
    # copy -- when only exp_name is given the function falls through and
    # returns None; confirm against the full upstream source.
| 26.108108
| 88
| 0.630435
|
942de13c221f3c0fb342cdc2d263496cc5aba1c6
| 2,728
|
py
|
Python
|
evmosproto/cosmos/slashing/v1beta1/tx_pb2_grpc.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
evmosproto/cosmos/slashing/v1beta1/tx_pb2_grpc.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
evmosproto/cosmos/slashing/v1beta1/tx_pb2_grpc.py
|
hanchon-live/evmosproto
|
141f336cf027a88c5bf227ab49069dd1cf2e4853
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from evmosproto.cosmos.slashing.v1beta1 import tx_pb2 as cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2
class MsgStub(object):
    """Msg defines the slashing Msg service.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC for cosmos.slashing.v1beta1.Msg/Unjail: serializes
        # MsgUnjail requests and deserializes MsgUnjailResponse replies.
        self.Unjail = channel.unary_unary(
                '/cosmos.slashing.v1beta1.Msg/Unjail',
                request_serializer=cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjail.SerializeToString,
                response_deserializer=cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjailResponse.FromString,
                )
class MsgServicer(object):
    """Msg defines the slashing Msg service.
    """

    def Unjail(self, request, context):
        """Unjail defines a method for unjailing a jailed validator, thus returning
        them into the bonded validator set, so they can begin receiving provisions
        and rewards again.
        """
        # Abstract handler: concrete servicers override this method; the
        # default implementation reports UNIMPLEMENTED to the client.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MsgServicer_to_server(servicer, server):
    # Register the servicer's RPC handlers with a grpc.Server under the
    # fully-qualified service name 'cosmos.slashing.v1beta1.Msg'.
    rpc_method_handlers = {
            'Unjail': grpc.unary_unary_rpc_method_handler(
                    servicer.Unjail,
                    request_deserializer=cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjail.FromString,
                    response_serializer=cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjailResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'cosmos.slashing.v1beta1.Msg', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# This class is part of an EXPERIMENTAL API.
class Msg(object):
    """Msg defines the slashing Msg service.
    """

    @staticmethod
    def Unjail(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot convenience invocation: opens a channel to *target*,
        # performs the Unjail unary-unary call, and returns the response.
        return grpc.experimental.unary_unary(request, target, '/cosmos.slashing.v1beta1.Msg/Unjail',
            cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjail.SerializeToString,
            cosmos_dot_slashing_dot_v1beta1_dot_tx__pb2.MsgUnjailResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 37.369863
| 120
| 0.684751
|
54819829fd43a66c9bc5baf0a34ef0a3175390ef
| 5,244
|
py
|
Python
|
tests/backends/test_utils.py
|
Lord-Elrond/django
|
178109c1734ccc16386c3e3cbae1465c7a1b8ed8
|
[
"BSD-3-Clause",
"0BSD"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
tests/backends/test_utils.py
|
Lord-Elrond/django
|
178109c1734ccc16386c3e3cbae1465c7a1b8ed8
|
[
"BSD-3-Clause",
"0BSD"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
tests/backends/test_utils.py
|
Lord-Elrond/django
|
178109c1734ccc16386c3e3cbae1465c7a1b8ed8
|
[
"BSD-3-Clause",
"0BSD"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
"""Tests for django.db.backends.utils"""
from decimal import Decimal, Rounded
from django.db import NotSupportedError, connection
from django.db.backends.utils import (
format_number, split_identifier, split_tzname_delta, truncate_name,
)
from django.test import (
SimpleTestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
class TestUtils(SimpleTestCase):
    """Unit tests for the helper functions in django.db.backends.utils."""

    def test_truncate_name(self):
        # Names within the length limit pass through unchanged; longer names
        # are shortened and suffixed with a hash fragment to stay unique.
        self.assertEqual(truncate_name('some_table', 10), 'some_table')
        self.assertEqual(truncate_name('some_long_table', 10), 'some_la38a')
        self.assertEqual(truncate_name('some_long_table', 10, 3), 'some_loa38')
        self.assertEqual(truncate_name('some_long_table'), 'some_long_table')
        # "user"."table" syntax
        self.assertEqual(truncate_name('username"."some_table', 10), 'username"."some_table')
        self.assertEqual(truncate_name('username"."some_long_table', 10), 'username"."some_la38a')
        self.assertEqual(truncate_name('username"."some_long_table', 10, 3), 'username"."some_loa38')

    def test_split_identifier(self):
        # Splits '"namespace"."name"' into (namespace, name), stripping quotes;
        # an unqualified name yields an empty namespace.
        self.assertEqual(split_identifier('some_table'), ('', 'some_table'))
        self.assertEqual(split_identifier('"some_table"'), ('', 'some_table'))
        self.assertEqual(split_identifier('namespace"."some_table'), ('namespace', 'some_table'))
        self.assertEqual(split_identifier('"namespace"."some_table"'), ('namespace', 'some_table'))

    def test_format_number(self):
        # format_number(value, max_digits, decimal_places) renders a Decimal
        # for the backend; raises decimal.Rounded when precision is exceeded.
        def equal(value, max_d, places, result):
            self.assertEqual(format_number(Decimal(value), max_d, places), result)
        equal('0', 12, 3, '0.000')
        equal('0', 12, 8, '0.00000000')
        equal('1', 12, 9, '1.000000000')
        equal('0.00000000', 12, 8, '0.00000000')
        equal('0.000000004', 12, 8, '0.00000000')
        equal('0.000000008', 12, 8, '0.00000001')
        equal('0.000000000000000000999', 10, 8, '0.00000000')
        equal('0.1234567890', 12, 10, '0.1234567890')
        equal('0.1234567890', 12, 9, '0.123456789')
        equal('0.1234567890', 12, 8, '0.12345679')
        equal('0.1234567890', 12, 5, '0.12346')
        equal('0.1234567890', 12, 3, '0.123')
        equal('0.1234567890', 12, 1, '0.1')
        equal('0.1234567890', 12, 0, '0')
        equal('0.1234567890', None, 0, '0')
        equal('1234567890.1234567890', None, 0, '1234567890')
        equal('1234567890.1234567890', None, 2, '1234567890.12')
        equal('0.1234', 5, None, '0.1234')
        equal('123.12', 5, None, '123.12')
        with self.assertRaises(Rounded):
            equal('0.1234567890', 5, None, '0.12346')
        with self.assertRaises(Rounded):
            equal('1234567890.1234', 5, None, '1234600000')

    def test_split_tzname_delta(self):
        # Splits 'Zone+HH:MM' into (zone, sign, offset); names whose '+'/'-'
        # is not followed by an HH:MM offset are returned whole.
        tests = [
            ('Asia/Ust+Nera', ('Asia/Ust+Nera', None, None)),
            ('Asia/Ust-Nera', ('Asia/Ust-Nera', None, None)),
            ('Asia/Ust+Nera-02:00', ('Asia/Ust+Nera', '-', '02:00')),
            ('Asia/Ust-Nera+05:00', ('Asia/Ust-Nera', '+', '05:00')),
            ('America/Coral_Harbour-01:00', ('America/Coral_Harbour', '-', '01:00')),
            ('America/Coral_Harbour+02:30', ('America/Coral_Harbour', '+', '02:30')),
            ('UTC+15:00', ('UTC', '+', '15:00')),
            ('UTC-04:43', ('UTC', '-', '04:43')),
            ('UTC', ('UTC', None, None)),
            ('UTC+1', ('UTC+1', None, None)),
        ]
        for tzname, expected in tests:
            with self.subTest(tzname=tzname):
                self.assertEqual(split_tzname_delta(tzname), expected)
class CursorWrapperTests(TransactionTestCase):
    """Tests for cursor.callproc() behavior across database backends."""

    available_apps = []

    def _test_procedure(self, procedure_sql, params, param_types, kparams=None):
        # Create the stored procedure, call it from a fresh cursor, then
        # drop it again so the test leaves no schema changes behind.
        with connection.cursor() as cursor:
            cursor.execute(procedure_sql)
        # Use a new cursor because in MySQL a procedure can't be used in the
        # same cursor in which it was created.
        with connection.cursor() as cursor:
            cursor.callproc('test_procedure', params, kparams)
        with connection.schema_editor() as editor:
            editor.remove_procedure('test_procedure', param_types)

    @skipUnlessDBFeature('create_test_procedure_without_params_sql')
    def test_callproc_without_params(self):
        self._test_procedure(connection.features.create_test_procedure_without_params_sql, [], [])

    @skipUnlessDBFeature('create_test_procedure_with_int_param_sql')
    def test_callproc_with_int_params(self):
        self._test_procedure(connection.features.create_test_procedure_with_int_param_sql, [1], ['INTEGER'])

    @skipUnlessDBFeature('create_test_procedure_with_int_param_sql', 'supports_callproc_kwargs')
    def test_callproc_kparams(self):
        # Keyword parameters are only supported on some backends.
        self._test_procedure(connection.features.create_test_procedure_with_int_param_sql, [], ['INTEGER'], {'P_I': 1})

    @skipIfDBFeature('supports_callproc_kwargs')
    def test_unsupported_callproc_kparams_raises_error(self):
        msg = 'Keyword parameters for callproc are not supported on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            with connection.cursor() as cursor:
                cursor.callproc('test_procedure', [], {'P_I': 1})
| 48.110092
| 119
| 0.646834
|
e5aa3c48ae61e3a307a3575d1db96a1358904f60
| 18,037
|
py
|
Python
|
python/ccxt/coinegg.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-06-02T10:48:48.000Z
|
2022-03-12T20:46:01.000Z
|
python/ccxt/coinegg.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-09-08T00:13:39.000Z
|
2021-05-08T20:05:48.000Z
|
python/ccxt/coinegg.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 1
|
2019-11-27T06:36:26.000Z
|
2019-11-27T06:36:26.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
class coinegg(Exchange):
def describe(self):
    # Static exchange metadata merged over the ccxt Exchange defaults:
    # capabilities, endpoint routing, fee schedule, error-code mapping and
    # exchange-specific options.
    return self.deep_extend(super(coinegg, self).describe(), {
        'id': 'coinegg',
        'name': 'CoinEgg',
        'countries': ['CN', 'UK'],
        'has': {
            'fetchOrder': True,
            'fetchOrders': True,
            'fetchOpenOrders': 'emulated',
            'fetchMyTrades': False,
            'fetchTickers': False,
        },
        'urls': {
            'logo': 'https://user-images.githubusercontent.com/1294454/36770310-adfa764e-1c5a-11e8-8e09-449daac3d2fb.jpg',
            'api': {
                'web': 'https://trade.coinegg.com/web',
                'rest': 'https://api.coinegg.com/api/v1',
            },
            'www': 'https://www.coinegg.com',
            'doc': 'https://www.coinegg.com/explain.api.html',
            'fees': 'https://www.coinegg.com/fee.html',
            'referral': 'https://www.coinegg.com/user/register?invite=523218',
        },
        # Endpoint templates; {quote}/{base} placeholders are filled by
        # implode_params() in sign().
        'api': {
            'web': {
                'get': [
                    'symbol/ticker?right_coin={quote}',
                    '{quote}/trends',
                    '{quote}/{base}/order',
                    '{quote}/{base}/trades',
                    '{quote}/{base}/depth.js',
                ],
            },
            'public': {
                'get': [
                    'ticker/region/{quote}',
                    'depth/region/{quote}',
                    'orders/region/{quote}',
                ],
            },
            'private': {
                'post': [
                    'balance',
                    'trade_add/region/{quote}',
                    'trade_cancel/region/{quote}',
                    'trade_view/region/{quote}',
                    'trade_list/region/{quote}',
                ],
            },
        },
        'fees': {
            'trading': {
                'maker': 0.1 / 100,
                'taker': 0.1 / 100,
            },
            'funding': {
                # Withdrawal fees: numbers are flat amounts in the currency,
                # '1%' strings are percentage fees.
                'withdraw': {
                    'BTC': 0.008,
                    'BCH': 0.002,
                    'LTC': 0.001,
                    'ETH': 0.01,
                    'ETC': 0.01,
                    'NEO': 0,
                    'QTUM': '1%',
                    'XRP': '1%',
                    'DOGE': '1%',
                    'LSK': '1%',
                    'XAS': '1%',
                    'BTS': '1%',
                    'GAME': '1%',
                    'GOOC': '1%',
                    'NXT': '1%',
                    'IFC': '1%',
                    'DNC': '1%',
                    'BLK': '1%',
                    'VRC': '1%',
                    'XPM': '1%',
                    'VTC': '1%',
                    'TFC': '1%',
                    'PLC': '1%',
                    'EAC': '1%',
                    'PPC': '1%',
                    'FZ': '1%',
                    'ZET': '1%',
                    'RSS': '1%',
                    'PGC': '1%',
                    'SKT': '1%',
                    'JBC': '1%',
                    'RIO': '1%',
                    'LKC': '1%',
                    'ZCC': '1%',
                    'MCC': '1%',
                    'QEC': '1%',
                    'MET': '1%',
                    'YTC': '1%',
                    'HLB': '1%',
                    'MRYC': '1%',
                    'MTC': '1%',
                    'KTC': 0,
                },
            },
        },
        # API error code -> exception class raised by handle_errors().
        'exceptions': {
            '103': AuthenticationError,
            '104': AuthenticationError,
            '105': AuthenticationError,
            '106': InvalidNonce,
            '200': InsufficientFunds,
            '201': InvalidOrder,
            '202': InvalidOrder,
            '203': OrderNotFound,
            '402': DDoSProtection,
        },
        # API error code -> human-readable message used in raised exceptions.
        'errorMessages': {
            '100': 'Required parameters can not be empty',
            '101': 'Illegal parameter',
            '102': 'coin does not exist',
            '103': 'Key does not exist',
            '104': 'Signature does not match',
            '105': 'Insufficient permissions',
            '106': 'Request expired(nonce error)',
            '200': 'Lack of balance',
            '201': 'Too small for the number of trading',
            '202': 'Price must be in 0 - 1000000',
            '203': 'Order does not exist',
            '204': 'Pending order amount must be above 0.001 BTC',
            '205': 'Restrict pending order prices',
            '206': 'Decimal place error',
            '401': 'System error',
            '402': 'Requests are too frequent',
            '403': 'Non-open API',
            '404': 'IP restriction does not request the resource',
            '405': 'Currency transactions are temporarily closed',
        },
        'options': {
            # Quote currencies iterated by fetch_markets().
            'quoteIds': ['btc', 'eth', 'usc', 'usdt'],
        },
        'commonCurrencies': {
            'JBC': 'JubaoCoin',
        },
    })
def fetch_markets(self, params={}):
    # Markets are discovered per quote currency: one web ticker request per
    # entry in options['quoteIds'], each listing the tradable base coins.
    quoteIds = self.options['quoteIds']
    result = []
    for b in range(0, len(quoteIds)):
        quoteId = quoteIds[b]
        response = self.webGetSymbolTickerRightCoinQuote({
            'quote': quoteId,
        })
        tickers = self.safe_value(response, 'data', [])
        for i in range(0, len(tickers)):
            ticker = tickers[i]
            # market ids look like '<base>_<quote>'
            id = ticker['symbol']
            baseId = id.split('_')[0]
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.safe_currency_code(base)
            quote = self.safe_currency_code(quote)
            symbol = base + '/' + quote
            # fixed 8-decimal precision for both amount and price
            precision = {
                'amount': 8,
                'price': 8,
            }
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': True,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision['amount']),
                        'max': math.pow(10, precision['amount']),
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': math.pow(10, precision['price']),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
                'info': ticker,
            })
    return result
def parse_ticker(self, ticker, market=None):
    """Translate a raw CoinEgg ticker payload into the unified ccxt ticker.

    The API reports only the last price and a percentage change, so the
    open, absolute change and average are reconstructed from those two.
    """
    symbol = market['symbol']
    now = self.milliseconds()
    last_price = self.safe_float(ticker, 'last')
    pct = self.safe_float(ticker, 'change')
    open_price = change_abs = avg_price = None
    if pct is not None:
        # last = open * (1 + pct/100)  =>  open = last / (1 + pct/100)
        ratio = pct / 100
        open_price = last_price / self.sum(1, ratio)
        change_abs = last_price - open_price
        avg_price = self.sum(last_price, open_price) / 2
    return {
        'symbol': symbol,
        'timestamp': now,
        'datetime': self.iso8601(now),
        'high': self.safe_float(ticker, 'high'),
        'low': self.safe_float(ticker, 'low'),
        'bid': self.safe_float(ticker, 'buy'),
        'bidVolume': None,
        'ask': self.safe_float(ticker, 'sell'),
        'askVolume': None,
        'vwap': None,
        'open': open_price,
        'close': last_price,
        'last': last_price,
        'previousClose': None,
        'change': change_abs,
        'percentage': pct,
        'average': avg_price,
        'baseVolume': self.safe_float(ticker, 'vol'),
        'quoteVolume': self.safe_float(ticker, 'quoteVol'),
        'info': ticker,
    }
def fetch_ticker(self, symbol, params={}):
    """Fetch the current ticker for *symbol* from the public region endpoint."""
    self.load_markets()
    market = self.market(symbol)
    payload = self.extend({
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }, params)
    response = self.publicGetTickerRegionQuote(payload)
    return self.parse_ticker(response, market)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for *symbol*.

    *limit* is accepted for interface compatibility but not forwarded to
    the API, which returns the full depth.
    """
    self.load_markets()
    market = self.market(symbol)
    payload = self.extend({
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }, params)
    response = self.publicGetDepthRegionQuote(payload)
    return self.parse_order_book(response)
def parse_trade(self, trade, market=None):
    """Convert a raw public trade entry into the unified ccxt trade dict."""
    when = self.safe_timestamp(trade, 'date')
    price = self.safe_float(trade, 'price')
    amount = self.safe_float(trade, 'amount')
    symbol = market['symbol']
    cost = None
    if amount is not None and price is not None:
        # derive the quote-currency cost, rounded to market precision
        cost = self.cost_to_precision(symbol, price * amount)
    return {
        'id': self.safe_string(trade, 'tid'),
        'info': trade,
        'timestamp': when,
        'datetime': self.iso8601(when),
        'symbol': symbol,
        'order': None,
        'type': 'limit',  # the exchange only supports limit orders
        'side': self.safe_string(trade, 'type'),
        'takerOrMaker': None,
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': None,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades for *symbol*."""
    self.load_markets()
    market = self.market(symbol)
    payload = self.extend({
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }, params)
    response = self.publicGetOrdersRegionQuote(payload)
    return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
    # Query the private balance endpoint and fold flat keys like
    # 'btc_balance' / 'btc_lock' into per-currency free/used accounts.
    self.load_markets()
    response = self.privatePostBalance(params)
    result = {'info': response}
    data = self.safe_value(response, 'data', {})
    # 'uid' is the account id, not a balance entry
    balances = self.omit(data, 'uid')
    keys = list(balances.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        currencyId, accountType = key.split('_')
        code = self.safe_currency_code(currencyId)
        if not (code in list(result.keys())):
            result[code] = self.account()
        # '<ccy>_lock' holds the frozen amount, '<ccy>_balance' the free one
        type = 'used' if (accountType == 'lock') else 'free'
        result[code][type] = self.safe_float(balances, key)
    return self.parse_balance(result)
def parse_order(self, order, market=None):
    # Convert a private trade_view/trade_list order payload into the
    # unified ccxt order structure.
    symbol = None
    if market is not None:
        symbol = market['symbol']
    timestamp = self.parse8601(self.safe_string(order, 'datetime'))
    price = self.safe_float(order, 'price')
    amount = self.safe_float(order, 'amount_original')
    remaining = self.safe_float(order, 'amount_outstanding')
    filled = None
    if amount is not None:
        if remaining is not None:
            filled = amount - remaining
    status = self.safe_string(order, 'status')
    if status == 'cancelled':
        # normalize the exchange's British spelling to the unified one
        status = 'canceled'
    else:
        # no explicit status: infer from the outstanding amount
        # (truthiness check: remaining of 0 or 0.0 means fully filled)
        status = 'open' if remaining else 'closed'
    info = self.safe_value(order, 'info', order)
    type = 'limit'
    side = self.safe_string(order, 'type')
    id = self.safe_string(order, 'id')
    return {
        'id': id,
        'datetime': self.iso8601(timestamp),
        'timestamp': timestamp,
        'lastTradeTimestamp': None,
        'status': status,
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'cost': None,
        'amount': amount,
        'filled': filled,
        'remaining': remaining,
        'trades': None,
        'fee': None,
        'info': info,
    }
def create_order(self, symbol, type, side, amount, price=None, params={}):
    # Place an order; 'type' is part of the unified signature but the API
    # only supports limit orders. A synthetic order structure is built from
    # the request (the API returns just an id) and cached in self.orders.
    self.load_markets()
    market = self.market(symbol)
    request = {
        'coin': market['baseId'],
        'quote': market['quoteId'],
        'type': side,
        'amount': amount,
        'price': price,
    }
    response = self.privatePostTradeAddRegionQuote(self.extend(request, params))
    id = self.safe_string(response, 'id')
    order = self.parse_order({
        'id': id,
        'datetime': self.ymdhms(self.milliseconds()),
        'amount_original': amount,
        'amount_outstanding': amount,
        'price': price,
        'type': side,
        'info': response,
    }, market)
    self.orders[id] = order
    return order
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an open order by id; *symbol* is required to resolve the market."""
    self.load_markets()
    market = self.market(symbol)
    payload = self.extend({
        'id': id,
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }, params)
    return self.privatePostTradeCancelRegionQuote(payload)
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by id via the private trade_view endpoint."""
    self.load_markets()
    market = self.market(symbol)
    payload = self.extend({
        'id': id,
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }, params)
    response = self.privatePostTradeViewRegionQuote(payload)
    return self.parse_order(response['data'], market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """List orders for a market; *since* (milliseconds) is forwarded as seconds."""
    self.load_markets()
    market = self.market(symbol)
    payload = {
        'coin': market['baseId'],
        'quote': market['quoteId'],
    }
    if since is not None:
        payload['since'] = since / 1000  # the API expects unix seconds
    response = self.privatePostTradeListRegionQuote(self.extend(payload, params))
    return self.parse_orders(response['data'], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch only open orders by delegating to fetch_orders with type=open."""
    merged = self.extend({'type': 'open'}, params)
    return self.fetch_orders(symbol, since, limit, merged)
def nonce(self):
    # Millisecond timestamp used as a monotonically increasing request nonce.
    return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    # Build the final HTTP request: plain URLs for public/web endpoints,
    # key/nonce/signature authentication for private ones.
    apiType = 'rest'
    if api == 'web':
        apiType = api
    url = self.urls['api'][apiType] + '/' + self.implode_params(path, params)
    # params consumed by the path template are removed from the querystring
    query = self.omit(params, self.extract_params(path))
    if api == 'public' or api == 'web':
        if query:
            url += '?' + self.urlencode(query)
    else:
        self.check_required_credentials()
        query = self.urlencode(self.extend({
            'key': self.apiKey,
            'nonce': self.nonce(),
        }, query))
        # the HMAC key is a hash of the API secret, not the secret itself
        # (NOTE(review): hash/hmac algorithms are the Exchange defaults --
        # confirm against the CoinEgg API docs)
        secret = self.hash(self.encode(self.secret))
        signature = self.hmac(self.encode(query), self.encode(secret))
        query += '&' + 'signature=' + signature
        if method == 'GET':
            url += '?' + query
        else:
            headers = {
                'Content-type': 'application/x-www-form-urlencoded',
            }
            body = query
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    # Map API error codes to typed exceptions using the 'exceptions' and
    # 'errorMessages' tables from describe().
    if response is None:
        return
    # private endpoints return the following structure:
    # {"result":true,"data":{...}} - success
    # {"result":false,"code":"103"} - failure
    # {"code":0,"msg":"Suceess","data":{"uid":"2716039","btc_balance":"0.00000000","btc_lock":"0.00000000","xrp_balance":"0.00000000","xrp_lock":"0.00000000"}}
    result = self.safe_value(response, 'result')
    if result is None:
        # public endpoint ← self comment left here by the contributor, in fact a missing result does not necessarily mean a public endpoint...
        # we should just check the code and don't rely on the result at all here...
        return
    if result is True:
        # success
        return
    # failure: translate the numeric error code into an exception
    errorCode = self.safe_string(response, 'code')
    errorMessages = self.errorMessages
    message = self.safe_string(errorMessages, errorCode, 'Unknown Error')
    if errorCode in self.exceptions:
        raise self.exceptions[errorCode](self.id + ' ' + message)
    else:
        raise ExchangeError(self.id + ' ' + message)
| 37.892857
| 163
| 0.458225
|
246212df9cdb90f510ece27b8df35a7fa998ee5a
| 600
|
py
|
Python
|
full_system/configuration.py
|
jeiros/BP-Hack
|
8ecd1ab4edc9541f1d9a575a3a1f9c5ce31c00ce
|
[
"MIT"
] | 1
|
2019-11-19T14:42:18.000Z
|
2019-11-19T14:42:18.000Z
|
full_system/configuration.py
|
Jspujol/BP-Hack
|
c5c079e0c1fad5ad04c521d678ec6f02fb5ce6a6
|
[
"MIT"
] | null | null | null |
full_system/configuration.py
|
Jspujol/BP-Hack
|
c5c079e0c1fad5ad04c521d678ec6f02fb5ce6a6
|
[
"MIT"
] | 1
|
2021-12-15T11:40:24.000Z
|
2021-12-15T11:40:24.000Z
|
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold

# Aspect-ratio thresholds per facial feature. The dead zero-initialisation
# that preceded these values has been removed -- the entries were always
# overwritten immediately.
AR_THRESHOLD = {
    'eye': 0.17,
    # NOTE(review): the mouth threshold is negative -- presumably the mouth
    # ratio is signed in the detector; confirm against the consumer code.
    'mouth': -0.67,
}
# Number of consecutive frames the ratio must stay past the threshold.
AR_CONSEC_FRAMES = {'eye': 15, 'mouth': 15}
# Per-feature detection score thresholds.
THRESHOLD = {'eye': 0.55, 'mouth': 0.55}
# Number of recent measurements considered when checking values.
VALUES_CHECK = 60
eye_factor_threshold = 0.65
mouth_factor_threshold = 1.2
# Path to the dlib facial landmark model file.
shape_predictor = "./shape_predictor.dat"
# Number of frames used to establish the initial baseline.
SET_INITIAL_VALUE = 20
# Toggle Raspberry-Pi-specific behavior.
is_raspi = False
| 26.086957
| 64
| 0.738333
|
96a87871d682a6a84b3292ed9c6c1db0ec95ce1d
| 1,004
|
py
|
Python
|
test_readme.py
|
tarczynskitomek/polski-w-it
|
088b57278a90c37b3595b6cb4515d2d48b1e0709
|
[
"Apache-2.0"
] | null | null | null |
test_readme.py
|
tarczynskitomek/polski-w-it
|
088b57278a90c37b3595b6cb4515d2d48b1e0709
|
[
"Apache-2.0"
] | null | null | null |
test_readme.py
|
tarczynskitomek/polski-w-it
|
088b57278a90c37b3595b6cb4515d2d48b1e0709
|
[
"Apache-2.0"
] | null | null | null |
import re
import pytest
@pytest.fixture
def readme():
    """Return the README's markdown table rows (lines starting with '|')."""
    with open("README.md", "r", encoding="utf-8") as handle:
        return [row for row in handle.readlines() if row.startswith('|')]
def english_term(row):
    """Return the trimmed contents of the first (English) cell of a table row."""
    # index of the closing '|' of the first cell, measured in row[1:]
    separator_index = row[1:].find('|')
    return row[1:separator_index].strip()
@pytest.mark.skip(reason="TODO: skipped after migrating to AsciiDoc")
def test_all_rows_in_table_equal_length(readme):
    """All table rows must be equally long (a cheap proxy for alignment)."""
    distinct_lengths = {len(row) for row in readme}
    assert len(distinct_lengths) == 1
@pytest.mark.skip(reason="TODO: skipped after migrating to AsciiDoc")
def test_terms_are_sorted(readme):
    """English terms (header rows excluded) must appear in alphabetical order."""
    terms = [english_term(row) for row in readme[2:]]
    assert terms == sorted(terms)
@pytest.mark.skip(reason="TODO: skipped after migrating to AsciiDoc")
def test_columns_are_aligned(readme):
    """Every table row must have its '|' separators at identical offsets.

    Bug fix: the pattern must be r'\\|' -- an unescaped '|' is regex
    alternation, which matches the empty string at every index, so the
    positions were 0..len(row) for any row and misalignment between rows
    of equal length was never detected.
    """
    def column_position(row):
        return [m.start() for m in re.finditer(r'\|', row)]
    column_position_in_each_line = [str(column_position(line)) for line in readme]
    assert len(set(column_position_in_each_line)) == 1
| 29.529412
| 82
| 0.716135
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.