hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea472ba98b118db51678256b53b75e13e1db4839 | 5,171 | py | Python | tests/mypy/test_mypy.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 1 | 2020-05-03T06:32:47.000Z | 2020-05-03T06:32:47.000Z | tests/mypy/test_mypy.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 189 | 2020-07-12T08:13:29.000Z | 2022-03-28T01:16:29.000Z | tests/mypy/test_mypy.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 2 | 2021-11-23T16:28:21.000Z | 2021-11-23T16:28:33.000Z | import importlib
import os
import re
from pathlib import Path
import pytest
try:
from mypy import api as mypy_api
except ImportError:
mypy_api = None # type: ignore
try:
import dotenv
except ImportError:
dotenv = None # type: ignore
# This ensures mypy can find the test files, no matter where tests are run from:
os.chdir(Path(__file__).parent.parent.parent)
# Test matrix: (mypy config file, module under test, expected-output file).
# An expected-output file of None means mypy must report no errors for that case.
cases = [
    ('mypy-plugin.ini', 'plugin_success.py', None),
    ('mypy-plugin.ini', 'plugin_fail.py', 'plugin-fail.txt'),
    ('mypy-plugin-strict.ini', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('mypy-plugin-strict.ini', 'plugin_fail.py', 'plugin-fail-strict.txt'),
    ('mypy-default.ini', 'success.py', None),
    ('mypy-default.ini', 'fail1.py', 'fail1.txt'),
    ('mypy-default.ini', 'fail2.py', 'fail2.txt'),
    ('mypy-default.ini', 'fail3.py', 'fail3.txt'),
    ('mypy-default.ini', 'fail4.py', 'fail4.txt'),
    ('mypy-default.ini', 'plugin_success.py', 'plugin_success.txt'),
    ('pyproject-default.toml', 'success.py', None),
    ('pyproject-default.toml', 'fail1.py', 'fail1.txt'),
    ('pyproject-default.toml', 'fail2.py', 'fail2.txt'),
    ('pyproject-default.toml', 'fail3.py', 'fail3.txt'),
    ('pyproject-default.toml', 'fail4.py', 'fail4.txt'),
    ('pyproject-plugin.toml', 'plugin_success.py', None),
    ('pyproject-plugin.toml', 'plugin_fail.py', 'plugin-fail.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_fail.py', 'plugin-fail-strict.txt'),
]
# Modules that mypy accepts with no errors; these are also expected to execute cleanly.
executable_modules = list({fname[:-3] for _, fname, out_fname in cases if out_fname is None})
@pytest.mark.skipif(not (dotenv and mypy_api), reason='dotenv or mypy are not installed')
@pytest.mark.parametrize('module', executable_modules)
def test_success_cases_run(module: str) -> None:
    """
    Ensure the "success" files can actually be executed.

    Importing the module runs its top-level code; any runtime error fails the test.
    """
    # Bug fix: the original stacked a duplicate skipif plus a
    # parametrize('config_filename,python_filename,output_filename', cases)
    # decorator whose argument names do not exist in this signature, which
    # makes pytest fail at collection time. Only the 'module' parametrization
    # belongs on this test.
    importlib.import_module(f'tests.mypy.modules.{module}')
| 46.169643 | 116 | 0.705473 | import importlib
import os
import re
from pathlib import Path
import pytest
try:
from mypy import api as mypy_api
except ImportError:
mypy_api = None # type: ignore
try:
import dotenv
except ImportError:
dotenv = None # type: ignore
# This ensures mypy can find the test files, no matter where tests are run from:
os.chdir(Path(__file__).parent.parent.parent)
# Test matrix: (mypy config file, module under test, expected-output file).
# An expected-output file of None means mypy must report no errors for that case.
cases = [
    ('mypy-plugin.ini', 'plugin_success.py', None),
    ('mypy-plugin.ini', 'plugin_fail.py', 'plugin-fail.txt'),
    ('mypy-plugin-strict.ini', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('mypy-plugin-strict.ini', 'plugin_fail.py', 'plugin-fail-strict.txt'),
    ('mypy-default.ini', 'success.py', None),
    ('mypy-default.ini', 'fail1.py', 'fail1.txt'),
    ('mypy-default.ini', 'fail2.py', 'fail2.txt'),
    ('mypy-default.ini', 'fail3.py', 'fail3.txt'),
    ('mypy-default.ini', 'fail4.py', 'fail4.txt'),
    ('mypy-default.ini', 'plugin_success.py', 'plugin_success.txt'),
    ('pyproject-default.toml', 'success.py', None),
    ('pyproject-default.toml', 'fail1.py', 'fail1.txt'),
    ('pyproject-default.toml', 'fail2.py', 'fail2.txt'),
    ('pyproject-default.toml', 'fail3.py', 'fail3.txt'),
    ('pyproject-default.toml', 'fail4.py', 'fail4.txt'),
    ('pyproject-plugin.toml', 'plugin_success.py', None),
    ('pyproject-plugin.toml', 'plugin_fail.py', 'plugin-fail.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_success.py', 'plugin-success-strict.txt'),
    ('pyproject-plugin-strict.toml', 'plugin_fail.py', 'plugin-fail-strict.txt'),
]
# Modules that mypy accepts with no errors; these are also expected to execute cleanly.
executable_modules = list({fname[:-3] for _, fname, out_fname in cases if out_fname is None})
@pytest.mark.skipif(not (dotenv and mypy_api), reason='dotenv or mypy are not installed')
@pytest.mark.parametrize('config_filename,python_filename,output_filename', cases)
def test_mypy_results(config_filename: str, python_filename: str, output_filename: str) -> None:
    """Run mypy on one module/config pair and compare against the expected output.

    An ``output_filename`` of None means mypy must succeed with return code 0
    and no output. If the expected-output file is missing, the actual output is
    written to it and the test fails with a RuntimeError so the newly generated
    file can be reviewed and committed.
    """
    full_config_filename = f'tests/mypy/configs/{config_filename}'
    full_filename = f'tests/mypy/modules/{python_filename}'
    output_path = None if output_filename is None else Path(f'tests/mypy/outputs/{output_filename}')
    # Specifying a different cache dir for each configuration dramatically speeds up subsequent execution
    # It also prevents cache-invalidation-related bugs in the tests
    cache_dir = f'.mypy_cache/test-{os.path.splitext(config_filename)[0]}'
    command = [full_filename, '--config-file', full_config_filename, '--cache-dir', cache_dir, '--show-error-codes']
    print(f"\nExecuting: mypy {' '.join(command)}")  # makes it easier to debug as necessary
    actual_result = mypy_api.run(command)
    # mypy_api.run returns (stdout, stderr, exit status).
    actual_out, actual_err, actual_returncode = actual_result
    # Need to strip filenames due to differences in formatting by OS
    actual_out = '\n'.join(['.py:'.join(line.split('.py:')[1:]) for line in actual_out.split('\n') if line]).strip()
    actual_out = re.sub(r'\n\s*\n', r'\n', actual_out)
    if actual_out:
        print('{0}\n{1:^100}\n{0}\n{2}\n{0}'.format('=' * 100, 'mypy output', actual_out))
    assert actual_err == ''
    # Error cases must exit 1; success cases must exit 0.
    expected_returncode = 0 if output_filename is None else 1
    assert actual_returncode == expected_returncode
    if output_path and not output_path.exists():
        # Bootstrap a missing expected-output file, but still fail the run.
        output_path.write_text(actual_out)
        raise RuntimeError(f'wrote actual output to {output_path} since file did not exist')
    expected_out = Path(output_path).read_text() if output_path else ''
    assert actual_out == expected_out, actual_out
@pytest.mark.skipif(not (dotenv and mypy_api), reason='dotenv or mypy are not installed')
def test_bad_toml_config() -> None:
    """A pyproject.toml with a non-boolean plugin parameter must make the plugin raise."""
    config_path = 'tests/mypy/configs/pyproject-plugin-bad-param.toml'
    module_path = 'tests/mypy/modules/success.py'
    # A dedicated cache dir speeds up repeat runs and avoids cross-config
    # cache-invalidation bugs in the tests.
    cache_dir = '.mypy_cache/test-pyproject-plugin-bad-param'
    cmd = [module_path, '--config-file', config_path, '--cache-dir', cache_dir, '--show-error-codes']
    print(f"\nExecuting: mypy {' '.join(cmd)}")  # makes it easier to debug as necessary
    with pytest.raises(ValueError) as exc_info:
        mypy_api.run(cmd)
    assert str(exc_info.value) == 'Configuration value must be a boolean for key: init_forbid_extra'
@pytest.mark.parametrize('module', executable_modules)
def test_success_cases_run(module: str) -> None:
    """Verify each expected-success module imports (i.e. executes) without error."""
    module_path = f'tests.mypy.modules.{module}'
    importlib.import_module(module_path)
def test_explicit_reexports() -> None:
    """Every name exported by the submodules must be re-exported from the package root."""
    from pydantic import __all__ as root_all
    from pydantic.main import __all__ as main
    from pydantic.networks import __all__ as networks
    from pydantic.tools import __all__ as tools
    from pydantic.types import __all__ as types

    # dict preserves insertion order, so submodules are checked in the same order.
    submodule_exports = {'main': main, 'network': networks, 'tools': tools, 'types': types}
    for name, export_all in submodule_exports.items():
        for export in export_all:
            assert export in root_all, f'{export} is in {name}.__all__ but missing from re-export in __init__.py'
| 2,942 | 0 | 67 |
f8b9e4e8b0c62ee60777e7974dd1a400acc61e2c | 18,835 | py | Python | cli/aps.py | Verimatrix/app-shield-protect | 1e9e02ef2760139fb7cf6efc0e22155234e7aa4c | [
"MIT"
] | 19 | 2021-04-26T15:38:22.000Z | 2022-03-30T20:01:20.000Z | cli/aps.py | Daniel1Rosen/app-shield-protect | 1e9e02ef2760139fb7cf6efc0e22155234e7aa4c | [
"MIT"
] | 1 | 2021-03-11T14:20:55.000Z | 2021-03-11T17:32:13.000Z | cli/aps.py | Daniel1Rosen/app-shield-protect | 1e9e02ef2760139fb7cf6efc0e22155234e7aa4c | [
"MIT"
] | 1 | 2022-01-06T20:53:32.000Z | 2022-01-06T20:53:32.000Z | #!/usr/bin/python
'''Entrypoint for APS CLI'''
import argparse
import json
import logging
import os
import sys
import traceback
import coloredlogs
from aps_commands import ApsCommands
from aps_utils import get_config, authenticate_secret
LOGGER = logging.getLogger(__name__)
# set environment variables that control coloredlog module output
os.environ['COLOREDLOGS_LOG_FORMAT'] = '%(levelname)s:%(message)s'
os.environ['COLOREDLOGS_FIELD_STYLES'] = ''
os.environ['COLOREDLOGS_LEVEL_STYLES'] = 'debug=blue;info=green;warning=yellow;' +\
'error=red,bold'
def supported_commands():
    '''Returns the list of supported commands'''
    # Kept as an immutable tuple internally; a fresh list is handed back so
    # callers can never mutate the canonical command set.
    command_names = (
        'protect',
        'list-applications',
        'add-application',
        'update-application',
        'delete-application',
        'set-signing-certificate',
        'list-builds',
        'add-build',
        'delete-build',
        'protect-start',
        'protect-get-status',
        'protect-cancel',
        'protect-download',
        'get-account-info',
        'display-application-package-id',
    )
    return list(command_names)
class Aps:
    '''Class encapsulating all supported command line options.

    Each public method implements one CLI sub-command: it builds an argparse
    parser for that sub-command's options, parses sys.argv from
    self.command_pos onwards, calls self.parse_global_args to authenticate,
    and delegates the actual work to self.commands.

    NOTE(review): no __init__ or parse_global_args is visible in this copy, so
    self.command_pos and self.commands are never assigned here -- confirm the
    initializer exists elsewhere before relying on this class.
    '''

    def protect(self, global_args):
        '''Perform APS protection from an input file.
        This is a high level command that takes an input
        binary to be protected, performs protection and outputs the protected
        binary. This command may take many minutes to complete.'''
        parser = argparse.ArgumentParser(
            usage='aps protect [<args>]',
            description='Perform APS protection on the input file.')
        parser.add_argument('--file', type=str, required=True,
                            help='Build file (aab, apk or zipped xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect(args.file)

    def get_account_info(self, global_args):
        '''Get info about the user and organization'''
        parser = argparse.ArgumentParser(
            usage='aps get-account-info [<args>]',
            description='Returns information about the user and organization (customer)')
        # No sub-command options; parsing still handles -h and rejects stray args.
        parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.get_account_info()

    def add_application(self, global_args):
        '''Add a new application'''
        parser = argparse.ArgumentParser(
            usage='aps add-application [<args>]',
            description='''Add a new application. By default the application is
            accessible to other users within your organization. The --private, --no-upload,
            --no-delete options can be used to restrict access to the application.
            ''')
        parser.add_argument('--os', type=str, required=True,
                            choices=['ios', 'android'], help='Operating System.')
        parser.add_argument('--name', type=str, required=True, help='Friendly name for application.')
        parser.add_argument('--package-id', type=str, required=True, help='Application package ID.')
        parser.add_argument('--group', type=str, required=False, help='Optional group identifier.')
        parser.add_argument('--private',
                            help='''Prevent the application from being visible to other users.
                            This option will automatically set each of --no-upload
                            and --no-delete options.''',
                            action='store_true', default=False)
        parser.add_argument('--no-upload',
                            help='Prevent other users from uploading new builds for this app.',
                            action='store_true', default=False)
        parser.add_argument('--no-delete',
                            help='Prevent other users from deleting builds for this app.',
                            action='store_true', default=False)
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        # Collect the three permission flags into the shape the API expects.
        permissions = {}
        permissions['private'] = args.private
        permissions['no_upload'] = args.no_upload
        permissions['no_delete'] = args.no_delete
        self.parse_global_args(global_args)
        return self.commands.add_application(args.name, args.package_id, args.os, permissions, args.group)

    def update_application(self, global_args):
        '''Update application properties'''
        parser = argparse.ArgumentParser(
            usage='aps update-application [<args>]',
            description='''Update application properties. The application name and
            permission related properties can be modified''')
        parser.add_argument('--application-id', type=str, required=True,
                            help='''Application ID. This identifies the application whose
                            properties should be updated, this property cannot itself be
                            changed. The remaining arguments correspond to application
                            properties that can be updated by this call.''')
        parser.add_argument('--name', type=str, required=True, help='Friendly name for application')
        parser.add_argument('--private',
                            help='''Prevent the app from being visible to other users. This option
                            will automatically set each of the --no-upload
                            and --no-delete options.''',
                            action='store_true', default=False)
        parser.add_argument('--no-upload',
                            help='Prevent other users from uploading new builds for this app.',
                            action='store_true', default=False)
        parser.add_argument('--no-delete',
                            help='Prevent other users from deleting builds for this app.',
                            action='store_true', default=False)
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        # Collect the three permission flags into the shape the API expects.
        permissions = {}
        permissions['private'] = args.private
        permissions['no_upload'] = args.no_upload
        permissions['no_delete'] = args.no_delete
        self.parse_global_args(global_args)
        return self.commands.update_application(args.application_id, args.name, permissions)

    def list_applications(self, global_args):
        '''List applications'''
        parser = argparse.ArgumentParser(
            usage='aps list-applications [<args>]',
            description='''List applications.
            Optional "application-id" or "group" parameters can be specified to restrict
            the list of applications that are reported by this call.
            When the "application-id" parameter is provided this operation returns the
            specific application identified by "application-id".
            When the "group" parameter is provided this operation returns all
            applications belonging to the specified group.
            When neither "application-id" or "group" are provided this operation returns the
            list of all applications.''')
        parser.add_argument('--application-id', type=str, required=False, help='Application ID')
        parser.add_argument('--group', type=str, required=False, help='Application group identifier')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.list_applications(args.application_id, args.group)

    def delete_application(self, global_args):
        '''Delete an application'''
        parser = argparse.ArgumentParser(
            usage='aps delete-application [<args>]',
            description='''Delete application. This operation will also delete all builds
            belonging to this application.''')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.delete_application(args.application_id)

    def list_builds(self, global_args):
        '''List builds'''
        parser = argparse.ArgumentParser(
            usage='aps list-builds [<args>]',
            description='''List builds.
            Optional "application-id" or "build-id" parameters can be specified to restrict
            the list of builds that are reported by this call.
            When the "application-id" parameter is provided this operation returns the list
            of builds for that particular application. When the "build-id" parameter is
            provided this operation returns the specific build identified by "build-id".
            When neither "application-id" or "build-id" are provided this operation returns
            all builds.''')
        parser.add_argument('--application-id', type=str, required=False, help='Application ID')
        parser.add_argument('--build-id', type=str, required=False, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.list_builds(args.application_id, args.build_id)

    def add_build(self, global_args):
        '''Add a new build'''
        parser = argparse.ArgumentParser(
            usage='aps add-build [<args>]',
            description='Add a new build')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        parser.add_argument('--file', type=str, required=False,
                            help='Build file (apk or xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.add_build(args.file, args.application_id)

    def delete_build(self, global_args):
        '''Delete a build'''
        parser = argparse.ArgumentParser(
            usage='aps delete-build [<args>]',
            description='Delete build')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.delete_build(args.build_id)

    def protect_start(self, global_args):
        '''Start build protection'''
        parser = argparse.ArgumentParser(
            usage='aps protect-start [<args>]',
            description='Initiate protection of a previously added build')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_start(args.build_id)

    def protect_cancel(self, global_args):
        '''Cancel protection of a build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-cancel [<args>]',
            description='Cancel protection of a build.')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_cancel(args.build_id)

    def protect_get_status(self, global_args):
        '''Get the status of a build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-get-status [<args>]',
            description='''Get the status of a build. This includes progress
            information when a protection build is ongoing.''')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_get_status(args.build_id)

    def protect_download(self, global_args):
        '''Download a protected build'''
        parser = argparse.ArgumentParser(
            usage='aps protect-download [<args>]',
            description='Download a previously protected build.')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_download(args.build_id)

    def display_application_package_id(self, global_args):
        '''Utility to extract and display the application package id from a file.'''
        parser = argparse.ArgumentParser(
            usage='aps display_application_package_id [<args>]',
            description='''Display the application package id for a input file.
            This can be used as input when calling add-application.
            ''')
        parser.add_argument('--file', type=str, required=True,
                            help='Input file (apk or xcarchive folder)')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.display_application_package_id(args.file)

    def set_signing_certificate(self, global_args):
        '''Set signing certificate'''
        parser = argparse.ArgumentParser(
            usage='aps set-signing-certificate [<args>]',
            description='''Set signing certificate for an application.''')
        parser.add_argument('--application-id', type=str, required=True, help='Application ID')
        parser.add_argument('--file', type=str, required=False,
                            help='PEM encoded certificate file. If omitted then this unsets any previously set certificate')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.set_signing_certificate(args.application_id, args.file)
# Script entry point: constructing Aps parses sys.argv and dispatches the command.
if __name__ == '__main__':
    Aps()
| 43.700696 | 124 | 0.628935 | #!/usr/bin/python
'''Entrypoint for APS CLI'''
import argparse
import json
import logging
import os
import sys
import traceback

import coloredlogs

from aps_commands import ApsCommands
from aps_utils import get_config, authenticate_password, authenticate_secret
LOGGER = logging.getLogger(__name__)
# set environment variables that control coloredlog module output
os.environ['COLOREDLOGS_LOG_FORMAT'] = '%(levelname)s:%(message)s'
os.environ['COLOREDLOGS_FIELD_STYLES'] = ''
os.environ['COLOREDLOGS_LEVEL_STYLES'] = 'debug=blue;info=green;warning=yellow;' +\
'error=red,bold'
def supported_commands():
'''Returns the list of supported commands'''
return ['protect',
'list-applications',
'add-application',
'update-application',
'delete-application',
'set-signing-certificate',
'list-builds',
'add-build',
'delete-build',
'protect-start',
'protect-get-status',
'protect-cancel',
'protect-download',
'get-account-info',
'display-application-package-id' ]
class Aps:
'''Class encapsulating all supported command line options'''
    def __init__(self):
        """Parse global options, locate the sub-command in argv and dispatch it.

        Construction runs the entire CLI: the named sub-command method is
        invoked, any non-None return value is printed (JSON-encoded unless it
        is already a string), and failures exit the process with status 1.
        """
        self.commands = None
        parser = argparse.ArgumentParser(
            description='APS command line tool',
            usage='''aps [global-options] <command> [<command-options>]

The following commands are available
* protect
* list-applications
* add-application
* update-application
* delete-application
* set-signing-certificate
* list-builds
* add-build
* delete-build
* protect-start
* protect-get-status
* protect-cancel
* protect-download
* get-account-info
* display-application-package-id

Use aps <command> -h for information on a specific command.
Use aps -h for information on the global options.
''')
        parser.add_argument('command', help='Command to run')
        parser.add_argument('-v',
                            action='store_true',
                            help='Verbose debug output')
        parser.add_argument('-u', '--username', type=str, help='Username (email)')
        parser.add_argument('-p', '--password', type=str, help='Password')
        parser.add_argument('-c', '--client-id', type=str, help='Client ID')
        parser.add_argument('-s', '--client-secret', type=str, help='Client secret')
        # find the index of the command argument
        self.command_pos = len(sys.argv)
        i = 0
        for arg in sys.argv[1:]:
            i += 1
            if arg in supported_commands():
                self.command_pos = i + 1
                break
        # parse_args defaults to [1:] for args, but we exclude the command arguments
        # Defer parsing and validation of the global args (parse_global_args method)
        # until we have parsed the command arguments. Otherwise this would prevent
        # running aps.py COMMAND -h to get help on the COMMAND arguments unless
        # all mandatory global args were to be supplied.
        args = parser.parse_args(sys.argv[1:self.command_pos])
        # python doesn't allow for hyphens in method names
        mapped_command = args.command.replace('-', '_')
        if not hasattr(self, mapped_command):
            print('Unrecognized command')
            parser.print_help()
            sys.exit(1)
        # invoke command
        try:
            response = getattr(self, mapped_command)(args)
            if response is not None:
                if isinstance(response, str):
                    print(response)
                else:
                    print(json.dumps(response, indent=2, sort_keys=True))
        except Exception as e:
            # Any failure in a sub-command is reported with a traceback and exit 1.
            traceback.print_exc()
            sys.exit(1)
    def parse_global_args(self, args):
        """Validate global options, authenticate and build the ApsCommands client.

        Exits the process with status 1 when neither a username/password pair
        nor a client-id/client-secret pair was supplied.
        """
        # -v switches coloredlogs between DEBUG and ERROR verbosity.
        if args.v:
            coloredlogs.install(level=logging.DEBUG)
        else:
            coloredlogs.install(level=logging.ERROR)
        config = get_config()
        if args.username and args.password:
            # authenticate_password comes from aps_utils (see the import block).
            headers = authenticate_password(args.username, args.password, config)
            using_client_secret = False
        elif args.client_id and args.client_secret:
            headers = authenticate_secret(args.client_id, args.client_secret, config)
            using_client_secret = True
        else:
            msg = ('Error: missing authentication credentials.\n'
                   'Either a --username, --password pair or a --client-id, --client-secret\n'
                   'pair of arguments must be provided')
            print(msg)
            sys.exit(1)
        # Pin the API version header when a versions/openapi.txt file is present
        # two directories up (i.e. when running from a source checkout).
        openapi_file = os.path.abspath(os.path.join(os.pardir, os.pardir, 'versions', 'openapi.txt'))
        if os.path.exists(openapi_file):
            with open(openapi_file, 'r') as file:
                version = file.read().replace('\n', '')
            headers['Accept'] = 'application/vnd.aps.appshield.verimatrixcloud.net;version=%s' % version
        self.commands = ApsCommands(headers, config, using_client_secret, 0)
def protect(self, global_args):
'''Perform APS protection from an input file.
This is a high level command that takes an input
binary to be protected, performs protection and outputs the protected
binary. This command may take many minutes to complete.'''
parser = argparse.ArgumentParser(
usage='aps protect [<args>]',
description='Perform APS protection on the input file.')
parser.add_argument('--file', type=str, required=True,
help='Build file (aab, apk or zipped xcarchive folder)')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.protect(args.file)
def get_account_info(self, global_args):
'''Get info about the user and organization'''
parser = argparse.ArgumentParser(
usage='aps get-account-info [<args>]',
description='Returns information about the user and organization (customer)')
parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.get_account_info()
def add_application(self, global_args):
'''Add a new application'''
parser = argparse.ArgumentParser(
usage='aps add-application [<args>]',
description='''Add a new application. By default the application is
accessible to other users within your organization. The --private, --no-upload,
--no-delete options can be used to restrict access to the application.
''')
parser.add_argument('--os', type=str, required=True,
choices=['ios', 'android'], help='Operating System.')
parser.add_argument('--name', type=str, required=True, help='Friendly name for application.')
parser.add_argument('--package-id', type=str, required=True, help='Application package ID.')
parser.add_argument('--group', type=str, required=False, help='Optional group identifier.')
parser.add_argument('--private',
help='''Prevent the application from being visible to other users.
This option will automatically set each of --no-upload
and --no-delete options.''',
action='store_true', default=False)
parser.add_argument('--no-upload',
help='Prevent other users from uploading new builds for this app.',
action='store_true', default=False)
parser.add_argument('--no-delete',
help='Prevent other users from deleting builds for this app.',
action='store_true', default=False)
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
permissions = {}
permissions['private'] = args.private
permissions['no_upload'] = args.no_upload
permissions['no_delete'] = args.no_delete
self.parse_global_args(global_args)
return self.commands.add_application(args.name, args.package_id, args.os, permissions, args.group)
def update_application(self, global_args):
'''Update application properties'''
parser = argparse.ArgumentParser(
usage='aps update-application [<args>]',
description='''Update application properties. The application name and
permission related properties can be modified''')
parser.add_argument('--application-id', type=str, required=True,
help='''Application ID. This identifies the application whose
properties should be updated, this property cannot itself be
changed. The remaining arguments correspond to application
properties that can be updated by this call.''')
parser.add_argument('--name', type=str, required=True, help='Friendly name for application')
parser.add_argument('--private',
help='''Prevent the app from being visible to other users. This option
will automatically set each of the --no-upload
and --no-delete options.''',
action='store_true', default=False)
parser.add_argument('--no-upload',
help='Prevent other users from uploading new builds for this app.',
action='store_true', default=False)
parser.add_argument('--no-delete',
help='Prevent other users from deleting builds for this app.',
action='store_true', default=False)
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
permissions = {}
permissions['private'] = args.private
permissions['no_upload'] = args.no_upload
permissions['no_delete'] = args.no_delete
self.parse_global_args(global_args)
return self.commands.update_application(args.application_id, args.name, permissions)
def list_applications(self, global_args):
'''List applications'''
parser = argparse.ArgumentParser(
usage='aps list-applications [<args>]',
description='''List applications.
Optional "application-id" or "group" parameters can be specified to restrict
the list of applications that are reported by this call.
When the "application-id" parameter is provided this operation returns the
specific application identified by "application-id".
When the "group" parameter is provided this operation returns all
applications belonging to the specified group.
When neither "application-id" or "group" are provided this operation returns the
list of all applications.''')
parser.add_argument('--application-id', type=str, required=False, help='Application ID')
parser.add_argument('--group', type=str, required=False, help='Application group identifier')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.list_applications(args.application_id, args.group)
def delete_application(self, global_args):
'''Delete an application'''
parser = argparse.ArgumentParser(
usage='aps delete-application [<args>]',
description='''Delete application. This operation will also delete all builds
belonging to this application.''')
parser.add_argument('--application-id', type=str, required=True, help='Application ID')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.delete_application(args.application_id)
    def list_builds(self, global_args):
        '''List builds, optionally filtered by application or build id.

        Arguments:
            global_args: global (pre-subcommand) CLI options to apply.

        Returns:
            Result of the backend ``list_builds`` call.
        '''
        parser = argparse.ArgumentParser(
            usage='aps list-builds [<args>]',
            description='''List builds.
Optional "application-id" or "build-id" parameters can be specified to restrict
the list of builds that are reported by this call.
When the "application-id" parameter is provided this operation returns the list
of builds for that particular application. When the "build-id" parameter is
provided this operation returns the specific build identified by "build-id".
When neither "application-id" or "build-id" are provided this operation returns
all builds.''')
        parser.add_argument('--application-id', type=str, required=False, help='Application ID')
        parser.add_argument('--build-id', type=str, required=False, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.list_builds(args.application_id, args.build_id)
def add_build(self, global_args):
'''Add a new build'''
parser = argparse.ArgumentParser(
usage='aps add-build [<args>]',
description='Add a new build')
parser.add_argument('--application-id', type=str, required=True, help='Application ID')
parser.add_argument('--file', type=str, required=False,
help='Build file (apk or xcarchive folder)')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.add_build(args.file, args.application_id)
def delete_build(self, global_args):
'''Delete a build'''
parser = argparse.ArgumentParser(
usage='aps delete-build [<args>]',
description='Delete build')
parser.add_argument('--build-id', type=str, required=True, help='Build ID')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.delete_build(args.build_id)
def protect_start(self, global_args):
'''Start build protection'''
parser = argparse.ArgumentParser(
usage='aps protect-start [<args>]',
description='Initiate protection of a previously added build')
parser.add_argument('--build-id', type=str, required=True, help='Build ID')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.protect_start(args.build_id)
def protect_cancel(self, global_args):
'''Cancel protection of a build'''
parser = argparse.ArgumentParser(
usage='aps protect-cancel [<args>]',
description='Cancel protection of a build.')
parser.add_argument('--build-id', type=str, required=True, help='Build ID')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.protect_cancel(args.build_id)
    def protect_get_status(self, global_args):
        '''Query the protection status (and progress) of a build.

        Arguments:
            global_args: global (pre-subcommand) CLI options to apply.

        Returns:
            Result of the backend ``protect_get_status`` call.
        '''
        parser = argparse.ArgumentParser(
            usage='aps protect-get-status [<args>]',
            description='''Get the status of a build. This includes progress
information when a protection build is ongoing.''')
        parser.add_argument('--build-id', type=str, required=True, help='Build ID')
        # inside subcommands ignore the first command_pos argv's
        args = parser.parse_args(sys.argv[self.command_pos:])
        self.parse_global_args(global_args)
        return self.commands.protect_get_status(args.build_id)
def protect_download(self, global_args):
'''Download a protected build'''
parser = argparse.ArgumentParser(
usage='aps protect-download [<args>]',
description='Download a previously protected build.')
parser.add_argument('--build-id', type=str, required=True, help='Build ID')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.protect_download(args.build_id)
def display_application_package_id(self, global_args):
'''Utility to extract and display the application package id from a file.'''
parser = argparse.ArgumentParser(
usage='aps display_application_package_id [<args>]',
description='''Display the application package id for a input file.
This can be used as input when calling add-application.
''')
parser.add_argument('--file', type=str, required=True,
help='Input file (apk or xcarchive folder)')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.display_application_package_id(args.file)
def set_signing_certificate(self, global_args):
'''Set signing certificate'''
parser = argparse.ArgumentParser(
usage='aps set-signing-certificate [<args>]',
description='''Set signing certificate for an application.''')
parser.add_argument('--application-id', type=str, required=True, help='Application ID')
parser.add_argument('--file', type=str, required=False,
help='PEM encoded certificate file. If omitted then this unsets any previously set certificate')
# inside subcommands ignore the first command_pos argv's
args = parser.parse_args(sys.argv[self.command_pos:])
self.parse_global_args(global_args)
return self.commands.set_signing_certificate(args.application_id, args.file)
# Script entry point; presumably all argument dispatching happens inside
# Aps.__init__ -- the constructor is not visible from this chunk.
if __name__ == '__main__':
    Aps()
| 3,888 | 0 | 53 |
9cd14774cef42968c64c6869f09ed35f3728ce7a | 2,379 | py | Python | botx/bots/mixins/collectors.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | botx/bots/mixins/collectors.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | botx/bots/mixins/collectors.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | """Definition for bot's collecting component.
All of these methods are just wrappers around the inner collector.
"""
from typing import Any, List, Optional, Sequence
from botx.bots.mixins.collecting.add_handler import AddHandlerMixin
from botx.bots.mixins.collecting.default import DefaultHandlerMixin
from botx.bots.mixins.collecting.handler import HandlerMixin
from botx.bots.mixins.collecting.hidden import HiddenHandlerMixin
from botx.bots.mixins.collecting.system_events import SystemEventsHandlerMixin
from botx.collecting.collectors.collector import Collector
from botx.collecting.handlers.handler import Handler
from botx.dependencies import models as deps
class BotCollectingMixin( # noqa: WPS215
    AddHandlerMixin,
    HandlerMixin,
    DefaultHandlerMixin,
    HiddenHandlerMixin,
    SystemEventsHandlerMixin,
):
    """Mixin that defines collector-like behaviour.

    Every method simply delegates to the inner ``collector`` attribute.
    """

    # Inner collector that actually stores and resolves handlers.
    collector: Collector

    @property
    def handlers(self) -> List[Handler]:
        """Get handlers registered on this bot.

        Returns:
            Registered handlers of bot.
        """
        return self.collector.handlers

    def include_collector(
        self,
        collector: Collector,
        *,
        dependencies: Optional[Sequence[deps.Depends]] = None,
    ) -> None:
        """Include handlers from collector into bot.

        Arguments:
            collector: collector from which handlers should be copied.
            dependencies: optional sequence of dependencies for handlers for this
                collector.
        """
        self.collector.include_collector(collector, dependencies=dependencies)

    def command_for(self, *args: Any) -> str:
        """Find handler and build a command string using passed body query_params.

        Arguments:
            args: sequence of elements where first element should be name of handler.

        Returns:
            Command string.
        """
        return self.collector.command_for(*args)

    def handler_for(self, name: str) -> Handler:
        """Find handler in handlers of this bot.

        Find registered handler using [botx.collector.Collector.handler_for] of
        the inner collector.

        Arguments:
            name: name of handler that should be found.

        Returns:
            Handler that was found by name.
        """
        return self.collector.handler_for(name)
| 30.896104 | 85 | 0.688525 | """Definition for bot's collecting component.
All of this methods are just wrappers around inner collector.
"""
from typing import Any, List, Optional, Sequence
from botx.bots.mixins.collecting.add_handler import AddHandlerMixin
from botx.bots.mixins.collecting.default import DefaultHandlerMixin
from botx.bots.mixins.collecting.handler import HandlerMixin
from botx.bots.mixins.collecting.hidden import HiddenHandlerMixin
from botx.bots.mixins.collecting.system_events import SystemEventsHandlerMixin
from botx.collecting.collectors.collector import Collector
from botx.collecting.handlers.handler import Handler
from botx.dependencies import models as deps
class BotCollectingMixin( # noqa: WPS215
AddHandlerMixin,
HandlerMixin,
DefaultHandlerMixin,
HiddenHandlerMixin,
SystemEventsHandlerMixin,
):
"""Mixin that defines collector-like behaviour."""
collector: Collector
@property
def handlers(self) -> List[Handler]:
"""Get handlers registered on this bot.
Returns:
Registered handlers of bot.
"""
return self.collector.handlers
def include_collector(
self,
collector: Collector,
*,
dependencies: Optional[Sequence[deps.Depends]] = None,
) -> None:
"""Include handlers from collector into bot.
Arguments:
collector: collector from which handlers should be copied.
dependencies: optional sequence of dependencies for handlers for this
collector.
"""
self.collector.include_collector(collector, dependencies=dependencies)
def command_for(self, *args: Any) -> str:
"""Find handler and build a command string using passed body query_params.
Arguments:
args: sequence of elements where first element should be name of handler.
Returns:
Command string.
"""
return self.collector.command_for(*args)
def handler_for(self, name: str) -> Handler:
"""Find handler in handlers of this bot.
Find registered handler using using [botx.collector.Collector.handler_for] of
inner collector.
Arguments:
name: name of handler that should be found.
Returns:
Handler that was found by name.
"""
return self.collector.handler_for(name)
| 0 | 0 | 0 |
7f4f696664f38e804ec5049ca26eb5c74828b503 | 1,455 | py | Python | cds_ils/ldap/cli.py | kpsherva/cds-ils | 8eeeb6e03784756ed24895c8d030682f9d733e8a | [
"MIT"
] | 6 | 2020-09-18T00:13:38.000Z | 2021-11-14T17:12:19.000Z | cds_ils/ldap/cli.py | kpsherva/cds-ils | 8eeeb6e03784756ed24895c8d030682f9d733e8a | [
"MIT"
] | 321 | 2020-08-28T15:42:25.000Z | 2022-03-14T15:11:50.000Z | cds_ils/ldap/cli.py | kpsherva/cds-ils | 8eeeb6e03784756ed24895c8d030682f9d733e8a | [
"MIT"
] | 4 | 2020-08-31T08:55:47.000Z | 2022-01-14T11:30:23.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
#
# cds-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CDS-ILS ldap users CLI."""
import click
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from .api import delete_users as ldap_delete_users
from .api import import_users as ldap_import_users
from .api import update_users as ldap_update_users
from .models import LdapSynchronizationLog
@click.group()
def ldap_users():
    """Ldap users import CLI.

    Click group collecting the LDAP synchronization commands
    (import / update / delete).
    """
@ldap_users.command(name="import")
@with_appcontext
def import_users():
    """Load users from LDAP and import them in DB.

    NOTE(review): unlike ``update_users`` this command records no
    LdapSynchronizationLog entry and has no exception handling --
    confirm whether that is intentional.
    """
    ldap_import_users()
@ldap_users.command(name="update")
@with_appcontext
def update_users():
    """Load users from LDAP and import new ones or update existing in DB."""
    # Record the run so its outcome shows up in the synchronization history.
    sync_log = LdapSynchronizationLog.create_cli()
    try:
        outcome = ldap_update_users()
        sync_log.set_succeeded(*outcome)
    except Exception as error:
        # Undo any partially applied DB changes before recording the failure.
        db.session.rollback()
        current_app.logger.exception(error)
        sync_log.set_failed(error)
@ldap_users.command(name="delete")
@with_appcontext
def delete_users():
    """Load users from LDAP and delete the ones that are still in the DB."""
    try:
        ldap_delete_users()
    except Exception as e:
        # Consistency fix: like ``update_users``, discard any partially
        # applied DB changes before logging the failure.
        db.session.rollback()
        current_app.logger.exception(e)
| 25.982143 | 77 | 0.717526 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
#
# cds-migrator-kit is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CDS-ILS ldap users CLI."""
import click
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from .api import delete_users as ldap_delete_users
from .api import import_users as ldap_import_users
from .api import update_users as ldap_update_users
from .models import LdapSynchronizationLog
@click.group()
def ldap_users():
"""Ldap users import CLI."""
@ldap_users.command(name="import")
@with_appcontext
def import_users():
"""Load users from LDAP and import them in DB."""
ldap_import_users()
@ldap_users.command(name="update")
@with_appcontext
def update_users():
"""Load users from LDAP and import new ones or update existing in DB."""
log = LdapSynchronizationLog.create_cli()
try:
result = ldap_update_users()
log.set_succeeded(*result)
except Exception as e:
db.session.rollback()
current_app.logger.exception(e)
log.set_failed(e)
@ldap_users.command(name="delete")
@with_appcontext
def delete_users():
"""Load users from LDAP and delete the ones that are still in the DB."""
try:
ldap_delete_users()
except Exception as e:
current_app.logger.exception(e)
| 0 | 0 | 0 |
eb6df24de1ebfc73472eb5e92e6a7151f65ea7fd | 1,354 | py | Python | myDjangoProject/myDjangoProject/SSHConn.py | yueheng-li/pythonLearn | 375028537be08a8ae14104bf439172816017323e | [
"MIT"
] | null | null | null | myDjangoProject/myDjangoProject/SSHConn.py | yueheng-li/pythonLearn | 375028537be08a8ae14104bf439172816017323e | [
"MIT"
] | null | null | null | myDjangoProject/myDjangoProject/SSHConn.py | yueheng-li/pythonLearn | 375028537be08a8ae14104bf439172816017323e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import paramiko
import logging
| 27.632653 | 89 | 0.718612 | # -*- coding: utf-8 -*-
import logging
import os

import paramiko
class SSHConn:
    """Thin wrapper around a paramiko SSH connection.

    Provides helpers for uploading files over SFTP and running remote
    commands.  On failure the string ``'Exec failure'`` is returned
    (kept for backward compatibility with existing callers).
    """

    logger = logging.getLogger('scripts')

    def __init__(self, hostname, port, username, password):
        """Open the SSH connection and one session channel."""
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.client.connect(hostname=hostname, port=port, username=username, password=password)
        self.transport = self.client.get_transport()
        self.channel = self.transport.open_session()

    def moveFile(self, lpath, repath):
        """Upload local file *lpath* to remote path *repath* via SFTP."""
        try:
            sftp = paramiko.SFTPClient.from_transport(self.transport)
            s = sftp.put(localpath=lpath, remotepath=repath)
        except Exception as e:
            # Bug fix: the bare name ``logger`` did not exist at module
            # level -- the logger is a class attribute, so go through self.
            self.logger.error(e)
            return 'Exec failure'
        return s

    def close(self):
        """Close the SSH client (and with it the transport/channel)."""
        self.client.close()

    def execCommand(self, command):
        """Run *command* remotely; return ``(stdout, stderr)`` contents."""
        try:
            self.logger.info("command : " + command)
            stdin, stdout, stderr = self.client.exec_command(command)
        except Exception as e:
            self.logger.error(e)
            return 'Exec failure'
        return stdout.read(), stderr.read()

    def extensionFileName(self, remote_dir):
        """Return ``{'path': <full path>}`` of the last .war file in *remote_dir*.

        Bug fixes: the method was missing ``self`` (it reads
        ``self.transport``), referenced the undefined bare name ``logger``,
        relied on the missing ``os`` import, and used ``name.find('.war')``
        as a condition -- ``find`` returns -1 (truthy) when absent, so the
        test matched almost every file.
        """
        d = {}
        self.logger.info("remote_dir : " + remote_dir)
        # Remote listings must go through SFTP, not ``os``.
        sftp = paramiko.SFTPClient.from_transport(self.transport)
        fn = sftp.listdir(remote_dir)
        for name in fn:
            print(name)
            if name.endswith('.war'):
                d['path'] = os.path.join(remote_dir, name)
        return d
91d9922392d11c99a25f3dd872cb144761ea21f3 | 1,732 | py | Python | pytdx/pytdx/parser/get_company_info_content.py | iamweilee/nodetdx | 5048cb25da6aae18e3204fa365e92866c0914340 | [
"MIT"
] | 7 | 2021-06-27T11:42:00.000Z | 2022-03-15T15:01:29.000Z | pytdx/pytdx/parser/get_company_info_content.py | hitrading/nodetdx | 5048cb25da6aae18e3204fa365e92866c0914340 | [
"MIT"
] | null | null | null | pytdx/pytdx/parser/get_company_info_content.py | hitrading/nodetdx | 5048cb25da6aae18e3204fa365e92866c0914340 | [
"MIT"
] | 2 | 2021-07-08T03:44:41.000Z | 2021-09-15T00:41:19.000Z | # coding=utf-8
from pytdx.parser.base import BaseParser
from pytdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
import six
import zlib
| 34.64 | 264 | 0.691109 | # coding=utf-8
from pytdx.parser.base import BaseParser
from pytdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
import six
import zlib
class GetCompanyInfoContent(BaseParser):
    """Parser for the TDX 'get company info content' request.

    Builds the binary request packet and decodes the GBK-encoded text
    the server returns.
    """

    def setParams(self, market, code, filename, start, length):
        """Assemble the request packet into ``self.send_pkg``.

        Arguments:
            market: market id, packed as little-endian unsigned short.
            code: 6-byte security code (text is UTF-8 encoded first).
            filename: remote file name, normalized to exactly 80 bytes.
            start: offset of the chunk to fetch.
            length: number of bytes to fetch.
        """
        if type(code) is six.text_type:
            code = code.encode("utf-8")
        if type(filename) is six.text_type:
            filename = filename.encode("utf-8")
        if len(filename) != 80:
            # Pad with NULs to width 78, then append two ASCII '0' bytes
            # (0x30 0x30) so the field is exactly 80 bytes.
            filename = filename.ljust(78, b'\x00')+b'\x30\x30'
        # Fixed 12-byte protocol header, then the little-endian payload.
        pkg = bytearray.fromhex(u'0c 03 10 9c 00 01 68 00 68 00 d0 02')
        pkg.extend(struct.pack(u"<H6sH80sIII", market, code, 0, filename, start, length, 0))
        self.send_pkg = pkg

    def parseResponse(self, body_buf):
        """Decode the response: 12-byte header carrying the text length, then GBK text."""
        pos = 0
        _, length = struct.unpack(u'<10sH', body_buf[:12])
        pos += 12
        content = body_buf[pos: pos + length]
        return content.decode('GBK', 'ignore')
def ggc(hex):
    """Decode a captured hex dump of a company-info response and print it.

    NOTE(review): the parameter name shadows the builtin ``hex``; renaming
    it would change the public signature, so it is only flagged here.
    Also note the decoded text is printed, not returned -- confirm that
    is intended.
    """
    response = bytearray.fromhex(hex)
    # 16-byte transport header; the trailing shorts are the compressed and
    # uncompressed payload sizes (unused below).
    head_buf = response[:0x10]
    _, _, _, zipsize, unzipsize = struct.unpack("<IIIHH", head_buf)
    body_buf = bytearray()
    body_buf.extend(response[0x10:])
    # Payload is zlib-compressed.
    unziped_data = zlib.decompress(body_buf)
    body_buf = unziped_data
    gbk=GetCompanyInfoContent('').parseResponse(body_buf)
    print(gbk)
7d8dbfd1ee273cc85c76d1e6dd4508382ec92620 | 5,927 | py | Python | wsrpc/__init__.py | guyingbo/websocket-rpc | 326765cd660b5ae4c77cea456ca774deae80e9f6 | [
"MIT"
] | 8 | 2018-01-22T12:03:13.000Z | 2021-07-06T15:24:48.000Z | wsrpc/__init__.py | guyingbo/websocket-rpc | 326765cd660b5ae4c77cea456ca774deae80e9f6 | [
"MIT"
] | 2 | 2019-10-22T17:43:15.000Z | 2020-04-27T15:09:36.000Z | wsrpc/__init__.py | guyingbo/websocket-rpc | 326765cd660b5ae4c77cea456ca774deae80e9f6 | [
"MIT"
] | 1 | 2020-06-08T19:34:13.000Z | 2020-06-08T19:34:13.000Z | """msgpack rpc over websockets"""
import asyncio
import inspect
import itertools
import logging
import typing
import msgpack # type: ignore
logger = logging.getLogger(__name__)
__version__ = "0.0.6"
| 31.359788 | 86 | 0.596254 | """msgpack rpc over websockets"""
import asyncio
import inspect
import itertools
import logging
import typing
import msgpack # type: ignore
logger = logging.getLogger(__name__)
__version__ = "0.0.6"
class RPCError(Exception):
    """Raised when a malformed or unknown RPC message is encountered."""
class RemoteCallError(Exception):
    """Raised when the remote peer reports an error for a request."""
class WebsocketRPC:
    """Bidirectional msgpack-RPC endpoint multiplexed over one websocket.

    Supports three msgpack-rpc style message kinds: requests (which expect
    a response), responses, and fire-and-forget notifications.  Incoming
    requests and notifications are dispatched to methods of the handler
    whose names start with ``method_prefix``.
    """

    # Wire-level message-type tags.
    REQUEST = 0
    RESPONSE = 1
    NOTIFY = 2

    # Background receive-loop task; only created in client mode.
    client_task: typing.Optional[asyncio.Future]

    def __init__(
        self,
        ws,
        handler_cls: type = None,
        *,
        client_mode: bool = False,
        timeout: int = 10,
        http_request=None,
        method_prefix: str = "rpc_"
    ):
        """Wrap *ws* as an RPC endpoint.

        Arguments:
            ws: websocket supporting async iteration, ``send`` and ``close``.
            handler_cls: optional class serving incoming calls; instantiated
                with this endpoint as its single argument.
            client_mode: when True, start the receive loop immediately.
            timeout: seconds to wait for in-flight tasks at shutdown.
            http_request: opaque request object kept for handlers' use.
            method_prefix: prefix prepended to wire method names when
                resolving handler methods.
        """
        self.ws = ws
        self.timeout = timeout
        self._packer = msgpack.Packer(use_bin_type=1)
        # msgid -> future resolved when the matching response arrives.
        self._request_table: typing.Dict[int, asyncio.Future] = {}
        # In-flight dispatch tasks (requests/notifications being served).
        self._tasks: typing.Set[asyncio.Future] = set()
        self.notify = NotifyProxy(self)
        self.request = RequestProxy(self)
        self._iter = itertools.count()
        self.client_mode = client_mode
        self.max_id = 2 ** 32
        self.http_request = http_request
        self.method_prefix = method_prefix
        self.handler = handler_cls(self) if handler_cls else None
        self._exc_handlers: typing.List[typing.Callable] = []
        if self.client_mode:
            self.client_task = asyncio.ensure_future(self.run())
        else:
            self.client_task = None

    def _next_msgid(self) -> int:
        """Return the next request id, wrapping around at ``max_id``."""
        i = next(self._iter)
        if i < self.max_id:
            return i
        self._iter = itertools.count()
        return self._next_msgid()

    async def run(self) -> None:
        """Receive loop: dispatch incoming frames until the socket closes.

        Exceptions raised while handling a frame are logged and forwarded
        to the callbacks registered via :meth:`exception`; the loop keeps
        running.  On exit, in-flight dispatch tasks are awaited.
        """
        async for data in self.ws:
            try:
                await self._on_data(data)
            except Exception as e:
                logger.exception(e)
                for exc_handler in self._exc_handlers:
                    if asyncio.iscoroutinefunction(exc_handler):
                        await exc_handler(e)
                    else:
                        exc_handler(e)
        try:
            # Shield the drain so cancelling run() still lets pending
            # dispatch tasks finish (retried once below if cancelled).
            await asyncio.shield(self._join())
        except asyncio.CancelledError:
            await self._join()

    async def close(self) -> None:
        """Close the websocket (client mode) and wait for the receive loop."""
        if self.client_mode:
            await self.ws.close()
        if self.client_task:
            await self.client_task

    async def _join(self) -> None:
        """Wait up to ``timeout`` seconds for in-flight dispatch tasks."""
        if self._tasks:
            await asyncio.wait(self._tasks, timeout=self.timeout)

    def exception(self, func: typing.Callable) -> None:
        """Register *func* (sync or async) to be invoked on receive-loop errors."""
        self._exc_handlers.append(func)

    async def _on_data(self, data: bytes) -> None:
        """Decode one msgpack frame and route it by message type."""
        msg = msgpack.unpackb(data)
        assert type(msg) == list, "unknown message format"
        assert len(msg) > 0, "error message length"
        msgtype = msg[0]
        task: typing.Optional[asyncio.Future]
        if msgtype == self.REQUEST:
            msgid, method_name, params = msg[1:]
            method_name = method_name
            task = asyncio.ensure_future(self._on_request(msgid, method_name, params))
        elif msgtype == self.RESPONSE:
            msgid, error, result = msg[1:]
            self._on_response(msgid, error, result)
            task = None
        elif msgtype == self.NOTIFY:
            method_name, params = msg[1:]
            method_name = method_name
            task = asyncio.ensure_future(self._on_notify(method_name, params))
        else:
            raise RPCError("unknown msgtype")
        if task:
            # Track the dispatch task so close()/run() can wait for it.
            self._tasks.add(task)
            task.add_done_callback(self._tasks.remove)

    async def _on_request(self, msgid: int, method_name: str, params: tuple) -> None:
        """Serve one request and send back a response frame.

        Error flag 1 plus the stringified exception on failure,
        flag 0 plus the result on success.
        """
        try:
            method_name = self.method_prefix + method_name
            method = getattr(self.handler, method_name)
            result = method(*params)
            # if asyncio.iscoroutine(result):
            if inspect.isawaitable(result):
                result = await result
        except Exception as e:
            await self._send_response(msgid, 1, str(e))
        else:
            await self._send_response(msgid, 0, result)

    def _on_response(self, msgid: int, error: int, result) -> None:
        """Resolve the future waiting on *msgid* with a result or error."""
        fut = self._request_table.pop(msgid)
        if error == 0:
            fut.set_result(result)
        else:
            fut.set_exception(RemoteCallError(error, result))

    async def _on_notify(self, method_name: str, params: tuple) -> None:
        """Serve one notification; any result is discarded."""
        method_name = self.method_prefix + method_name
        method = getattr(self.handler, method_name)
        result = method(*params)
        # if asyncio.iscoroutine(result):
        if inspect.isawaitable(result):
            result = await result

    async def _send_response(self, msgid: int, error: int, result) -> None:
        """Pack and send a RESPONSE frame."""
        message = [self.RESPONSE, msgid, error, result]
        data = self._packer.pack(message)
        await self.ws.send(data)

    async def _send_request(self, method: str, params: tuple) -> typing.Any:
        """Send a REQUEST frame and wait for the matching response."""
        msgid = self._next_msgid()
        message = [self.REQUEST, msgid, method, params]
        data = self._packer.pack(message)
        fut: asyncio.Future = asyncio.Future()
        self._request_table[msgid] = fut
        await self.ws.send(data)
        return await fut

    async def _send_notify(self, method: str, params: tuple) -> None:
        """Send a NOTIFY frame; no response is expected."""
        message = [self.NOTIFY, method, params]
        data = self._packer.pack(message)
        await self.ws.send(data)
class NotifyProxy:
    """Attribute-style helper: ``await rpc.notify.method(args)`` fires a notification."""

    __slots__ = ("rpc",)

    def __init__(self, rpc: WebsocketRPC):
        self.rpc = rpc

    def __getattr__(self, name: str):
        # Every attribute access produces a coroutine function that sends a
        # notification named after the attribute.
        async def func(*args):
            return await self.rpc._send_notify(name, args)

        return func
class RequestProxy:
    """Attribute-style helper: ``await rpc.request.method(args)`` performs an RPC call."""

    __slots__ = ("rpc",)

    def __init__(self, rpc: WebsocketRPC):
        self.rpc = rpc

    def __getattr__(self, name: str):
        # Every attribute access produces a coroutine function that issues a
        # request named after the attribute and awaits its response.
        async def func(*args):
            reply = await self.rpc._send_request(name, args)
            return reply

        return func
| 4,971 | 634 | 115 |
c945c07012decbd51269c28e92ef14a6644a2d2e | 1,279 | py | Python | tests/neurofire/datasets/isbi/loaders/master.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 9 | 2018-01-29T07:30:14.000Z | 2022-03-09T04:23:14.000Z | tests/neurofire/datasets/isbi/loaders/master.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 1 | 2017-12-19T14:27:18.000Z | 2017-12-19T15:35:59.000Z | tests/neurofire/datasets/isbi/loaders/master.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 5 | 2018-01-27T12:16:37.000Z | 2020-01-20T13:14:26.000Z | import os
import unittest
if __name__ == '__main__':
TestMaster().test_master()
| 36.542857 | 96 | 0.621579 | import os
import unittest
class TestMaster(unittest.TestCase):
DATA_CONFIG = os.path.join(os.path.dirname(__file__), 'data_config_test.yml')
PLOT_DIRECTORY = os.path.join(os.path.dirname(__file__), 'plots')
def test_master(self):
from neurofire.datasets.isbi2012.loaders.master import ISBI2012Dataset
from inferno.utils.io_utils import print_tensor
dataset = ISBI2012Dataset.from_config(self.DATA_CONFIG)
# Get from dataset
batch = dataset[0]
# Validate
self.assertEqual(len(batch), 2)
for _batch in batch:
self.assertEqual(list(_batch.size()), [1, 576, 576]) # batch axis added by loader
# Print to file
if os.path.exists(self.PLOT_DIRECTORY):
assert os.path.isdir(self.PLOT_DIRECTORY)
else:
os.mkdir(self.PLOT_DIRECTORY)
print_tensor(tensor=batch[0].numpy()[None, ...],
prefix='RAW',
directory=self.PLOT_DIRECTORY)
print_tensor(tensor=batch[1].numpy()[None, ...],
prefix='MEM',
directory=self.PLOT_DIRECTORY)
print("Plots printed to {}.".format(self.PLOT_DIRECTORY))
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    TestMaster().test_master()
| 976 | 194 | 23 |
8f92f89adceb8143275e8758b8bfe37c82df0da9 | 481 | py | Python | setup.py | DiegoLigtenberg/Workspace-MasterThesis-MSS | e8183031b6223051049f48e0da2bc2824e60239e | [
"MIT"
] | null | null | null | setup.py | DiegoLigtenberg/Workspace-MasterThesis-MSS | e8183031b6223051049f48e0da2bc2824e60239e | [
"MIT"
] | null | null | null | setup.py | DiegoLigtenberg/Workspace-MasterThesis-MSS | e8183031b6223051049f48e0da2bc2824e60239e | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name="mss_project",
    version="1.0",
    author="Diego Ligtenberg",
    author_email="diegoligtenberg@gmail.com",
    description="Master Thesis about Music Source Separation, and Instrument classification",
    license="MIT",
    # NOTE(review): this passes the literal string "README" as the long
    # description; it is presumably meant to read the README file's
    # contents -- confirm and fix if so.
    long_description=("README"),
    packages=find_packages()
)
# print(find_packages())
# can install this with: pip install -e .
# can uninstall this with: pip uninstall mss_project
| 30.0625 | 93 | 0.711019 | from setuptools import setup, find_packages
setup(
name="mss_project",
version="1.0",
author="Diego Ligtenberg",
author_email="diegoligtenberg@gmail.com",
description="Master Thesis about Music Source Separation, and Instrument classification",
license="MIT",
long_description=("README"),
packages=find_packages()
)
# print(find_packages())
# can install this with: pip install -e .
# can uninstall this with: pip uninstall mss_project
| 0 | 0 | 0 |
f230d13b83b27eab0a372096cee774eb318e7ef3 | 76 | py | Python | djangobotcfg/__init__.py | henriquebastos/django-buildmaster | a3169509e2a8fb2623cde0a99c4087ef04b0be38 | [
"BSD-3-Clause"
] | 1 | 2018-04-18T20:18:59.000Z | 2018-04-18T20:18:59.000Z | djangobotcfg/__init__.py | henriquebastos/django-buildmaster | a3169509e2a8fb2623cde0a99c4087ef04b0be38 | [
"BSD-3-Clause"
] | null | null | null | djangobotcfg/__init__.py | henriquebastos/django-buildmaster | a3169509e2a8fb2623cde0a99c4087ef04b0be38 | [
"BSD-3-Clause"
] | null | null | null | from . import builders, buildsteps, changesource, schedulers, slaves, status | 76 | 76 | 0.815789 | from . import builders, buildsteps, changesource, schedulers, slaves, status | 0 | 0 | 0 |
3f84395d2f49e57e7b6548b518e2f701e3b76824 | 236 | py | Python | backend/reports/admin.py | nikhilkutinha/athena | dd46eac06ed3bf2d24e4c0882c1a79adca8025b5 | [
"MIT"
] | 1 | 2021-02-12T10:25:03.000Z | 2021-02-12T10:25:03.000Z | backend/reports/admin.py | nikhilkutinha/athena | dd46eac06ed3bf2d24e4c0882c1a79adca8025b5 | [
"MIT"
] | null | null | null | backend/reports/admin.py | nikhilkutinha/athena | dd46eac06ed3bf2d24e4c0882c1a79adca8025b5 | [
"MIT"
] | null | null | null | from django.contrib import admin
from . models import Region, Report
admin.site.register(Region, RegionAdmin)
admin.site.register(Report)
| 23.6 | 57 | 0.771186 | from django.contrib import admin
from . models import Region, Report
class RegionAdmin(admin.ModelAdmin):
list_display = ('name', 'get_type_display', 'parent')
admin.site.register(Region, RegionAdmin)
admin.site.register(Report)
| 0 | 73 | 23 |
df8b64d264ab4c00a24b54e27a1d1471a8499466 | 2,390 | py | Python | tardis/configuration/configuration.py | stwunsch/tardis | cfc01a7a3b2e58e7e88653dd8d119a724e27d620 | [
"MIT"
] | 11 | 2019-06-06T14:44:56.000Z | 2021-12-17T19:46:18.000Z | tardis/configuration/configuration.py | stwunsch/tardis | cfc01a7a3b2e58e7e88653dd8d119a724e27d620 | [
"MIT"
] | 165 | 2019-04-26T09:31:19.000Z | 2022-03-09T16:45:45.000Z | tardis/configuration/configuration.py | stwunsch/tardis | cfc01a7a3b2e58e7e88653dd8d119a724e27d620 | [
"MIT"
] | 12 | 2019-06-06T14:06:15.000Z | 2021-12-21T12:31:03.000Z | from ..interfaces.borg import Borg
from ..utilities.attributedict import AttributeDict
from ..utilities.attributedict import convert_to_attribute_dict
# Need to import all pyyaml loadable classes (bootstrapping problem) FIX ME
from ..utilities.executors import * # noqa: F403, F401
from ..utilities.simulators import * # noqa: F403, F401
from cobald.daemon.config.mapping import Translator
from cobald.daemon.plugins import constraints as plugin_constraints
from base64 import b64encode
import os
import yaml
@plugin_constraints(before={"pipeline"})
| 36.769231 | 81 | 0.667364 | from ..interfaces.borg import Borg
from ..utilities.attributedict import AttributeDict
from ..utilities.attributedict import convert_to_attribute_dict
# Need to import all pyyaml loadable classes (bootstrapping problem) FIX ME
from ..utilities.executors import * # noqa: F403, F401
from ..utilities.simulators import * # noqa: F403, F401
from cobald.daemon.config.mapping import Translator
from cobald.daemon.plugins import constraints as plugin_constraints
from base64 import b64encode
import os
import yaml
def translate_config(obj):
if isinstance(obj, AttributeDict):
translated_obj = AttributeDict(obj)
for key, value in obj.items():
if key == "user_data": # base64 encode user data
with open(os.path.join(os.getcwd(), obj[key]), "rb") as f:
translated_obj[key] = b64encode(f.read())
elif key == "__type__": # do legacy object initialisation
return Translator().translate_hierarchy(obj)
else:
translated_obj[key] = translate_config(value)
return translated_obj
elif isinstance(obj, list):
return [translate_config(item) for item in obj]
else:
return obj
@plugin_constraints(before={"pipeline"})
class Configuration(Borg):
_shared_state = AttributeDict()
def __init__(self, configuration: [str, dict] = None):
super(Configuration, self).__init__()
if configuration:
if isinstance(configuration, str): # interpret string as file name
self.load_config(configuration)
else:
self.update_config(configuration)
def load_config(self, config_file: str) -> None:
"""
Loads YAML configuration file into shared state of the configuration borg
:param config_file: The name of the configuration file to be loaded
:type config_file: str
"""
with open(config_file, "r") as config_file:
self.update_config(yaml.safe_load(config_file))
def update_config(self, configuration: dict):
"""
Updates the shared state of the configuration borg
:param configuration: Dictionary containing the configuration
:type configuration: dict
"""
self._shared_state.update(
translate_config(convert_to_attribute_dict(configuration))
)
| 986 | 801 | 45 |
e8a18d18e4d026ba92c527e07f4aab2ed6809c1d | 1,261 | py | Python | backup/main.py | ProgMeli/backup-mega | c7d7c3e0eaa2f416ed0bf596745b19f31b4f5f42 | [
"BSD-2-Clause"
] | null | null | null | backup/main.py | ProgMeli/backup-mega | c7d7c3e0eaa2f416ed0bf596745b19f31b4f5f42 | [
"BSD-2-Clause"
] | null | null | null | backup/main.py | ProgMeli/backup-mega | c7d7c3e0eaa2f416ed0bf596745b19f31b4f5f42 | [
"BSD-2-Clause"
] | null | null | null | import os
import subprocess
from datetime import datetime
from os.path import join
from pathlib import Path
from backup.build_files import zipping_files
| 29.325581 | 101 | 0.66931 | import os
import subprocess
from datetime import datetime
from os.path import join
from pathlib import Path
from backup.build_files import zipping_files
def backup():
    """Create a timestamped zip of the configured paths and hand it to storage.

    The list of paths to archive is read from ``~/backup-mega.txt``
    (one entry per line); the archive is written under ``/tmp/backup/``.
    """
    config_path = join(str(Path.home()), "backup-mega.txt")
    moment = datetime.now()
    archive_path = '/tmp/backup/{}-{}-{}_{}:{}:{}.zip'.format(
        moment.day,
        moment.month,
        moment.year,
        moment.hour,
        moment.minute,
        moment.second,
    )
    with open(config_path) as config_file:
        entries = config_file.read().split('\n')
    zipping_files(entries, archive_path)
    save_backup_in_storage(backup_path=archive_path)
def save_backup_in_storage(storage: str='mega', backup_path: str='/tmp/backup/'):
    """Dispatch the backup archive to the selected storage backend.

    Only the 'mega' backend is implemented; any other storage value is a
    no-op that returns None.
    """
    if storage != 'mega':
        return None
    return save_in_mega(backup_path)
def save_in_mega(backup_path: str):
    # Ensure the remote /Root/Backup folder exists before copying.
    if not mega_is_exists_backup_folder():
        os.system('megamkdir /Root/Backup')
    # HACK: shells out via os.system with an unquoted, interpolated path —
    # acceptable for the fixed /tmp/backup/ location this script generates,
    # but unsafe if backup_path ever comes from untrusted input.
    os.system('megacopy --local {} --remote {}'.format(os.path.dirname(backup_path), '/Root/Backup'))
def mega_is_exists_backup_folder() -> bool:
    # Lists the remote root via megals and captures stdout as bytes.
    # The membership test checks the (stdout, stderr) tuple, so it is True
    # only when stdout is exactly b'/Root\n/Root/Backup\n'.
    # NOTE(review): this assumes /Root contains *only* the Backup folder —
    # any extra remote entry makes this return False. Confirm intent.
    folders = subprocess.Popen("megals /Root".split(), stdout=subprocess.PIPE)
    return b'/Root\n/Root/Backup\n' in folders.communicate(timeout=20)
| 1,012 | 0 | 92 |
120369d341d9b59da985a66835a1ca1e27100fb4 | 3,626 | py | Python | src/sparsezoo/requests/get.py | signalism/sparsezoo | 5ca44f8cb514e80844034920d743baba97279ec2 | [
"Apache-2.0"
] | 116 | 2021-02-04T17:51:22.000Z | 2022-03-25T03:15:19.000Z | src/sparsezoo/requests/get.py | PIlotcnc/new | 6e6413632de01f6acf691dca8fadb84f841444b9 | [
"Apache-2.0"
] | 15 | 2021-02-13T12:00:40.000Z | 2022-03-17T18:44:54.000Z | src/sparsezoo/requests/get.py | PIlotcnc/new | 6e6413632de01f6acf691dca8fadb84f841444b9 | [
"Apache-2.0"
] | 11 | 2021-02-04T22:20:47.000Z | 2021-12-03T12:20:09.000Z | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to wrapping around API calls under api.neuralmagic.com/[object]/get
"""
import logging
from typing import Dict, Union
import requests
from sparsezoo.requests.authentication import get_auth_header
from sparsezoo.requests.base import MODELS_API_URL, RECIPES_API_URL, ModelArgs
__all__ = ["get_request", "get_model_get_request", "get_recipe_get_request", "GET_PATH"]
_LOGGER = logging.getLogger(__name__)
GET_PATH = "get"
def get_request(
    base_url: str,
    args: Union[ModelArgs, str],
    sub_path: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get an object from the sparsezoo for any objects matching the args.
    The path called has structure:
        [base_url]/get/[args.stub]/{sub_path}
    :param base_url: the base url of the request
    :param args: the args describing what should be retrieved
    :param sub_path: the sub path from the model path if any e.g.
        file_name for models api or recipe_type for the recipes api
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    :raises requests.HTTPError: if the server responds with a non-2xx status
    """
    header = get_auth_header(force_token_refresh=force_token_refresh)
    # A plain string is already a stub; ModelArgs carries it on .stub.
    path = args if isinstance(args, str) else args.stub
    url = f"{base_url}/{GET_PATH}/{path}"
    if sub_path:
        url = f"{url}/{sub_path}"
    # Only ModelArgs has release_version; append it as a query param if set.
    if hasattr(args, "release_version") and args.release_version:
        url = f"{url}?release_version={args.release_version}"
    _LOGGER.debug(f"GET download from {url}")
    response = requests.get(url=url, headers=header)
    response.raise_for_status()
    response_json = response.json()
    return response_json
def get_model_get_request(
    args: Union[ModelArgs, str],
    file_name: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get a model from the sparsezoo for any objects matching the args.

    :param args: the model args describing what should be retrieved for
    :param file_name: the name of the file, if any, to get model info for
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    """
    # Thin wrapper: the models API shares the generic GET plumbing.
    return get_request(
        base_url=MODELS_API_URL,
        args=args,
        sub_path=file_name,
        force_token_refresh=force_token_refresh,
    )
def get_recipe_get_request(
    args: Union[ModelArgs, str],
    recipe_type: Union[str, None] = None,
    force_token_refresh: bool = False,
):
    """
    Get a recipe from the sparsezoo for any objects matching the args.

    :param args: the model args describing what should be retrieved for
    :param recipe_type: the recipe_type to get recipe info for if not original
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    """
    # Thin wrapper: delegates to the shared GET helper for the recipes API.
    return get_request(RECIPES_API_URL, args, recipe_type, force_token_refresh)
| 32.088496 | 88 | 0.714286 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to wrapping around API calls under api.neuralmagic.com/[object]/get
"""
import logging
from typing import Dict, Union
import requests
from sparsezoo.requests.authentication import get_auth_header
from sparsezoo.requests.base import MODELS_API_URL, RECIPES_API_URL, ModelArgs
__all__ = ["get_request", "get_model_get_request", "get_recipe_get_request", "GET_PATH"]
_LOGGER = logging.getLogger(__name__)
GET_PATH = "get"
def get_request(
    base_url: str,
    args: Union[ModelArgs, str],
    sub_path: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get an object from the sparsezoo for any objects matching the args.
    The path called has structure:
        [base_url]/get/[args.stub]/{sub_path}
    :param base_url: the base url of the request
    :param args: the args describing what should be retrieved
    :param sub_path: the sub path from the model path if any e.g.
        file_name for models api or recipe_type for the recipes api
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    :raises requests.HTTPError: if the server responds with a non-2xx status
    """
    header = get_auth_header(force_token_refresh=force_token_refresh)
    # A plain string is already a stub; ModelArgs carries it on .stub.
    path = args if isinstance(args, str) else args.stub
    url = f"{base_url}/{GET_PATH}/{path}"
    if sub_path:
        url = f"{url}/{sub_path}"
    # Only ModelArgs has release_version; append it as a query param if set.
    if hasattr(args, "release_version") and args.release_version:
        url = f"{url}?release_version={args.release_version}"
    _LOGGER.debug(f"GET download from {url}")
    response = requests.get(url=url, headers=header)
    response.raise_for_status()
    response_json = response.json()
    return response_json
def get_model_get_request(
    args: Union[ModelArgs, str],
    file_name: Union[str, None] = None,
    force_token_refresh: bool = False,
) -> Dict:
    """
    Get a model from the sparsezoo for any objects matching the args
    :param args: the model args describing what should be retrieved for
    :param file_name: the name of the file, if any, to get model info for
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    :raises requests.HTTPError: propagated from get_request on non-2xx responses
    """
    # Convenience wrapper around get_request for the models endpoint.
    return get_request(
        MODELS_API_URL,
        args=args,
        sub_path=file_name,
        force_token_refresh=force_token_refresh,
    )
def get_recipe_get_request(
    args: Union[ModelArgs, str],
    recipe_type: Union[str, None] = None,
    force_token_refresh: bool = False,
):
    """
    Get a recipe from the sparsezoo for any objects matching the args
    :param args: the model args describing what should be retrieved for
    :param recipe_type: the recipe_type to get recipe info for if not original
    :param force_token_refresh: True to refresh the auth token, False otherwise
    :return: the json response as a dict
    :raises requests.HTTPError: propagated from get_request on non-2xx responses
    """
    # Convenience wrapper around get_request for the recipes endpoint.
    return get_request(
        base_url=RECIPES_API_URL,
        args=args,
        sub_path=recipe_type,
        force_token_refresh=force_token_refresh,
    )
| 0 | 0 | 0 |
0a9e5488d671080d52c88d3170387721e61d8893 | 4,851 | py | Python | data_filter.py | linmengsysu/isonode | e471def853adbcbc25ef169c178510bbcddeab38 | [
"MIT"
] | 1 | 2022-01-28T23:59:24.000Z | 2022-01-28T23:59:24.000Z | data_filter.py | linmengsysu/isonode | e471def853adbcbc25ef169c178510bbcddeab38 | [
"MIT"
] | null | null | null | data_filter.py | linmengsysu/isonode | e471def853adbcbc25ef169c178510bbcddeab38 | [
"MIT"
] | null | null | null | # from pytorch_transformers import *
# from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, MultiLabelBinarizer
# from data import *
# import gensim
# from gensim.models import Word2Vec
# from tqdm import tqdm
# from tqdm import tqdm_notebook as tqdm # Comment this line if using jupyter notebook
from scipy import io as sio
import csv
import argparse
from collections import defaultdict
import os
# Command-line interface: input/output directories plus which dump variant
# of the ACM citation data to preprocess.
parser = argparse.ArgumentParser(description='Preprocess ACM Data')
'''
Dataset arguments
'''
parser.add_argument('--data_dir', type=str, default='./data/',
                    help='The address to store the original data directory.')
parser.add_argument('--output_dir', type=str, default='./data/',
                    help='The address to output the preprocessed graph.')
parser.add_argument('--cuda', type=int, default=0,
                    help='Avaiable GPU ID')
parser.add_argument('--domain', type=str, default='citation-acm-v8',
                    help='CS, Medical or All: _CS or _Med or (empty)')
# parser.add_argument('--citation_bar', type=int, default=1,
#                     help='Only consider papers with citation larger than (2020 - year) * citation_bar')
args = parser.parse_args()
# Parse the ACM citation dump: one record per blank-line-terminated stanza,
# with '#'-prefixed field markers. paper_info maps paper id -> metadata dict;
# cite_info maps paper id -> list of cited paper ids.
paper_info = {}
cite_info = defaultdict(list)
pi = {'abstract': '', 'year': None}
with open(os.path.join(args.data_dir, '%s.txt' % args.domain), 'r', encoding='utf-8') as fin:
    for l in fin:
        l = l.strip('\n')
        if l[:2] == '#*':           # title
            pi['title'] = l[2:].strip()
        elif l[:2] == '#@':         # comma-separated authors
            pi['authors'] = l[2:].strip()
        elif l[:2] == '#t':         # publication year
            pi['year'] = l[2:].strip()
        elif l[:2] == '#c':         # conference / venue
            pi['conf'] = l[2:].strip()
        elif l[:6] == '#index':     # paper id
            pi['id'] = l[6:]
        elif l[:2] == '#%':         # a reference to another paper
            cite_info[pi['id']].append(l[2:])
        elif l[:2] == '#!':         # abstract
            pi['abstract'] = l[2:].strip()
        elif l == '':
            # End of a record: store the parsed dict and start a fresh one.
            # BUG FIX: previous code called pi.strip() here, which raises
            # AttributeError because pi is a dict, not a string.
            paper_info[pi['id']] = pi
            pi = {'abstract': '', 'year': None}
'''
TSV files used to store the preprocessed data:
    paper_info        -> {paper_id, paper_title, paper_abstract, paper_year}
    paper_cite        -> {paper_id, paper_id}
    paper_venue       -> {paper_id, conf_id}
    conf              -> {conf, conf_id}
    author            -> {author, author_id}
    paper_author_info -> {paper_id, author_id}
'''
# Assign dense integer ids to every author and conference encountered.
author_index_dict = {}
conf_index_dict = {}
for pid in paper_info:
    try:
        # Authors are a comma-separated string; drop names of <= 3 characters.
        authors = [a.strip() for a in paper_info[pid]['authors'].split(',') if len(a.strip()) > 3]
        paper_info[pid]['authors'] = authors
        for a in authors:
            if a not in author_index_dict:
                author_index_dict[a] = len(author_index_dict)
        conf = paper_info[pid]['conf']
        if conf not in conf_index_dict:
            conf_index_dict[conf] = len(conf_index_dict)
    except KeyError:
        # Record lacks an 'authors' or 'conf' field — skip it. Narrowed from
        # a bare except so unrelated errors are no longer silently swallowed.
        continue
# Write the TSV outputs. A paper is "valid" only if it has both a title and
# a conference that was assigned an index above; the remaining files are
# restricted to those valid papers.
valid_paper = []
with open(os.path.join(args.data_dir, '%s_paper_info.tsv'% args.domain), 'w') as tsvfile:
    for pid in paper_info:
        if 'conf' in paper_info[pid] and 'title' in paper_info[pid]:
            if paper_info[pid]['conf'] in conf_index_dict:
                if 'abstract' not in paper_info[pid]:
                    paper_info[pid]['abstract'] = ''
                tsvfile.write('{}\t{}\t{}\t{}\n'.format(pid, paper_info[pid]['title'], paper_info[pid]['abstract'], paper_info[pid]['year']))
                valid_paper.append(pid)
print('paper info finished')
# paper -> conference id edges
with open(os.path.join(args.data_dir, '%s_paper_conf.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        tsvfile.write('{}\t{}\n'.format(pid, conf_index_dict[paper_info[pid]['conf']]))
print('paper conf finished')
# paper -> author id edges
with open(os.path.join(args.data_dir, '%s_paper_author.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        if 'authors' in paper_info[pid]:
            for author in paper_info[pid]['authors']:
                tsvfile.write('{}\t{}\n'.format(pid, author_index_dict[author]))
print('paper author finished')
# author name -> id mapping
with open(os.path.join(args.data_dir, '%s_author.tsv'% args.domain), 'w') as tsvfile:
    for author in author_index_dict:
        if author != '':
            tsvfile.write('{}\t{}\n'.format(author, author_index_dict[author]))
print('author finished')
# conference name -> id mapping
with open(os.path.join(args.data_dir, '%s_conf.tsv'% args.domain), 'w') as tsvfile:
    for conf in conf_index_dict:
        tsvfile.write('{}\t{}\n'.format(conf, conf_index_dict[conf]))
print('conf finished')
# paper -> tab-separated list of cited paper ids
with open(os.path.join(args.data_dir, '%s_paper_cite.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        cited = '\t'.join(cite_info[pid])
        tsvfile.write('{}\t{}\n'.format(pid, cited))
print('paper cite finished')
| 37.604651 | 141 | 0.603793 | # from pytorch_transformers import *
# from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, MultiLabelBinarizer
# from data import *
# import gensim
# from gensim.models import Word2Vec
# from tqdm import tqdm
# from tqdm import tqdm_notebook as tqdm # Comment this line if using jupyter notebook
from scipy import io as sio
import csv
import argparse
from collections import defaultdict
import os
# Command-line interface: input/output directories plus which dump variant
# of the ACM citation data to preprocess.
parser = argparse.ArgumentParser(description='Preprocess ACM Data')
'''
Dataset arguments
'''
parser.add_argument('--data_dir', type=str, default='./data/',
                    help='The address to store the original data directory.')
parser.add_argument('--output_dir', type=str, default='./data/',
                    help='The address to output the preprocessed graph.')
parser.add_argument('--cuda', type=int, default=0,
                    help='Avaiable GPU ID')
parser.add_argument('--domain', type=str, default='citation-acm-v8',
                    help='CS, Medical or All: _CS or _Med or (empty)')
# parser.add_argument('--citation_bar', type=int, default=1,
#                     help='Only consider papers with citation larger than (2020 - year) * citation_bar')
args = parser.parse_args()
# Parse the ACM citation dump: one record per blank-line-terminated stanza,
# with '#'-prefixed field markers. paper_info maps paper id -> metadata dict;
# cite_info maps paper id -> list of cited paper ids.
paper_info = {}
cite_info = defaultdict(list)
pi = {'abstract': '', 'year': None}
with open(os.path.join(args.data_dir, '%s.txt' % args.domain), 'r', encoding='utf-8') as fin:
    for l in fin:
        l = l.strip('\n')
        if l[:2] == '#*':           # title
            pi['title'] = l[2:].strip()
        elif l[:2] == '#@':         # comma-separated authors
            pi['authors'] = l[2:].strip()
        elif l[:2] == '#t':         # publication year
            pi['year'] = l[2:].strip()
        elif l[:2] == '#c':         # conference / venue
            pi['conf'] = l[2:].strip()
        elif l[:6] == '#index':     # paper id
            pi['id'] = l[6:]
        elif l[:2] == '#%':         # a reference to another paper
            cite_info[pi['id']].append(l[2:])
        elif l[:2] == '#!':         # abstract
            pi['abstract'] = l[2:].strip()
        elif l == '':
            # End of a record: store the parsed dict and start a fresh one.
            # BUG FIX: previous code called pi.strip() here, which raises
            # AttributeError because pi is a dict, not a string.
            paper_info[pi['id']] = pi
            pi = {'abstract': '', 'year': None}
'''
TSV files used to store the preprocessed data:
    paper_info        -> {paper_id, paper_title, paper_abstract, paper_year}
    paper_cite        -> {paper_id, paper_id}
    paper_venue       -> {paper_id, conf_id}
    conf              -> {conf, conf_id}
    author            -> {author, author_id}
    paper_author_info -> {paper_id, author_id}
'''
# Assign dense integer ids to every author and conference encountered.
author_index_dict = {}
conf_index_dict = {}
for pid in paper_info:
    try:
        # Authors are a comma-separated string; drop names of <= 3 characters.
        authors = [a.strip() for a in paper_info[pid]['authors'].split(',') if len(a.strip()) > 3]
        paper_info[pid]['authors'] = authors
        for a in authors:
            if a not in author_index_dict:
                author_index_dict[a] = len(author_index_dict)
        conf = paper_info[pid]['conf']
        if conf not in conf_index_dict:
            conf_index_dict[conf] = len(conf_index_dict)
    except KeyError:
        # Record lacks an 'authors' or 'conf' field — skip it. Narrowed from
        # a bare except so unrelated errors are no longer silently swallowed.
        continue
# Write the TSV outputs. A paper is "valid" only if it has both a title and
# a conference that was assigned an index above; the remaining files are
# restricted to those valid papers.
valid_paper = []
with open(os.path.join(args.data_dir, '%s_paper_info.tsv'% args.domain), 'w') as tsvfile:
    for pid in paper_info:
        if 'conf' in paper_info[pid] and 'title' in paper_info[pid]:
            if paper_info[pid]['conf'] in conf_index_dict:
                if 'abstract' not in paper_info[pid]:
                    paper_info[pid]['abstract'] = ''
                tsvfile.write('{}\t{}\t{}\t{}\n'.format(pid, paper_info[pid]['title'], paper_info[pid]['abstract'], paper_info[pid]['year']))
                valid_paper.append(pid)
print('paper info finished')
# paper -> conference id edges
with open(os.path.join(args.data_dir, '%s_paper_conf.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        tsvfile.write('{}\t{}\n'.format(pid, conf_index_dict[paper_info[pid]['conf']]))
print('paper conf finished')
# paper -> author id edges
with open(os.path.join(args.data_dir, '%s_paper_author.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        if 'authors' in paper_info[pid]:
            for author in paper_info[pid]['authors']:
                tsvfile.write('{}\t{}\n'.format(pid, author_index_dict[author]))
print('paper author finished')
# author name -> id mapping
with open(os.path.join(args.data_dir, '%s_author.tsv'% args.domain), 'w') as tsvfile:
    for author in author_index_dict:
        if author != '':
            tsvfile.write('{}\t{}\n'.format(author, author_index_dict[author]))
print('author finished')
# conference name -> id mapping
with open(os.path.join(args.data_dir, '%s_conf.tsv'% args.domain), 'w') as tsvfile:
    for conf in conf_index_dict:
        tsvfile.write('{}\t{}\n'.format(conf, conf_index_dict[conf]))
print('conf finished')
# paper -> tab-separated list of cited paper ids
with open(os.path.join(args.data_dir, '%s_paper_cite.tsv'% args.domain), 'w') as tsvfile:
    for pid in valid_paper:
        cited = '\t'.join(cite_info[pid])
        tsvfile.write('{}\t{}\n'.format(pid, cited))
print('paper cite finished')
| 0 | 0 | 0 |
53535af0d4e9f21ee100b03504a8ad034178c9c9 | 738 | py | Python | planners/mouseSampler.py | jkwang1992/rrdt | 5579081fc53f56573c772ff9a894c0093e0bc5e0 | [
"MIT"
] | 8 | 2019-05-21T04:40:53.000Z | 2020-10-17T07:20:04.000Z | planners/mouseSampler.py | soraxas/rrdt | 5579081fc53f56573c772ff9a894c0093e0bc5e0 | [
"MIT"
] | null | null | null | planners/mouseSampler.py | soraxas/rrdt | 5579081fc53f56573c772ff9a894c0093e0bc5e0 | [
"MIT"
] | 3 | 2021-07-16T07:06:14.000Z | 2022-02-09T12:06:06.000Z | import time
import pygame
from overrides import overrides
from planners.baseSampler import Sampler
"""
For demo / testing only. This policy wait for user mouse input for next sampling node.
"""
| 27.333333 | 110 | 0.650407 | import time
import pygame
from overrides import overrides
from planners.baseSampler import Sampler
"""
For demo / testing only. This policy wait for user mouse input for next sampling node.
"""
class MouseSampler(Sampler):
@overrides
def get_next_pos(self):
return self.get_mouse_click_position(scaling=self.args.scaling), self.report_success, self.report_fail
@staticmethod
def get_mouse_click_position(scaling):
while True:
time.sleep(0.05)
pygame.event.wait()
if pygame.mouse.get_pressed()[0] == 1: # Left mouse pressed
pos = pygame.mouse.get_pos()
pos = (int(pos[0] / scaling), int(pos[1] / scaling))
return pos
| 424 | 94 | 23 |
6557b4215fd9305d2dc5481a24a7bb9f504b5e55 | 14,012 | py | Python | tests/comments/test_video_comments_view.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | tests/comments/test_video_comments_view.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | 93 | 2017-09-01T22:24:10.000Z | 2021-12-22T14:07:06.000Z | tests/comments/test_video_comments_view.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | import json
import unittest
from flask_caching import Cache
from sqlalchemy import asc
from app import app, db
from apps.comments.models import CommentsVideos
from apps.videos.models import Videos
from apps.users.models import Users, UsersAccessLevels, UsersAccessMapping, UsersAccessTokens
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead
| 35.03 | 93 | 0.567371 | import json
import unittest
from flask_caching import Cache
from sqlalchemy import asc
from app import app, db
from apps.comments.models import CommentsVideos
from apps.videos.models import Videos
from apps.users.models import Users, UsersAccessLevels, UsersAccessMapping, UsersAccessTokens
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead
class TestCommentsVideosView(unittest.TestCase):
    """End-to-end tests for the video comments API (/api/1.0/comments/videos/).

    setUp seeds three videos, two registered users with valid access tokens,
    and four comments (two on video 1, one each on videos 2 and 3); tearDown
    removes everything that was created.
    """

    def setUp(self):
        # Clear redis cache completely
        cache = Cache()
        cache.init_app(app, config={"CACHE_TYPE": "RedisCache"})
        with app.app_context():
            cache.clear()
        self.app = app.test_client()
        # Add three videos
        video1 = Videos(
            Title="UnitTest1",
            URL="https://unittest1.com",
            Created=get_datetime(),
        )
        video2 = Videos(
            Title="UnitTest2",
            URL="https://unittest2.com",
            Created=get_datetime(),
        )
        video3 = Videos(
            Title="UnitTest3",
            URL="https://unittest3.com",
            Created=get_datetime(),
        )
        db.session.add(video1)
        db.session.add(video2)
        db.session.add(video3)
        db.session.commit()
        # Add two registered users
        user1 = Users(
            Name="UnitTest1",
            Username="unittester1",
            Password="unittest1",
            Created=get_datetime(),
        )
        user2 = Users(
            Name="UnitTest2",
            Username="unittester2",
            Password="unittest2",
            Created=get_datetime(),
        )
        db.session.add(user1)
        db.session.add(user2)
        db.session.commit()
        # Add user level for registered users, if not already
        if not UsersAccessLevels.query.filter_by(LevelName="Registered").first():
            registered = UsersAccessLevels(
                UsersAccessLevelID=2,
                LevelName="Registered"
            )
            db.session.add(registered)
            db.session.commit()
        register_user1 = UsersAccessMapping(
            UserID=user1.UserID,
            UsersAccessLevelID=2
        )
        register_user2 = UsersAccessMapping(
            UserID=user2.UserID,
            UsersAccessLevelID=2
        )
        # Tokens are sent in the Authorization header by the tests below.
        self.access_token1 = "unittest1-access-token"
        self.access_token2 = "unittest2-access-token"
        user1_token = UsersAccessTokens(
            UserID=user1.UserID,
            AccessToken=self.access_token1,
            ExpirationDate=get_datetime_one_hour_ahead()
        )
        user2_token = UsersAccessTokens(
            UserID=user2.UserID,
            AccessToken=self.access_token2,
            ExpirationDate=get_datetime_one_hour_ahead()
        )
        db.session.add(register_user1)
        db.session.add(register_user2)
        db.session.add(user1_token)
        db.session.add(user2_token)
        db.session.commit()
        self.valid_users = [user1.UserID, user2.UserID]
        self.valid_tokens = [self.access_token1, self.access_token2]
        self.video_ids = [video1.VideoID, video2.VideoID, video3.VideoID]
        # Add some comments for each video
        v1_comment1 = CommentsVideos(
            VideoID=self.video_ids[0],
            Comment="V1C1 Comment",
            UserID=self.valid_users[0],
            Created=get_datetime()
        )
        v1_comment2 = CommentsVideos(
            VideoID=self.video_ids[0],
            Comment="V1C2 Comment",
            UserID=self.valid_users[1],
            Created=get_datetime()
        )
        v2_comment1 = CommentsVideos(
            VideoID=self.video_ids[1],
            Comment="V2C1 Comment",
            UserID=self.valid_users[0],
            Created=get_datetime()
        )
        v3_comment1 = CommentsVideos(
            VideoID=self.video_ids[2],
            Comment="V3C1 Comment",
            UserID=self.valid_users[0],
            Created=get_datetime()
        )
        db.session.add(v1_comment1)
        db.session.add(v1_comment2)
        db.session.add(v2_comment1)
        db.session.add(v3_comment1)
        db.session.commit()
        self.valid_comment_ids = [
            v1_comment1.CommentID,
            v1_comment2.CommentID,
            v2_comment1.CommentID,
            v3_comment1.CommentID,
        ]
        # Owner UserID for each comment above, in the same order.
        self.valid_comment_ids_userid = [
            v1_comment1.UserID,
            v1_comment2.UserID,
            v2_comment1.UserID,
            v3_comment1.UserID,
        ]

    def tearDown(self):
        # Deleting a video will also delete the comments for it
        for video in Videos.query.all():
            db.session.delete(video)
        db.session.commit()
        for user in Users.query.filter(Users.Username.like("unittest%")).all():
            db.session.delete(user)
        db.session.commit()
        access = UsersAccessLevels.query.filter_by(LevelName="Registered").first()
        db.session.delete(access)
        db.session.commit()

    def test_getting_all_comments(self):
        """Should return the current comments for all videos."""
        response = self.app.get("/api/1.0/comments/videos/")
        data = json.loads(response.data.decode())
        self.assertEqual(200, response.status_code)
        self.assertNotEqual(None, data)
        self.assertEqual(4, len(data["comments"]))
        self.assertTrue(data["comments"][0]["videoID"] in self.video_ids)
        self.assertTrue(data["comments"][1]["videoID"] in self.video_ids)
        self.assertTrue(data["comments"][2]["videoID"] in self.video_ids)
        self.assertTrue(data["comments"][3]["videoID"] in self.video_ids)

    def test_getting_comments_for_one_video(self):
        """Should return the comments for the specified video."""
        response = self.app.get("/api/1.0/comments/videos/{}".format(self.video_ids[0]))
        data = json.loads(response.data.decode())
        self.assertEqual(200, response.status_code)
        self.assertNotEqual(None, data)
        self.assertEqual(2, len(data["comments"]))
        self.assertEqual(self.video_ids[0], data["comments"][0]["videoID"])
        self.assertEqual("UnitTest1", data["comments"][0]["name"])
        self.assertEqual("V1C1 Comment", data["comments"][0]["comment"])
        self.assertEqual("UnitTest2", data["comments"][1]["name"])
        self.assertEqual("V1C2 Comment", data["comments"][1]["comment"])

    def test_adding_a_comment_as_registered_user(self):
        """Should add a new comment with the userID."""
        response = self.app.post(
            "/api/1.0/comments/videos/",
            data=json.dumps(
                dict(
                    videoID=self.video_ids[2],
                    comment="V3 UnitTest Brand New"
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[1],
                "Authorization": self.valid_tokens[1]
            }
        )
        comments = CommentsVideos.query.filter_by(VideoID=self.video_ids[2]).order_by(
            asc(CommentsVideos.CommentID)
        ).all()
        self.assertEqual(201, response.status_code)
        self.assertEqual(2, len(comments))
        self.assertEqual("V3C1 Comment", comments[0].Comment)
        self.assertEqual("V3 UnitTest Brand New", comments[1].Comment)
        self.assertEqual(self.valid_users[1], comments[1].UserID)

    def test_adding_a_comment_as_registered_user_with_invalid_token(self):
        """Should throw a 401, since it is an invalid case."""
        response = self.app.post(
            "/api/1.0/comments/videos/",
            data=json.dumps(
                dict(
                    videoID=self.video_ids[2],
                    comment="V3 UnitTest Comment Same",
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[0],
                "Authorization": "not valid"
            }
        )
        # Video 2's comments must be untouched by the rejected request.
        comments = CommentsVideos.query.filter_by(VideoID=self.video_ids[1]).order_by(
            asc(CommentsVideos.CommentID)
        ).all()
        self.assertEqual(401, response.status_code)
        self.assertEqual(1, len(comments))
        self.assertEqual("V2C1 Comment", comments[0].Comment)

    def test_adding_another_comment_as_registered_user_for_same_video(self):
        """Should add a second comment normally."""
        response = self.app.post(
            "/api/1.0/comments/videos/",
            data=json.dumps(
                dict(
                    videoID=self.video_ids[2],
                    comment="V3 UnitTest Comment Same",
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        comments = CommentsVideos.query.filter_by(VideoID=self.video_ids[2]).order_by(
            asc(CommentsVideos.CommentID)
        ).all()
        self.assertEqual(201, response.status_code)
        self.assertEqual(2, len(comments))
        self.assertEqual("V3C1 Comment", comments[0].Comment)
        self.assertEqual("V3 UnitTest Comment Same", comments[1].Comment)

    def test_editing_a_comment(self):
        """Should modify an existing comment."""
        response = self.app.put(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    commentID=self.valid_comment_ids[0],
                    comment="UnitTest Edited"
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        comments = CommentsVideos.query.filter_by(VideoID=self.video_ids[0]).order_by(
            asc(CommentsVideos.CommentID)
        ).all()
        self.assertEqual(200, response.status_code)
        self.assertEqual(2, len(comments))
        self.assertEqual("UnitTest Edited", comments[0].Comment)
        self.assertEqual("V1C2 Comment", comments[1].Comment)

    def test_editing_a_comment_without_comment_id(self):
        """Should return 400 Bad Request."""
        response = self.app.put(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    comment="UnitTest Edited"
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        self.assertEqual(400, response.status_code)

    def test_editing_a_comment_without_comment(self):
        """Should return 400 Bad Request."""
        response = self.app.put(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    commentID=self.valid_comment_ids[0]
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_users[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        self.assertEqual(400, response.status_code)

    def test_editing_a_comment_with_wrong_userid(self):
        """Should return 401 Unauthorized. You can only edit your own comments."""
        response = self.app.put(
            "api/1.0/comments/videos/{}".format(self.video_ids[1]),
            data=json.dumps(
                dict(
                    commentID=self.valid_comment_ids[1],
                    comment="UnitTest Edited"
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_comment_ids_userid[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        self.assertEqual(401, response.status_code)

    def test_deleting_a_comment(self):
        """Should delete the comment."""
        response = self.app.delete(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    commentID=self.valid_comment_ids[0]
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_comment_ids_userid[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        comments = CommentsVideos.query.filter_by(VideoID=self.video_ids[0]).all()
        self.assertEqual(204, response.status_code)
        self.assertEqual(1, len(comments))
        self.assertEqual("V1C2 Comment", comments[0].Comment)

    def test_deleting_a_comment_with_invalid_comment_id(self):
        """Should return 400 Bad Request."""
        response = self.app.delete(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    commentID=None
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_comment_ids_userid[0],
                "Authorization": self.valid_tokens[0]
            }
        )
        self.assertEqual(400, response.status_code)

    def test_deleting_a_comment_with_invalid_user_id(self):
        """Should return 401 Unauthorized."""
        response = self.app.delete(
            "api/1.0/comments/videos/{}".format(self.video_ids[0]),
            data=json.dumps(
                dict(
                    commentID=self.valid_comment_ids[0]
                )
            ),
            content_type="application/json",
            headers={
                "User": self.valid_comment_ids_userid[1],
                "Authorization": self.valid_tokens[1]
            }
        )
        self.assertEqual(401, response.status_code)
b36409855170a5bba1fa8e4bfcd74d4fb54b5b4d | 5,887 | py | Python | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/CompositeSurveyImage.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | 8 | 2021-06-13T20:02:12.000Z | 2022-03-24T09:19:23.000Z | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/CompositeSurveyImage.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | null | null | null | Tutorial 2 - Data Navigation/PlugIns/experimental/scripts/CompositeSurveyImage.py | paradimdata/Cornell_EM_SummerSchool_2021 | 9f3583e1b85a9cdd86e1b91800027966d501ce96 | [
"MIT"
] | 1 | 2021-07-16T20:12:28.000Z | 2021-07-16T20:12:28.000Z | """
Example script to do acquire a composite survey image using stage shift.
The script uses the center 50% of the image, shifts the stage by the appropriate amount
in x, y directions, and stitches the resulting images together into a larger super image.
To use:
Run Nion Swift and get a good image
Set the defocus value to a large number (positive or negative) such as 500000nm.
Ensure that the aperture is circular and centered.
Ensure that the aperture is large enough so that the center 50% of the image is exposed through aperture.
Decide how many images to acquire by setting the 'size' variable.
Decide how to reduce the acquired data by setting the 'reduce' variable.
Run the script from command line or PyCharm or another suitable Python interpreter.
TODO: SShft.x, SShft.y add rotation; need to fix this in AS2.
TODO: The size of the camera output is hardcoded to 1024, 1024. It should read from the camera object.
TODO: Update composite image live during acquisition. Requires improvements to nionlib.
"""
import math
import numpy
import time
import nionlib
# these measurements are determined by a line made from a feature before a shift to a feature after a
# shift. for instance, make a line starting on a feature. then add 100um to SShft.x and measure the
# length of the line and the angle. plug those in here.
rotation = math.radians(-23)
scale = 1.2
# NOTE(review): acquire_composite_survey_image is not defined anywhere in this
# view of the file — confirm it is defined earlier in the full script before
# running, otherwise this call raises NameError.
acquire_composite_survey_image(size=(5, 5), rotation=rotation, scale=scale, print_fn=print)
| 51.191304 | 189 | 0.675556 | """
Example script to do acquire a composite survey image using stage shift.
The script uses the center 50% of the image, shifts the stage by the appropriate amount
in x, y directions, and stitches the resulting images together into a larger super image.
To use:
Run Nion Swift and get a good image
Set the defocus value to a large number (positive or negative) such as 500000nm.
Ensure that the aperture is circular and centered.
Ensure that the aperture is large enough so that the center 50% of the image is exposed through aperture.
Decide how many images to acquire by setting the 'size' variable.
Decide how to reduce the acquired data by setting the 'reduce' variable.
Run the script from command line or PyCharm or another suitable Python interpreter.
TODO: SShft.x, SShft.y add rotation; need to fix this in AS2.
TODO: The size of the camera output is hardcoded to 1024, 1024. It should read from the camera object.
TODO: Update composite image live during acquisition. Requires improvements to nionlib.
"""
import math
import numpy
import time
import nionlib
def acquire_composite_survey_image(size, rotation=0, scale=1, reduce=4, print_fn=None):
    """Acquire a grid of defocused images via stage shift and stitch them into one composite.

    Parameters:
        size: (rows, columns) -- number of camera images to acquire.
        rotation: rotation (radians) between the stage shift axes and the camera axes.
        scale: scale correction applied to the commanded stage shifts.
        reduce: integer decimation factor applied to each acquired sub-image (>= 1).
        print_fn: optional progress callable (e.g. print); None silences output.

    The stage is always returned to its original position, even if acquisition fails.
    """
    print_fn = print_fn if print_fn is not None else lambda *args: None
    document_controller = nionlib.api.application.document_controllers[0]
    library = nionlib.api.library
    camera = nionlib.api.get_hardware_source_by_id("nionccd1010", "1")
    autostem = nionlib.api.get_instrument_by_id("autostem_controller", "1")
    shift_x_control_name = "SShft.x"
    shift_y_control_name = "SShft.y"
    # grab stage original location so the finally block can restore it
    sx_m = autostem.get_control_output(shift_x_control_name)
    sy_m = autostem.get_control_output(shift_y_control_name)
    tv_pixel_angle_rad = autostem.get_control_output("TVPixelAngle")
    defocus = autostem.get_control_output("C10")
    print_fn("Acquiring composite survey image...")
    print_fn("stage starting position (um) ", sx_m * 1e6, sy_m * 1e6)
    print_fn("pixel angle (rad) ", tv_pixel_angle_rad)
    print_fn("defocus (nm) ", defocus * 1e9)
    image_size = 1024, 1024  # TODO: grab this from camera
    image_dtype = numpy.float32
    # field of view covered by one camera frame at this defocus
    image_width_m = abs(defocus) * math.sin(tv_pixel_angle_rad * image_size[0])
    master_sub_area_size = 512, 512
    master_sub_area = (image_size[0]//2 - master_sub_area_size[0]//2, image_size[1]//2 - master_sub_area_size[1]//2), master_sub_area_size
    # int() is required: 512 / master_sub_area_size[0] is a float under Python 3,
    # so without it `reduce` becomes a float and breaks the integer slice/step
    # arithmetic (and numpy.empty dimensions) below.
    reduce = max(1, int(reduce // (512 / master_sub_area_size[0])))
    sub_area_shift_m = image_width_m * (master_sub_area[1][0] / image_size[0])
    sub_area = (master_sub_area[0][0]//reduce, master_sub_area[0][1]//reduce), (master_sub_area[1][0]//reduce, master_sub_area[1][1]//reduce)
    print_fn("image width (um) ", image_width_m * 1e6)
    master_data = numpy.empty((sub_area[1][0] * size[0], sub_area[1][1] * size[1]), image_dtype)
    print_fn("master size ", master_data.shape)
    try:
        for row in range(size[0]):
            for column in range(size[1]):
                delta_x_m, delta_y_m = sub_area_shift_m * (column - size[1]//2), sub_area_shift_m * (row - size[0]//2)
                print_fn("offset (um) ", delta_x_m * 1e6, delta_y_m * 1e6)
                start = time.time()
                # when used below, we use the rotation rotated by 180 degrees since we are moving the stage, not the
                # view. i.e. use -angle and subtract the delta's.
                rotated_delta_x_m = (math.cos(rotation) * delta_x_m - math.sin(rotation) * delta_y_m) / scale
                rotated_delta_y_m = (math.sin(rotation) * delta_x_m + math.cos(rotation) * delta_y_m) / scale
                print_fn("rotated offset (um) ", rotated_delta_x_m * 1e6, rotated_delta_y_m * 1e6)
                # set both values. be robust, retrying if set_control_output fails.
                attempts = 0
                while attempts < 4:
                    attempts += 1
                    try:
                        tolerance_factor = 0.02
                        autostem.set_control_output(shift_x_control_name, sx_m - rotated_delta_x_m, {"confirm": True, "confirm_tolerance_factor": tolerance_factor})
                        autostem.set_control_output(shift_y_control_name, sy_m - rotated_delta_y_m, {"confirm": True, "confirm_tolerance_factor": tolerance_factor})
                    except TimeoutError:
                        # report through print_fn like every other progress message
                        # (was a bare print, which ignored the caller's choice of sink)
                        print_fn("Timeout row=", row, " column=", column)
                        continue
                    break
                print_fn("Time", time.time() - start, " row=", row, " column=", column)
                supradata = camera.grab_next_to_start()[0]
                # crop to the center region and decimate by `reduce`
                data = supradata.data[master_sub_area[0][0]:master_sub_area[0][0] + master_sub_area[1][0]:reduce, master_sub_area[0][1]:master_sub_area[0][1] + master_sub_area[1][1]:reduce]
                slice_row = row
                slice_column = column
                slice0 = slice(slice_row * sub_area[1][0], (slice_row + 1) * sub_area[1][0])
                slice1 = slice(slice_column * sub_area[1][1], (slice_column + 1) * sub_area[1][1])
                master_data[slice0, slice1] = data
                data_item = library.create_data_item_from_data(master_data, "Composite Survey")
                document_controller.display_data_item(data_item)
    finally:
        # restore stage to original location
        autostem.set_control_output(shift_x_control_name, sx_m)
        autostem.set_control_output(shift_y_control_name, sy_m)
# these measurements are determined by a line made from a feature before a shift to a feature after a
# shift. for instance, make a line starting on a feature. then add 100um to SShft.x and measure the
# length of the line and the angle. plug those in here.
# NOTE(review): rotation/scale are per-instrument calibration values -- remeasure per session.
rotation = math.radians(-23)
scale = 1.2
# Acquire a 5x5 grid of stage-shifted images, reporting progress to stdout.
acquire_composite_survey_image(size=(5, 5), rotation=rotation, scale=scale, print_fn=print)
| 4,368 | 0 | 23 |
7b6e6935e9f10e3177f7f225ae50e045d968b7a9 | 18,997 | py | Python | python/biograph/vdb/study_cmd.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 16 | 2021-07-14T23:32:31.000Z | 2022-03-24T16:25:15.000Z | python/biograph/vdb/study_cmd.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-20T20:39:47.000Z | 2021-09-16T20:57:59.000Z | python/biograph/vdb/study_cmd.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-15T19:38:35.000Z | 2022-01-31T19:24:56.000Z | #!/usr/bin/env python3
'''
Manage VDB studies
'''
import argparse
import inspect
import multiprocessing
import subprocess
import sys
import tempfile
from pathlib import Path
import orjson as json
import biograph.vdb.athena as athena
from biograph.tools.log import setup_logging, log
from biograph.tools.refhash import refhash
class AthenaTableReader(multiprocessing.Process):
    '''
    Parallel TSV reader. Read gz chunks directly from S3 and write lines to outq.
    '''
    # NOTE(review): no __init__/run is visible in this copy of the class;
    # merge_samples relies on self.sample_names, presumably assigned by the
    # elided initializer -- confirm against the full source.
    def merge_samples(self, sample_json, strict=False):
        '''
        Generate merged format and sample fields.

        Returns a tuple (format_string, sample_columns): format_string is the
        colon-joined union of FORMAT keys with GT always first, and
        sample_columns is the list of per-sample value strings ordered like
        self.sample_names. When no sample has data (square-off VCFs), returns
        ("GT", ".").

        strict: when True, a sample absent from the input still gets a fully
        dot-filled column instead of a bare "." placeholder.
        '''
        samples = {}
        sample_data = json.loads(sample_json)
        for sample in sample_data:
            # drop samples whose value is JSON null (no data for this variant)
            if sample_data[sample] is None:
                continue
            samples[sample] = sample_data[sample]
        # square-off VCFs may have no sample data at all
        if not samples:
            return ("GT", ".")
        # union of FORMAT keys across all samples with data
        unique_fields = {field for sample in samples for field in samples[sample]}
        format_fields = sorted(unique_fields)
        # GT is always first (assumes at least one sample carries GT;
        # remove() raises ValueError otherwise -- TODO confirm upstream guarantee)
        format_fields.remove('GT')
        format_fields.insert(0, 'GT')
        sample_column = []
        for sample in self.sample_names:
            if sample not in samples:
                if strict:
                    # fall through: the loop below fills every field with '.'
                    samples[sample] = {}
                else:
                    sample_column.append('.')
                    continue
            # pad missing fields with '.' so the column matches format_fields
            for field in format_fields:
                if field not in samples[sample]:
                    samples[sample][field] = '.'
            sample_column.append(':'.join([samples[sample][field] for field in format_fields]))
        return (':'.join(format_fields), sample_column)
def write_vcf(in_path, out_fh, tmp=tempfile.gettempdir(), **kwargs):
    '''
    Sort headerless VCF files from in_path and append to out_fh using GNU sort.

    Keyword options:
        gnusort: path to the sort binary (default: /usr/bin/sort)
        chrom_sort: sort column 1 in version/natural order (1,2,10)
                    instead of dictionary order (1,10,2)

    Raises subprocess.CalledProcessError if sort exits nonzero.
    '''
    chunk_files = [str(f) for f in Path(in_path).glob("*")]
    if not chunk_files:
        # Nothing to merge. Without this guard, sort gets no file arguments
        # and blocks forever reading stdin.
        return
    args = [
        kwargs.get("gnusort", "/usr/bin/sort"),
        "-k1,1V" if kwargs.get("chrom_sort", False) else "-k1,1d",
        "-k2,2n",     # numeric sort on POS
        "-T", tmp,    # spill directory for sort's temporary files
    ] + chunk_files
    psort = subprocess.Popen(
        args,
        stdout=out_fh
    )
    # A silently failing sort would leave a truncated/corrupt VCF; fail loudly.
    if psort.wait() != 0:
        raise subprocess.CalledProcessError(psort.returncode, args)
def add_common_arguments(parser):
    ''' common arguments '''
    # Every subcommand takes the positional study name.
    parser.add_argument(
        "study_name",
        help="Name of the study",
    )
def cmd_create(clargs):
    ''' Create a new study '''
    # Build the subcommand parser; the docstring doubles as its description.
    parser = argparse.ArgumentParser(
        prog=f"{CMD} create",
        description=inspect.getdoc(cmd_create),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(parser)
    opts = parser.parse_args(clargs)
    athena.connect().create_study(opts.study_name)
    print(f"Study '{opts.study_name}' created")
def cmd_meta(clargs):
    ''' Describe a study '''
    # Placeholder subcommand: metadata display has not been implemented.
    raise SystemExit('Not implemented yet.')
def cmd_add(clargs):
    ''' Add variants to a study '''
    # NOTE: this docstring and description are runtime-visible help text
    # (via inspect.getdoc) -- keep user-facing wording in sync with behavior.
    description = f"""{inspect.getdoc(cmd_add)}
Specify a VCF id or sample name to include all of its variants.
Wildcard matching * is applied to match multiple sample names.
To copy variants from the most recent checkpoint of an existing study,
use --from and specify one or more sample names with optional wildcards.
Use --checkpoint to select an older checkpoint in the study.
To remove VCFs from a study, use the 'filter' or 'revert' study commands.
All variants in a study must be called against the same reference.
Examples:
    # Add a specific VCF id
    $ biograph vdb study add my_study 0d1da4fa-778d-4d1d-9700-45f56acba576
    # Sample name
    $ biograph vdb study add my_study HG002
    # Wildcard match. Wrap in '' to avoid accidental shell glob matching.
    $ biograph vdb study add my_study 'HG00*' 'NA*3'
    # Copy all variants from an existing study at the most recent checkpoint
    $ biograph vdb study add my_study --from another_study '*'
    # Copy sample HG003 from an existing study at a specific checkpoint
    $ biograph vdb study add my_study --from another_study --checkpoint 3 'HG003'
"""
    parser = argparse.ArgumentParser(prog=f"{CMD} add", description=description,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    add_common_arguments(parser)
    parser.add_argument("sample", nargs="+", help="VCF Sample name or aid to add")
    # "from" is a Python keyword, so the option lands in args.src_study
    parser.add_argument("--from", dest="src_study", help="Look for samples in this study")
    parser.add_argument("--checkpoint", type=int, help="When using --from, copy variants form this checkpoint (default: most recent)")
    args = parser.parse_args(clargs)
    db = athena.connect()
    if args.src_study:
        # copy matching samples from another study/checkpoint into this one
        db.copy_from_study(args.src_study, args.checkpoint, args.study_name, args.sample)
    else:
        # nargs="+" already guarantees at least one sample; defensive check
        if not args.sample:
            raise SystemExit('You must specify at least one sample, aid, or --from')
        # --checkpoint is only meaningful when copying with --from
        if args.checkpoint:
            raise SystemExit('You must specify --from when using --checkpoint.')
        db.add_to_study(args.study_name, args.sample)
def cmd_show(clargs):
    ''' Show details about a study '''
    parser = argparse.ArgumentParser(prog=f"{CMD} show", description=inspect.getdoc(cmd_show),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    add_common_arguments(parser)
    args = parser.parse_args(clargs)
    db = athena.connect()
    db.assert_study_exists(args.study_name)
    # meta rows are key/value pairs; MAP_AGG pivots them into one JSON object
    meta = db.scalar(
        db.query(
            f"""
            SELECT CAST(MAP_AGG(key, value) AS JSON) AS meta
            FROM {db.table.study.meta}
            WHERE study_name = %(study_name)s
            ;
            """,
            params={"study_name": args.study_name},
        )
    )
    print(f"{'study_name':>16}:", args.study_name)
    print(f"{'created_on':>16}:", meta.get('created_on', '')[:19])
    # remaining metadata, excluding fields printed above / printed later
    for k in sorted(meta):
        if k == "created_on" or k.startswith("checkpoint"):
            continue
        print(f"{k:>16}:", meta[k])
    checkpoint = db.get_current_study_checkpoint(args.study_name)
    if not checkpoint:
        print("\nNo variants have been added to this study.")
        return
    print("\ncheckpoints:")
    for k in sorted(meta):
        if k.startswith("checkpoint"):
            # keys presumably look like "checkpoint_N"; [11:] drops the prefix -- confirm
            print(f"{k[11:]:>4}:", meta[k])
    # per-sample variant counts at the current checkpoint
    print(f"\n{'sample_name':<17}variant_count")
    for (sample, count) in db.query(f"""
        SELECT sample_name, COUNT(*)
        FROM {db.table.study.data}
        WHERE study_name = %(study_name)s
        AND checkpoint = %(checkpoint)d
        GROUP BY sample_name
        ORDER BY sample_name ASC
        ;
        """, params={"study_name": args.study_name, "checkpoint": checkpoint}):
        print(f"{sample:<17}{count}")
def cmd_export(clargs): # pylint: disable=too-many-statements
    ''' Export a study to a VCF file '''
    parser = argparse.ArgumentParser(prog=f"{CMD} export", description=inspect.getdoc(cmd_export),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-o", "--output", default="/dev/stdout", help="Write output VCF to this file (default: STDOUT)")
    parser.add_argument("-f", "--force", action="store_true", help="Overwrite local output directory without confirmation")
    parser.add_argument("-a", "--anno", default=None, help="Annotate the output with this annotation")
    parser.add_argument("-r", "--remerge", action="store_true", help="Force a merge prior to export, required when changing --fields (default: use pre-merged data if possible)")
    parser.add_argument("-t", "--tmp", type=str, default=tempfile.gettempdir(), help="Temporary directory (%(default)s)")
    parser.add_argument("-c", "--chromosomal", action="store_true", help="Use natural order (1,2,3,10,22,X) instead of alphabetic order (1,10,2,22,3,X)")
    parser.add_argument("--fields", help="List of FORMAT fields to export, separated by : (default: all fields)")
    parser.add_argument("--checkpoint", type=int, help="Export the study from this checkpoint (default: latest)")
    parser.add_argument("--square-off", help="Create a 'square-off' VCF with this single sample column")
    parser.add_argument("--no-header", action="store_true", help="Do not write a VCF header")
    parser.add_argument("--threads", type=int, default=multiprocessing.cpu_count(), help="Number of threads to use (%(default)s)")
    parser.add_argument("--sort", default="/usr/bin/sort", type=str, help=argparse.SUPPRESS)
    add_common_arguments(parser)
    args = parser.parse_args(clargs)
    db = athena.connect()
    db.assert_study_exists(args.study_name)
    out_file = Path(args.output)
    if str(out_file) != "/dev/stdout" and out_file.exists() and not args.force:
        raise SystemExit(f"Output path {out_file} already exists, refusing to overwrite.")
    checkpoint = args.checkpoint or db.get_current_study_checkpoint(args.study_name)
    sample_names = db.get_study_sample_names(args.study_name, checkpoint)
    if args.square_off:
        # square-off restricts output to the single requested sample column
        if args.square_off in sample_names:
            sample_names = [args.square_off]
        else:
            raise SystemExit(f"sample '{args.square_off}' is not present in {args.study_name} at checkpoint {checkpoint}.")
    try:
        # merge (or reuse a previous merge of) the study into header + gz variant chunks
        (header_path, variants_path) = \
            db.merge_study(
                args.study_name,
                force_merge=args.remerge,
                anno_name=args.anno,
                square_off=args.square_off,
                checkpoint=checkpoint,
                format_fields=args.fields.split(':') if args.fields else None
            )
    except KeyboardInterrupt:
        raise SystemExit('\nAborted.')
    inq = multiprocessing.Queue()
    log("Downloading VDB data")
    with tempfile.TemporaryDirectory(prefix=f"{args.tmp}/") as tmpdir:
        chunk_dir = Path(tmpdir) / "chunks"
        chunk_dir.mkdir()
        out_vcf = open(out_file, "wb")
        if not args.no_header:
            # merged header first, then the row of sample-name columns
            db.download_fileobj(header_path, out_vcf)
            out_vcf.write(b'\t'.join([s.encode() for s in sample_names]))
            out_vcf.write(b'\n')
            out_vcf.flush()
        # worker processes: each turns downloaded gz chunks into headerless VCF files
        reader_threads = []
        for fn in range(max(1, args.threads)):
            reader = AthenaTableReader(inq, f"{chunk_dir}/{fn}", sample_names)
            reader.start()
            reader_threads.append(reader)
        rh = refhash(lookup=db.get_metadata_from_study(args.study_name, 'refname'))
        for gz in db.ls(variants_path, '.gz'):
            # db_name/study_name/merged/_export/study_name=the_study/chrom=1/junk_uuid.gz
            # Chroms are stored internally in ebi style, so convert to native
            chrom = rh.to_native(Path(gz).parts[-2].split('=')[1], rh.build(), rh.style())
            inq.put((chrom, gz))
        # one None sentinel per worker, then wait for the queue to drain
        for rt in reader_threads:
            inq.put(None)
        for rt in reader_threads:
            rt.join()
        log("Exporting VCF")
        write_vcf(chunk_dir, out_vcf, tmp=tmpdir, chrom_sort=args.chromosomal, gnusort=args.sort)
        out_vcf.close()
def cmd_filter(clargs):
    ''' Filter variants in a study '''
    # NOTE: description is runtime-visible help text -- keep wording in sync.
    description = f"""{inspect.getdoc(cmd_filter)}
Filter variants in a study using bcftools filter syntax. A new study
checkpoint will be created.
Use --include to include variants that match the filter.
Use --exclude to exclude variants that match the filter.
Examples:
    # PASS only
    $ biograph vdb study filter my_study --exclude "FILTER != 'PASS'"
    # High quality hets on chr16
    $ biograph vdb study filter my_study --include "chrom = '16' AND GT = 0/1 AND qual > 50"
    # Per-variant missingness
    $ biograph vdb study filter my_study --include "F_MISS > 0.2"
    # Per-sample missingness
    $ biograph vdb study filter my_study --exclude "SAMPLE_MISS > 0.1"
"""
    parser = argparse.ArgumentParser(prog=f"{CMD} filter", description=description,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # exactly one of --include / --exclude must be given
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-i", "--include", help="Include only variants that match these criteria")
    group.add_argument("-e", "--exclude", help="Exclude all variants that match these criteria")
    add_common_arguments(parser)
    args = parser.parse_args(clargs)
    db = athena.connect()
    db.assert_study_exists(args.study_name)
    # whichever option was supplied carries the expression;
    # exclude=True exactly when --exclude was used
    db.filter_study(
        study_name=args.study_name,
        the_filter=args.include or args.exclude,
        exclude=args.include is None
    )
def cmd_list(clargs):
    ''' List all available studies '''
    parser = argparse.ArgumentParser(prog=f"{CMD} list", description=inspect.getdoc(cmd_list),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # no options beyond --help; parse purely for usage/error handling
    parser.parse_args(clargs)
    db = athena.connect()
    print(f"{'study_name':<21} {'created_on':<21}")
    # meta rows are key/value pairs; MAP_AGG pivots each study's rows into one JSON object
    for (study_name, meta) in db.query(f"SELECT study_name, CAST(MAP_AGG(key, value) AS JSON) FROM {db.table.study.meta} GROUP BY study_name ORDER BY study_name ASC;"):
        print(f"{study_name:<21} {meta.get('created_on', '')[:19]:<21}")
def cmd_freeze(clargs):
    ''' Prevent changes to a study '''
    # Build the subcommand parser; the docstring doubles as its description.
    parser = argparse.ArgumentParser(
        prog=f"{CMD} freeze",
        description=inspect.getdoc(cmd_freeze),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(parser)
    opts = parser.parse_args(clargs)
    athena.connect().study_freeze(opts.study_name)
    print(f"Study '{opts.study_name}' frozen")
def cmd_unfreeze(clargs):
    ''' Allow changes to a study '''
    # Build the subcommand parser; the docstring doubles as its description.
    parser = argparse.ArgumentParser(
        prog=f"{CMD} unfreeze",
        description=inspect.getdoc(cmd_unfreeze),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(parser)
    opts = parser.parse_args(clargs)
    athena.connect().study_unfreeze(opts.study_name)
    print(f"Study '{opts.study_name}' unfrozen")
def cmd_delete(clargs):
    ''' Delete a study '''
    # Build the subcommand parser; the docstring doubles as its description.
    parser = argparse.ArgumentParser(
        prog=f"{CMD} delete",
        description=inspect.getdoc(cmd_delete),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(parser)
    opts = parser.parse_args(clargs)
    athena.connect().delete_study(opts.study_name)
    print(f"Study '{opts.study_name}' deleted")
def cmd_revert(clargs):
    ''' Revert to a previous checkpoint '''
    parser = argparse.ArgumentParser(prog=f"{CMD} revert", description=inspect.getdoc(cmd_revert),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--checkpoint", type=int, help="Revert to this checkpoint (default: roll back one)")
    add_common_arguments(parser)
    args = parser.parse_args(clargs)
    db = athena.connect()
    current_checkpoint = db.get_current_study_checkpoint(args.study_name)
    if current_checkpoint == 0:
        raise SystemExit(f"No checkpoints yet in study {args.study_name}")
    # Use 'is not None' rather than truthiness: '--checkpoint 0' is a valid
    # explicit target and must not be mistaken for "no checkpoint given"
    # (truthiness would silently roll back a single checkpoint instead).
    if args.checkpoint is not None:
        if args.checkpoint < 0:
            raise SystemExit(f"Invalid checkpoint {args.checkpoint}")
        if current_checkpoint < args.checkpoint:
            raise SystemExit(f"No checkpoint {args.checkpoint} in {args.study_name} (max {current_checkpoint})")
        if current_checkpoint == args.checkpoint:
            raise SystemExit(f"Study {args.study_name} already at checkpoint {current_checkpoint}, nothing to do.")
        target_checkpoint = args.checkpoint
    else:
        # default: roll back exactly one checkpoint
        target_checkpoint = current_checkpoint - 1
    # delete newest-first, down to (but not including) the target
    for chkpt in range(current_checkpoint, target_checkpoint, -1):
        db.delete_study(args.study_name, chkpt)
    print(f"Study '{args.study_name}' reverted to checkpoint {target_checkpoint}")
def main(clargs):
    ''' Top level parser '''
    usage = f'''study [COMMAND] [options]
Manage studies in the Spiral Variant DataBase (VDB).
Run any command with --help for additional information.
    create     {inspect.getdoc(CMDS['create'])}
    list       {inspect.getdoc(CMDS['list'])}
    show       {inspect.getdoc(CMDS['show'])}
    add        {inspect.getdoc(CMDS['add'])}
    filter     {inspect.getdoc(CMDS['filter'])}
    export     {inspect.getdoc(CMDS['export'])}
    freeze     {inspect.getdoc(CMDS['freeze'])}
    unfreeze   {inspect.getdoc(CMDS['unfreeze'])}
    revert     {inspect.getdoc(CMDS['revert'])}
    delete     {inspect.getdoc(CMDS['delete'])}
'''
    parser = argparse.ArgumentParser(prog="study", usage=usage,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("cmd", metavar="COMMAND", choices=CMDS.keys(), type=str, help=argparse.SUPPRESS)
    parser.add_argument("options", metavar="OPTIONS", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
    parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
    # Show help when no subcommand was given. Testing clargs (the arguments we
    # were actually handed) instead of len(sys.argv) keeps this correct for any
    # invocation path, not just the one wrapper whose argv happens to be 3 long.
    if not clargs:
        raise SystemExit(parser.print_help())
    args = parser.parse_args(clargs)
    setup_logging(debug_mode=args.debug, simple=True)
    # dispatch to the subcommand handler with its remaining options
    CMDS[args.cmd](args.options)
# top level command
CMD = 'biograph vdb study'
# module global CMDs
# Dispatch table: subcommand name -> handler. Keys are the valid COMMAND
# choices in main(); each handler's docstring feeds main()'s usage text.
CMDS = {
    'create': cmd_create,
    'add': cmd_add,
    'filter': cmd_filter,
    'list': cmd_list,
    'show': cmd_show,
    'delete': cmd_delete,
    'freeze': cmd_freeze,
    'unfreeze': cmd_unfreeze,
    'export': cmd_export,
    'revert': cmd_revert,
}
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        # exit without a traceback on Ctrl-C
        raise SystemExit('\nAborted.')
| 37.031189 | 177 | 0.635574 | #!/usr/bin/env python3
'''
Manage VDB studies
'''
import argparse
import inspect
import multiprocessing
import subprocess
import sys
import tempfile
from pathlib import Path
import orjson as json
import biograph.vdb.athena as athena
from biograph.tools.log import setup_logging, log
from biograph.tools.refhash import refhash
class AthenaTableReader(multiprocessing.Process):
    '''
    Parallel TSV reader. Read gz chunks directly from S3 and write lines to outq.
    '''
    def __init__(self, inq, out_file, sample_names):
        # inq: queue of (chrom, s3_prefix) work items; None is the shutdown sentinel
        # out_file: path of the headerless VCF chunk this worker writes
        # sample_names: ordered list of sample columns for the output VCF
        super().__init__()
        self.inq = inq
        self.out_file = out_file
        self.sample_names = sample_names
        # each worker process holds its own Athena connection
        self.db = athena.connect(allow_db_create=False)
    def merge_samples(self, sample_json, strict=False):
        '''
        Generate merged format and sample fields.

        Returns a tuple (format_string, sample_columns): format_string is the
        colon-joined union of FORMAT keys with GT always first, and
        sample_columns is the list of per-sample value strings ordered like
        self.sample_names. When no sample has data (square-off VCFs), returns
        ("GT", ".").

        strict: when True, a sample absent from the input still gets a fully
        dot-filled column instead of a bare "." placeholder.
        '''
        samples = {}
        sample_data = json.loads(sample_json)
        for sample in sample_data:
            # drop samples whose value is JSON null (no data for this variant)
            if sample_data[sample] is None:
                continue
            samples[sample] = sample_data[sample]
        # square-off VCFs may have no sample data at all
        if not samples:
            return ("GT", ".")
        # union of FORMAT keys across all samples with data
        unique_fields = {field for sample in samples for field in samples[sample]}
        format_fields = sorted(unique_fields)
        # GT is always first (assumes at least one sample carries GT;
        # remove() raises ValueError otherwise -- TODO confirm upstream guarantee)
        format_fields.remove('GT')
        format_fields.insert(0, 'GT')
        sample_column = []
        for sample in self.sample_names:
            if sample not in samples:
                if strict:
                    # fall through: the loop below fills every field with '.'
                    samples[sample] = {}
                else:
                    sample_column.append('.')
                    continue
            # pad missing fields with '.' so the column matches format_fields
            for field in format_fields:
                if field not in samples[sample]:
                    samples[sample][field] = '.'
            sample_column.append(':'.join([samples[sample][field] for field in format_fields]))
        return (':'.join(format_fields), sample_column)
    def run(self):
        # Worker loop: consume (chrom, prefix) items until the None sentinel,
        # streaming each downloaded gz chunk out as tab-separated VCF body lines.
        with open(self.out_file, "w") as f:
            while True:
                data = self.inq.get()
                if data is None:
                    break
                (chrom, prefix) = data
                with self.db.download_gz_fh(prefix) as in_vcf:
                    for line in in_vcf:
                        # chunk rows carry every VCF column except CHROM
                        (pos, varid, ref, alt, qual, filt, info, sample_column) = line.decode().rstrip().split('\t')
                        fmt, samples = self.merge_samples(sample_column)
                        print(
                            chrom,
                            pos,
                            varid or '.',  # empty ID -> VCF missing marker
                            ref,
                            alt,
                            qual,
                            filt,
                            info or 'NS=0',  # empty INFO -> placeholder
                            fmt,
                            '\t'.join(samples),
                            sep='\t',
                            file=f
                        )
def write_vcf(in_path, out_fh, tmp=tempfile.gettempdir(), **kwargs):
'''
Sort headerless VCF files from in_path and append to out_file using GNU sort
'''
args = [
kwargs.get("gnusort", "/usr/bin/sort"),
"-k1,1V" if kwargs.get("chrom_sort", False) else "-k1,1d",
"-k2,2n",
"-T", tmp
] + [str(f) for f in Path(in_path).glob("*")]
psort = subprocess.Popen(
args,
stdout=out_fh
)
psort.wait()
def add_common_arguments(parser):
''' common arguments '''
parser.add_argument("study_name", help="Name of the study")
def cmd_create(clargs):
''' Create a new study '''
parser = argparse.ArgumentParser(prog=f"{CMD} create", description=inspect.getdoc(cmd_create),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.create_study(args.study_name)
print(f"Study '{args.study_name}' created")
def cmd_meta(clargs):
''' Describe a study '''
raise SystemExit('Not implemented yet.')
def cmd_add(clargs):
''' Add variants to a study '''
description = f"""{inspect.getdoc(cmd_add)}
Specify a VCF id or sample name to include all of its variants.
Wildcard matching * is applied to match multiple sample names.
To copy variants from the most recent checkpoint of an existing study,
use --from and specify one or more sample names with optional wildcards.
Use --checkpoint to select an older checkpoint in the study.
To remove VCFs from a study, use the 'filter' or 'revert' study commands.
All variants in a study must be called against the same reference.
Examples:
# Add a specific VCF id
$ biograph vdb study add my_study 0d1da4fa-778d-4d1d-9700-45f56acba576
# Sample name
$ biograph vdb study add my_study HG002
# Wildcard match. Wrap in '' to avoid accidental shell glob matching.
$ biograph vdb study add my_study 'HG00*' 'NA*3'
# Copy all variants from an existing study at the most recent checkpoint
$ biograph vdb study add my_study --from another_study '*'
# Copy sample HG003 from an existing study at a specific checkpoint
$ biograph vdb study add my_study --from another_study --checkpoint 3 'HG003'
"""
parser = argparse.ArgumentParser(prog=f"{CMD} add", description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
parser.add_argument("sample", nargs="+", help="VCF Sample name or aid to add")
parser.add_argument("--from", dest="src_study", help="Look for samples in this study")
parser.add_argument("--checkpoint", type=int, help="When using --from, copy variants form this checkpoint (default: most recent)")
args = parser.parse_args(clargs)
db = athena.connect()
if args.src_study:
db.copy_from_study(args.src_study, args.checkpoint, args.study_name, args.sample)
else:
if not args.sample:
raise SystemExit('You must specify at least one sample, aid, or --from')
if args.checkpoint:
raise SystemExit('You must specify --from when using --checkpoint.')
db.add_to_study(args.study_name, args.sample)
def cmd_show(clargs):
''' Show details about a study '''
parser = argparse.ArgumentParser(prog=f"{CMD} show", description=inspect.getdoc(cmd_show),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
meta = db.scalar(
db.query(
f"""
SELECT CAST(MAP_AGG(key, value) AS JSON) AS meta
FROM {db.table.study.meta}
WHERE study_name = %(study_name)s
;
""",
params={"study_name": args.study_name},
)
)
print(f"{'study_name':>16}:", args.study_name)
print(f"{'created_on':>16}:", meta.get('created_on', '')[:19])
for k in sorted(meta):
if k == "created_on" or k.startswith("checkpoint"):
continue
print(f"{k:>16}:", meta[k])
checkpoint = db.get_current_study_checkpoint(args.study_name)
if not checkpoint:
print("\nNo variants have been added to this study.")
return
print("\ncheckpoints:")
for k in sorted(meta):
if k.startswith("checkpoint"):
print(f"{k[11:]:>4}:", meta[k])
print(f"\n{'sample_name':<17}variant_count")
for (sample, count) in db.query(f"""
SELECT sample_name, COUNT(*)
FROM {db.table.study.data}
WHERE study_name = %(study_name)s
AND checkpoint = %(checkpoint)d
GROUP BY sample_name
ORDER BY sample_name ASC
;
""", params={"study_name": args.study_name, "checkpoint": checkpoint}):
print(f"{sample:<17}{count}")
def cmd_export(clargs): # pylint: disable=too-many-statements
''' Export a study to a VCF file '''
parser = argparse.ArgumentParser(prog=f"{CMD} export", description=inspect.getdoc(cmd_export),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-o", "--output", default="/dev/stdout", help="Write output VCF to this file (default: STDOUT)")
parser.add_argument("-f", "--force", action="store_true", help="Overwrite local output directory without confirmation")
parser.add_argument("-a", "--anno", default=None, help="Annotate the output with this annotation")
parser.add_argument("-r", "--remerge", action="store_true", help="Force a merge prior to export, required when changing --fields (default: use pre-merged data if possible)")
parser.add_argument("-t", "--tmp", type=str, default=tempfile.gettempdir(), help="Temporary directory (%(default)s)")
parser.add_argument("-c", "--chromosomal", action="store_true", help="Use natural order (1,2,3,10,22,X) instead of alphabetic order (1,10,2,22,3,X)")
parser.add_argument("--fields", help="List of FORMAT fields to export, separated by : (default: all fields)")
parser.add_argument("--checkpoint", type=int, help="Export the study from this checkpoint (default: latest)")
parser.add_argument("--square-off", help="Create a 'square-off' VCF with this single sample column")
parser.add_argument("--no-header", action="store_true", help="Do not write a VCF header")
parser.add_argument("--threads", type=int, default=multiprocessing.cpu_count(), help="Number of threads to use (%(default)s)")
parser.add_argument("--sort", default="/usr/bin/sort", type=str, help=argparse.SUPPRESS)
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
out_file = Path(args.output)
if str(out_file) != "/dev/stdout" and out_file.exists() and not args.force:
raise SystemExit(f"Output path {out_file} already exists, refusing to overwrite.")
checkpoint = args.checkpoint or db.get_current_study_checkpoint(args.study_name)
sample_names = db.get_study_sample_names(args.study_name, checkpoint)
if args.square_off:
if args.square_off in sample_names:
sample_names = [args.square_off]
else:
raise SystemExit(f"sample '{args.square_off}' is not present in {args.study_name} at checkpoint {checkpoint}.")
try:
(header_path, variants_path) = \
db.merge_study(
args.study_name,
force_merge=args.remerge,
anno_name=args.anno,
square_off=args.square_off,
checkpoint=checkpoint,
format_fields=args.fields.split(':') if args.fields else None
)
except KeyboardInterrupt:
raise SystemExit('\nAborted.')
inq = multiprocessing.Queue()
log("Downloading VDB data")
with tempfile.TemporaryDirectory(prefix=f"{args.tmp}/") as tmpdir:
chunk_dir = Path(tmpdir) / "chunks"
chunk_dir.mkdir()
out_vcf = open(out_file, "wb")
if not args.no_header:
db.download_fileobj(header_path, out_vcf)
out_vcf.write(b'\t'.join([s.encode() for s in sample_names]))
out_vcf.write(b'\n')
out_vcf.flush()
reader_threads = []
for fn in range(max(1, args.threads)):
reader = AthenaTableReader(inq, f"{chunk_dir}/{fn}", sample_names)
reader.start()
reader_threads.append(reader)
rh = refhash(lookup=db.get_metadata_from_study(args.study_name, 'refname'))
for gz in db.ls(variants_path, '.gz'):
# db_name/study_name/merged/_export/study_name=the_study/chrom=1/junk_uuid.gz
# Chroms are stored internally in ebi style, so convert to native
chrom = rh.to_native(Path(gz).parts[-2].split('=')[1], rh.build(), rh.style())
inq.put((chrom, gz))
for rt in reader_threads:
inq.put(None)
for rt in reader_threads:
rt.join()
log("Exporting VCF")
write_vcf(chunk_dir, out_vcf, tmp=tmpdir, chrom_sort=args.chromosomal, gnusort=args.sort)
out_vcf.close()
def cmd_filter(clargs):
''' Filter variants in a study '''
description = f"""{inspect.getdoc(cmd_filter)}
Filter variants in a study using bcftools filter syntax. A new study
checkpoint will be created.
Use --include to include variants that match the filter.
Use --exclude to exclude variants that match the filter.
Examples:
# PASS only
$ biograph vdb study filter my_study --exclude "FILTER != 'PASS'"
# High quality hets on chr16
$ biograph vdb study filter my_study --include "chrom = '16' AND GT = 0/1 AND qual > 50"
# Per-variant missingness
$ biograph vdb study filter my_study --include "F_MISS > 0.2"
# Per-sample missingness
$ biograph vdb study filter my_study --exclude "SAMPLE_MISS > 0.1"
"""
parser = argparse.ArgumentParser(prog=f"{CMD} filter", description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-i", "--include", help="Include only variants that match these criteria")
group.add_argument("-e", "--exclude", help="Exclude all variants that match these criteria")
add_common_arguments(parser)
args = parser.parse_args(clargs)
db = athena.connect()
db.assert_study_exists(args.study_name)
db.filter_study(
study_name=args.study_name,
the_filter=args.include or args.exclude,
exclude=args.include is None
)
def cmd_list(clargs):
''' List all available studies '''
parser = argparse.ArgumentParser(prog=f"{CMD} list", description=inspect.getdoc(cmd_list),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.parse_args(clargs)
db = athena.connect()
print(f"{'study_name':<21} {'created_on':<21}")
for (study_name, meta) in db.query(f"SELECT study_name, CAST(MAP_AGG(key, value) AS JSON) FROM {db.table.study.meta} GROUP BY study_name ORDER BY study_name ASC;"):
print(f"{study_name:<21} {meta.get('created_on', '')[:19]:<21}")
def cmd_freeze(clargs):
    ''' Prevent changes to a study '''
    argp = argparse.ArgumentParser(
        prog=f"{CMD} freeze",
        description=inspect.getdoc(cmd_freeze),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(argp)
    opts = argp.parse_args(clargs)
    # Mark the study read-only on the server side.
    athena.connect().study_freeze(opts.study_name)
    print(f"Study '{opts.study_name}' frozen")
def cmd_unfreeze(clargs):
    ''' Allow changes to a study '''
    argp = argparse.ArgumentParser(
        prog=f"{CMD} unfreeze",
        description=inspect.getdoc(cmd_unfreeze),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(argp)
    opts = argp.parse_args(clargs)
    # Lift the read-only flag set by "freeze".
    athena.connect().study_unfreeze(opts.study_name)
    print(f"Study '{opts.study_name}' unfrozen")
def cmd_delete(clargs):
    ''' Delete a study '''
    argp = argparse.ArgumentParser(
        prog=f"{CMD} delete",
        description=inspect.getdoc(cmd_delete),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_common_arguments(argp)
    opts = argp.parse_args(clargs)
    # Remove the study (all checkpoints) from the database.
    athena.connect().delete_study(opts.study_name)
    print(f"Study '{opts.study_name}' deleted")
def cmd_revert(clargs):
    ''' Revert to a previous checkpoint '''
    parser = argparse.ArgumentParser(prog=f"{CMD} revert", description=inspect.getdoc(cmd_revert),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--checkpoint", type=int, help="Revert to this checkpoint (default: roll back one)")
    add_common_arguments(parser)
    args = parser.parse_args(clargs)
    db = athena.connect()
    current_checkpoint = db.get_current_study_checkpoint(args.study_name)
    if current_checkpoint == 0:
        raise SystemExit(f"No checkpoints yet in study {args.study_name}")
    # BUGFIX: test against None rather than truthiness so an explicit
    # "--checkpoint 0" is rejected below instead of silently rolling back
    # a single checkpoint.
    if args.checkpoint is not None:
        if args.checkpoint < 1:
            raise SystemExit(f"Invalid checkpoint {args.checkpoint}")
        if current_checkpoint < args.checkpoint:
            raise SystemExit(f"No checkpoint {args.checkpoint} in {args.study_name} (max {current_checkpoint})")
        if current_checkpoint == args.checkpoint:
            raise SystemExit(f"Study {args.study_name} already at checkpoint {current_checkpoint}, nothing to do.")
        target_checkpoint = args.checkpoint
    else:
        # No checkpoint given: roll back exactly one.
        target_checkpoint = current_checkpoint - 1
    # Drop every checkpoint above the target, newest first.
    for chkpt in range(current_checkpoint, target_checkpoint, -1):
        db.delete_study(args.study_name, chkpt)
    print(f"Study '{args.study_name}' reverted to checkpoint {target_checkpoint}")
def main(clargs):
    ''' Top level parser '''
    # Usage text pulls each subcommand's docstring from the CMDS table so the
    # summary stays in sync with the implementations.
    usage = f'''study [COMMAND] [options]
Manage studies in the Spiral Variant DataBase (VDB).
Run any command with --help for additional information.
create {inspect.getdoc(CMDS['create'])}
list {inspect.getdoc(CMDS['list'])}
show {inspect.getdoc(CMDS['show'])}
add {inspect.getdoc(CMDS['add'])}
filter {inspect.getdoc(CMDS['filter'])}
export {inspect.getdoc(CMDS['export'])}
freeze {inspect.getdoc(CMDS['freeze'])}
unfreeze {inspect.getdoc(CMDS['unfreeze'])}
revert {inspect.getdoc(CMDS['revert'])}
delete {inspect.getdoc(CMDS['delete'])}
    '''
    parser = argparse.ArgumentParser(prog="study", usage=usage,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("cmd", metavar="COMMAND", choices=CMDS.keys(), type=str, help=argparse.SUPPRESS)
    parser.add_argument("options", metavar="OPTIONS", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
    parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
    # NOTE(review): this inspects the full sys.argv, not clargs — it assumes
    # invocation as "biograph vdb study" so len(sys.argv) == 3 means "no
    # subcommand given"; confirm for other entry points. parser.print_help()
    # returns None, so SystemExit(None) exits with status 0 after help.
    if len(sys.argv) == 3:
        raise SystemExit(parser.print_help())
    args = parser.parse_args(clargs)
    setup_logging(debug_mode=args.debug, simple=True)
    # Dispatch the remaining options to the chosen subcommand handler.
    CMDS[args.cmd](args.options)
# top level command
CMD = 'biograph vdb study'  # prog prefix used by every subcommand's parser
# module global CMDs
# Dispatch table mapping subcommand name -> handler. main() and its usage
# text both rely on this table (cmd_create/cmd_add/cmd_show/cmd_export are
# defined earlier in this file).
CMDS = {
    'create': cmd_create,
    'add': cmd_add,
    'filter': cmd_filter,
    'list': cmd_list,
    'show': cmd_show,
    'delete': cmd_delete,
    'freeze': cmd_freeze,
    'unfreeze': cmd_unfreeze,
    'export': cmd_export,
    'revert': cmd_revert,
}
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        # Ctrl-C: exit with a short message instead of a traceback.
        raise SystemExit('\nAborted.')
| 1,197 | 0 | 53 |
4fd45db9c2e379d53922d350b4c817fe2011fd1f | 38 | py | Python | tests/core/backend_tests.py | LTD-Beget/cement | e885932583c14037599a2aa8a16d4d8a521364bd | [
"BSD-3-Clause"
] | null | null | null | tests/core/backend_tests.py | LTD-Beget/cement | e885932583c14037599a2aa8a16d4d8a521364bd | [
"BSD-3-Clause"
] | null | null | null | tests/core/backend_tests.py | LTD-Beget/cement | e885932583c14037599a2aa8a16d4d8a521364bd | [
"BSD-3-Clause"
] | null | null | null | """Tests for cement.core.backend."""
| 12.666667 | 36 | 0.657895 | """Tests for cement.core.backend."""
| 0 | 0 | 0 |
8038c2dcc55dc95a304ef67bf6c48b936f148f68 | 3,610 | py | Python | sdk/python/lib/pulumi/runtime/proto/language_pb2_grpc.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | 1 | 2021-11-23T21:49:18.000Z | 2021-11-23T21:49:18.000Z | sdk/python/lib/pulumi/runtime/proto/language_pb2_grpc.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | null | null | null | sdk/python/lib/pulumi/runtime/proto/language_pb2_grpc.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from . import language_pb2 as language__pb2
from . import plugin_pb2 as plugin__pb2
class LanguageRuntimeStub(object):
    """LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for confguring and creating resource objects.
    """
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # protoc-generated client stub (file is marked DO NOT EDIT): each
        # attribute is a unary-unary callable bound to one RPC of the
        # pulumirpc.LanguageRuntime service with its protobuf (de)serializers.
        self.GetRequiredPlugins = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/GetRequiredPlugins",
            request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
            response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,
        )
        self.Run = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/Run",
            request_serializer=language__pb2.RunRequest.SerializeToString,
            response_deserializer=language__pb2.RunResponse.FromString,
        )
        self.GetPluginInfo = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/GetPluginInfo",
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=plugin__pb2.PluginInfo.FromString,
        )
class LanguageRuntimeServicer(object):
    """LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for confguring and creating resource objects.
    """
    # protoc-generated service base class: subclasses override these handlers;
    # the defaults report UNIMPLEMENTED to the caller and raise locally.
    def GetRequiredPlugins(self, request, context):
        """GetRequiredPlugins computes the complete set of anticipated plugins required by a program."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Run(self, request, context):
        """Run executes a program and returns its result."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def GetPluginInfo(self, request, context):
        """GetPluginInfo returns generic information about this plugin, like its version."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
| 43.493976 | 119 | 0.726593 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from . import language_pb2 as language__pb2
from . import plugin_pb2 as plugin__pb2
class LanguageRuntimeStub(object):
    """LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for confguring and creating resource objects.
    """
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # protoc-generated client stub (file is marked DO NOT EDIT): each
        # attribute is a unary-unary callable bound to one RPC of the
        # pulumirpc.LanguageRuntime service with its protobuf (de)serializers.
        self.GetRequiredPlugins = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/GetRequiredPlugins",
            request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
            response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,
        )
        self.Run = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/Run",
            request_serializer=language__pb2.RunRequest.SerializeToString,
            response_deserializer=language__pb2.RunResponse.FromString,
        )
        self.GetPluginInfo = channel.unary_unary(
            "/pulumirpc.LanguageRuntime/GetPluginInfo",
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=plugin__pb2.PluginInfo.FromString,
        )
class LanguageRuntimeServicer(object):
    """LanguageRuntime is the interface that the planning monitor uses to drive execution of an interpreter responsible
    for confguring and creating resource objects.
    """
    # protoc-generated service base class: subclasses override these handlers;
    # the defaults report UNIMPLEMENTED to the caller and raise locally.
    def GetRequiredPlugins(self, request, context):
        """GetRequiredPlugins computes the complete set of anticipated plugins required by a program."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Run(self, request, context):
        """Run executes a program and returns its result."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def GetPluginInfo(self, request, context):
        """GetPluginInfo returns generic information about this plugin, like its version."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_LanguageRuntimeServicer_to_server(servicer, server):
    """Register the servicer's RPC handlers with a grpc.Server (generated code)."""
    # Maps method name -> handler wrapping the servicer method plus the
    # protobuf request deserializer / response serializer.
    rpc_method_handlers = {
        "GetRequiredPlugins": grpc.unary_unary_rpc_method_handler(
            servicer.GetRequiredPlugins,
            request_deserializer=language__pb2.GetRequiredPluginsRequest.FromString,
            response_serializer=language__pb2.GetRequiredPluginsResponse.SerializeToString,
        ),
        "Run": grpc.unary_unary_rpc_method_handler(
            servicer.Run,
            request_deserializer=language__pb2.RunRequest.FromString,
            response_serializer=language__pb2.RunResponse.SerializeToString,
        ),
        "GetPluginInfo": grpc.unary_unary_rpc_method_handler(
            servicer.GetPluginInfo,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=plugin__pb2.PluginInfo.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "pulumirpc.LanguageRuntime", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
340a6637d5d3b1008bf7f41fcad83627e2c6f06d | 2,447 | py | Python | tsdl/classifiers/poly.py | burgerdev/hostload | 93142628bb32923c5e6f3a8b791488d72a5c9077 | [
"MIT"
] | null | null | null | tsdl/classifiers/poly.py | burgerdev/hostload | 93142628bb32923c5e6f3a8b791488d72a5c9077 | [
"MIT"
] | null | null | null | tsdl/classifiers/poly.py | burgerdev/hostload | 93142628bb32923c5e6f3a8b791488d72a5c9077 | [
"MIT"
] | null | null | null |
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from lazyflow.rtype import SubRegion
from lazyflow.operator import InputSlot
from .abcs import OpTrain
from .abcs import OpPredict
from tsdl.tools import Regression
| 32.197368 | 78 | 0.637515 |
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from lazyflow.rtype import SubRegion
from lazyflow.operator import InputSlot
from .abcs import OpTrain
from .abcs import OpPredict
from tsdl.tools import Regression
class OpPolynomialTrain(OpTrain, Regression):
    """Fit a polynomial least-squares regressor on the pooled Train+Valid
    data and expose the fitted sklearn Pipeline as the single output value."""
    Degree = InputSlot(optional=True)  # optional runtime override of the polynomial degree
    __degree = 3  # class default; name-mangled to _OpPolynomialTrain__degree
    def setupOutputs(self):
        """Resolve the polynomial degree from the slot or the config default."""
        super(OpPolynomialTrain, self).setupOutputs()
        if self.Degree.ready():
            self.__degree = self.Degree.value
        else:
            # single underscore is from config, double is used later on
            self.__degree = self._degree
    @classmethod
    def get_default_config(cls):
        conf = super(OpPolynomialTrain, cls).get_default_config()
        conf["degree"] = cls.__degree
        return conf
    def execute(self, slot, subindex, roi, result):
        """Train the model and store it at result[0].

        The output holds exactly one element, hence the roi [0, 1) asserts.
        Slot index 0 carries the features, index 1 the regression target.
        """
        assert len(self.Train) == 2, "need data and target"
        assert len(self.Valid) == 2, "need data and target"
        assert roi.start[0] == 0
        assert roi.stop[0] == 1
        # Pool training and validation features along the sample axis.
        train = self.Train[0][...].wait().view(np.ndarray)
        valid = self.Valid[0][...].wait().view(np.ndarray)
        features = np.concatenate((train, valid), axis=0)
        train = self.Train[1][...].wait().view(np.ndarray)
        valid = self.Valid[1][...].wait().view(np.ndarray)
        target = np.concatenate((train, valid), axis=0)
        target = target.squeeze()
        poly = PolynomialFeatures(degree=self.__degree)
        # polynomial features contain column of 1's, no need for fit_intercept
        linear = LinearRegression(fit_intercept=False)
        model = Pipeline([("poly", poly), ("linear", linear)])
        model.fit(features, target)
        result[0] = model
class OpPolynomialPredict(OpPredict, Regression):
    """Apply a fitted sklearn Pipeline (from OpPolynomialTrain) to the
    requested time range of the Input features."""
    def execute(self, slot, subindex, roi, result):
        # Predict over the requested time slice using all feature channels.
        start_t = roi.start[0]
        stop_t = roi.stop[0]
        start_c = 0
        stop_c = self.Input.meta.shape[1]
        new_roi = SubRegion(self.Input, start=(start_t, start_c),
                            stop=(stop_t, stop_c))
        features = self.Input.get(new_roi).wait()
        # Classifier slot stores the single trained Pipeline at index 0.
        model = self.Classifier[...].wait()[0]
        if not isinstance(model, Pipeline):
            raise ValueError("unsupported model '{}'".format(type(model)))
        result[:, 0] = model.predict(features)
| 1,838 | 205 | 72 |
c371bf194a11f9f676fde05a640618c87f3d85b8 | 2,632 | py | Python | slope/models/corefres/mentionpair/data.py | RyanElliott10/slope | 3247b04181cb6696978ce544382d97fa570b862d | [
"MIT"
] | 1 | 2020-10-07T04:33:54.000Z | 2020-10-07T04:33:54.000Z | slope/models/corefres/mentionpair/data.py | RyanElliott10/slope | 3247b04181cb6696978ce544382d97fa570b862d | [
"MIT"
] | 1 | 2021-08-31T04:10:05.000Z | 2021-08-31T04:10:05.000Z | slope/models/corefres/mentionpair/data.py | RyanElliott10/slope | 3247b04181cb6696978ce544382d97fa570b862d | [
"MIT"
] | null | null | null | from itertools import combinations, product
import pprint
from typing import List, Tuple
import numpy as np
from slope.utils.preco_parser import PreCoFileType, PreCoParser
MentionIndices = List[int]
RawMentionClusters = List[List[MentionIndices]]
pp = pprint.PrettyPrinter()
class MentionPairDataLoader(object):
    '''
    Splits data from PreCoParser into mention pairs (if training), extracts necessary features, and
    allows iteration.
    '''
    def preprocess(self) -> List[MentionPair]:
        '''
        Converts parsed data into training data.
        '''
        np_data = self.parsed_data.to_numpy()
        combs: List[MentionPair] = []
        # NOTE(review): [1:2] processes only a single document and the pprint
        # below looks like a debugging leftover — confirm before use.
        for id, dp in enumerate(np_data[1:2]):
            pp.pprint(dp[2])
            combs.extend(self._build_pairs(dp[2], id))
        return combs
    def _build_pairs(self, clusters: RawMentionClusters, id: int) -> List[MentionPair]:
        '''
        Iterates through all mention clusters for a given datapoint/document and constructs a
        combinatory matrix (of types) to produce true training data.
        '''
        combs: List[MentionPair] = []
        # NOTE(review): clusters[:2] caps the outer loop at two clusters —
        # presumably another debug truncation; verify against the caller.
        for i, value in enumerate(clusters[:2]):
            for j, sec in enumerate(clusters[i:]):
                if j == 0:
                    # The "value" itself; coreferents
                    # Intra-cluster pairs become positive examples.
                    combs.extend([MentionPair(*el, True, MentionPair.make_id(id, i))
                                  for el in list(combinations(value, 2))])
                else:
                    # Cross-cluster pairs become negative examples.
                    combs.extend([MentionPair(*el, False, MentionPair.make_id(id, i))
                                  for el in list(product(value, sec))])
        return combs
| 31.710843 | 106 | 0.616261 | from itertools import combinations, product
import pprint
from typing import List, Tuple
import numpy as np
from slope.utils.preco_parser import PreCoFileType, PreCoParser
MentionIndices = List[int]
RawMentionClusters = List[List[MentionIndices]]
pp = pprint.PrettyPrinter()
class MentionPair(object):
    """Two mention index spans plus a flag saying whether they corefer."""

    def __init__(self, i_indices: MentionIndices, j_indices: MentionIndices, iscoreferent: bool, id: str):
        # Store the two mention spans, the gold label and the pair id.
        self.i_indices = i_indices
        self.j_indices = j_indices
        self.iscoreferent = iscoreferent
        self.id = id

    @staticmethod
    def make_id(major: int, minor: int) -> str:
        """Build a '<doc>.<cluster>' identifier string."""
        return '{}.{}'.format(major, minor)

    @property
    def sent_indices(self) -> Tuple[int, int]:
        """Sentence index (first element) of each mention span."""
        return (self.i_indices[0], self.j_indices[0])

    def __str__(self) -> str:
        return '{} | {} {} : {}'.format(
            self.i_indices, self.j_indices, self.iscoreferent, self.id)

    def __repr__(self) -> str:
        return self.__str__()
class MentionPairDataLoader(object):
    '''
    Splits data from PreCoParser into mention pairs (if training), extracts necessary features, and
    allows iteration.
    '''
    def __init__(self, filetype: PreCoFileType, singletons: bool):
        # Parse the PreCo file once and expand it into MentionPair examples.
        parser = PreCoParser(filetype, singletons=singletons)
        self.parsed_data = parser.data()
        self.data = self.preprocess()
    def preprocess(self) -> List[MentionPair]:
        '''
        Converts parsed data into training data.
        '''
        np_data = self.parsed_data.to_numpy()
        combs: List[MentionPair] = []
        # NOTE(review): [1:2] processes only a single document and the pprint
        # below looks like a debugging leftover — confirm before use.
        for id, dp in enumerate(np_data[1:2]):
            pp.pprint(dp[2])
            combs.extend(self._build_pairs(dp[2], id))
        return combs
    def _build_pairs(self, clusters: RawMentionClusters, id: int) -> List[MentionPair]:
        '''
        Iterates through all mention clusters for a given datapoint/document and constructs a
        combinatory matrix (of types) to produce true training data.
        '''
        combs: List[MentionPair] = []
        # NOTE(review): clusters[:2] caps the outer loop at two clusters —
        # presumably another debug truncation; verify against the caller.
        for i, value in enumerate(clusters[:2]):
            for j, sec in enumerate(clusters[i:]):
                if j == 0:
                    # The "value" itself; coreferents
                    # Intra-cluster pairs become positive examples.
                    combs.extend([MentionPair(*el, True, MentionPair.make_id(id, i))
                                  for el in list(combinations(value, 2))])
                else:
                    # Cross-cluster pairs become negative examples.
                    combs.extend([MentionPair(*el, False, MentionPair.make_id(id, i))
                                  for el in list(product(value, sec))])
        return combs
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        # Not implemented yet; instances are not iterable despite the docstring.
        pass
| 684 | 171 | 104 |
bcc5f415ab3d29c23264e830404576ecdb146419 | 4,816 | py | Python | doorman/webserver.py | fruch/doorman | 75bea9f5e776af6e8a0f1d5a27d8bb508bb9687d | [
"BSD-2-Clause"
] | 8 | 2016-10-23T13:00:06.000Z | 2021-03-05T13:03:24.000Z | doorman/webserver.py | fruch/doorman | 75bea9f5e776af6e8a0f1d5a27d8bb508bb9687d | [
"BSD-2-Clause"
] | null | null | null | doorman/webserver.py | fruch/doorman | 75bea9f5e776af6e8a0f1d5a27d8bb508bb9687d | [
"BSD-2-Clause"
] | 2 | 2019-05-23T13:12:47.000Z | 2021-02-15T18:34:33.000Z | import datetime
import json
import pymongo.errors
from bson import json_util
import pymongo
import pql
from flask import Flask, request, g
from flask_restful import Resource, Api, reqparse, abort
app = Flask(__name__)
api = Api(app)
app.config.from_pyfile('config.py', silent=True)
def get_resources_db():
    """Opens a new database connection if there is none yet for the
    current application context.

    Returns the 'resources' collection (database literally named 'db'),
    memoized on flask.g with a unique index on the 'id' field.
    """
    if not hasattr(g, 'resources'):
        g.resources = pymongo.MongoClient(app.config['MONGO_URL']).db.resources
        # create_index is idempotent and replaces ensure_index, which was
        # deprecated in PyMongo 3 and removed in PyMongo 4.
        g.resources.create_index('id', unique=True)
    return g.resources
# Parser for the lock endpoint: who is locking what, for how long.
post_parser = reqparse.RequestParser()
post_parser.add_argument(
    'username', dest='username',
    required=True,
    help='The user\'s username',
)
post_parser.add_argument(
    'resource', dest='resource',
    required=False,
    help='The resource to lock',
)
# NOTE(review): no type= is declared, so this value arrives as a string;
# Lock.post feeds it to timedelta(minutes=...) — confirm a numeric type
# conversion happens somewhere.
post_parser.add_argument(
    'duration', dest='duration',
    help="for how much time the resource will be saved"
)
post_parser.add_argument(
    'q', dest='query',
    required=False,
    help='query to use',
)
# Parser for read endpoints: an optional PQL query string.
query_parser = reqparse.RequestParser()
query_parser.add_argument(
    'q', dest='query',
    required=False,
    help='query to use',
)
# REST routes: collection listing, single-resource CRUD, and lock management.
api.add_resource(LockedResourceList, '/resources')
api.add_resource(LockedResource, '/resource/<string:id>')
api.add_resource(Lock, '/lock', '/lock/<string:id>')
| 31.477124 | 121 | 0.61711 | import datetime
import json
import pymongo.errors
from bson import json_util
import pymongo
import pql
from flask import Flask, request, g
from flask_restful import Resource, Api, reqparse, abort
app = Flask(__name__)
api = Api(app)
app.config.from_pyfile('config.py', silent=True)
def get_resources_db():
    """Opens a new database connection if there is none yet for the
    current application context.

    Returns the 'resources' collection (database literally named 'db'),
    memoized on flask.g with a unique index on the 'id' field.
    """
    if not hasattr(g, 'resources'):
        g.resources = pymongo.MongoClient(app.config['MONGO_URL']).db.resources
        # create_index is idempotent and replaces ensure_index, which was
        # deprecated in PyMongo 3 and removed in PyMongo 4.
        g.resources.create_index('id', unique=True)
    return g.resources
# Parser for the lock endpoint: who is locking what, for how long.
post_parser = reqparse.RequestParser()
post_parser.add_argument(
    'username', dest='username',
    required=True,
    help='The user\'s username',
)
post_parser.add_argument(
    'resource', dest='resource',
    required=False,
    help='The resource to lock',
)
# NOTE(review): no type= is declared, so this value arrives as a string;
# Lock.post feeds it to timedelta(minutes=...) — confirm a numeric type
# conversion happens somewhere.
post_parser.add_argument(
    'duration', dest='duration',
    help="for how much time the resource will be saved"
)
post_parser.add_argument(
    'q', dest='query',
    required=False,
    help='query to use',
)
# Parser for read endpoints: an optional PQL query string.
query_parser = reqparse.RequestParser()
query_parser.add_argument(
    'q', dest='query',
    required=False,
    help='query to use',
)
class LockedResource(Resource):
    """CRUD endpoints for a single named resource document."""

    @staticmethod
    def put(id):
        """Create resource `id` with the request body as its data payload.

        Returns the stored document; 405 if the id is already in use.
        """
        resources = get_resources_db()
        try:
            # insert_one replaces the Collection.insert method that was
            # deprecated in PyMongo 3 (and matches update_one used in patch);
            # the unique index on 'id' raises DuplicateKeyError on conflict.
            resources.insert_one(dict(id=id, data=request.get_json()))
            obj = resources.find_one(dict(id=id))
            return json.loads(json.dumps(obj, sort_keys=True, indent=4, default=json_util.default))
        except pymongo.errors.DuplicateKeyError:
            abort(405, message="key[%s] already in use" % id)

    @staticmethod
    def patch(id):
        """Shallow-merge the request body into the resource's data fields."""
        resources = get_resources_db()
        # Dotted "data.<key>" paths update individual fields in place.
        resources.update_one(dict(id=id),
                             {"$set": {"data.%s" % k: v for k, v in request.get_json().items()}})
        obj = resources.find_one(dict(id=id))
        if not obj:
            abort(404, message="%s not found" % id)
        return json.loads(json.dumps(obj, sort_keys=True, indent=4, default=json_util.default))

    @staticmethod
    def get(id):
        """Return the resource document (null when it does not exist)."""
        resources = get_resources_db()
        obj = resources.find_one(dict(id=id))
        return json.loads(json.dumps(obj, sort_keys=True, indent=4, default=json_util.default))

    @staticmethod
    def delete(id):
        """Delete the resource; 404 if it was not present."""
        resources = get_resources_db()
        # delete_one replaces the deprecated Collection.remove;
        # deleted_count mirrors the old result['n'] acknowledgement field.
        result = resources.delete_one(dict(id=id))
        if result.deleted_count < 1:
            abort(404, message="key[%s] wasn't found" % id)
        return "deleted"
class LockedResourceList(Resource):
    """Collection endpoint: list resources, optionally filtered by a PQL query."""

    def get(self):
        """Return every matching resource document; 404 when none match."""
        collection = get_resources_db()
        params = query_parser.parse_args()
        # Translate the optional PQL expression into a MongoDB filter.
        mongo_filter = pql.find(params.query) if params.query else None
        documents = list(collection.find(mongo_filter))
        if not documents:
            abort(404, message="'%s' wasn't found" % params.query)
        return json.loads(json.dumps(documents, sort_keys=True, indent=4, default=json_util.default))
class Lock(Resource):
    """Lock and unlock endpoints for resources."""

    @staticmethod
    def post():
        """Lock a resource for args.username until now + duration minutes.

        The resource is selected either by a PQL query (q=) or by its id.
        405 when it is already locked, 404 when it does not exist.
        """
        resources = get_resources_db()
        args = post_parser.parse_args()
        if args.query:
            query = pql.find(args.query)
        else:
            query = dict(id=args.resource)
        obj = resources.find_one(query)
        if not obj:
            abort(404, message="resource[%s] wasn't found" % args.resource)
        if obj.get('locked_by', None):
            abort(405, message="locked by %s" % obj['locked_by'])
        # BUGFIX: reqparse delivers 'duration' as a string (no type= was
        # declared) and timedelta(minutes=<str>) raises TypeError.
        duration = float(args.duration) if args.duration else 0
        lock_endtime = datetime.datetime.now() + datetime.timedelta(minutes=duration)
        resources.update_one({'_id': obj['_id']},
                             {"$set": {'locked_by': args.username, 'lock_end': lock_endtime}})
        # BUGFIX: re-fetch by _id — when the resource was selected via a PQL
        # query, args.resource is None and a lookup by id would miss.
        obj = resources.find_one({'_id': obj['_id']})
        return json.loads(json.dumps(obj, sort_keys=True, indent=4, default=json_util.default))

    @staticmethod
    def delete(id):
        """Release the lock on resource `id`; 404 if absent or not locked."""
        resources = get_resources_db()
        obj = resources.find_one(dict(id=id))
        if not obj:
            abort(404, message="resource[%s] wasn't found" % id)
        if obj.get('locked_by', None):
            resources.update_one({'_id': obj['_id']}, {"$unset": {'locked_by': '', 'lock_end': ''}})
            obj = resources.find_one(dict(id=id))
            return json.loads(json.dumps(obj, sort_keys=True, indent=4, default=json_util.default))
        else:
            abort(404, message="resource[%s] is not locked" % id)
# REST routes: collection listing, single-resource CRUD, and lock management.
api.add_resource(LockedResourceList, '/resources')
api.add_resource(LockedResource, '/resource/<string:id>')
api.add_resource(Lock, '/lock', '/lock/<string:id>')
| 3,032 | 292 | 95 |
05240aa8c3b464addff9da5629d2e9178b976d59 | 2,229 | py | Python | examples/hireps/pendulum/pendulum_cart.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 8 | 2021-06-21T18:58:56.000Z | 2021-12-13T09:47:41.000Z | examples/hireps/pendulum/pendulum_cart.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | null | null | null | examples/hireps/pendulum/pendulum_cart.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 1 | 2021-06-29T04:42:45.000Z | 2021-06-29T04:42:45.000Z | import numpy as np
import gym
import scipy as sc
from scipy.special import comb
import torch
from sds.distributions.gamma import Gamma
from sds.models import HybridController
from reps.hireps import hbREPS
# np.random.seed(1337)
# torch.manual_seed(1337)
torch.set_num_threads(1)
# Custom pendulum env: 20 ms steps, small process noise, long horizon.
env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 5000
env.unwrapped.dt = 0.02
env.unwrapped.sigma = 1e-4
# env.seed(1337)
state_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Pre-trained rARHMM hybrid dynamics; the controller keeps one local
# polynomial policy per discrete mode.
dyn = torch.load(open('./rarhmm_pendulum_cart.pkl', 'rb'))
nb_modes = dyn.nb_states
# ctl type
ctl_type = 'ard'
ctl_degree = 3
# ctl_prior
# Polynomial feature count of the state up to ctl_degree, excluding the
# constant column; the +1 below re-adds a bias input.
feat_dim = int(comb(ctl_degree + state_dim, ctl_degree)) - 1
input_dim = feat_dim + 1
output_dim = act_dim
# Gamma priors for the ARD controller: one on the likelihood (noise)
# precision and one per input feature on the parameter precisions.
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1,)) + 1e-8,
                                   betas=25. * np.ones((1,)))
parameter_precision_prior = Gamma(dim=input_dim, alphas=np.ones((input_dim,)) + 1e-8,
                                  betas=1e1 * np.ones((input_dim,)))
ctl_prior = {'likelihood_precision_prior': likelihood_precision_prior,
             'parameter_precision_prior': parameter_precision_prior}
ctl_kwargs = {'degree': ctl_degree}
ctl = HybridController(dynamics=dyn, ctl_type=ctl_type,
                       ctl_prior=ctl_prior, ctl_kwargs=ctl_kwargs)
# init controller
# Zero gains with fixed precision 1/25 per mode as the starting policy.
Ks = np.stack([np.zeros((output_dim, input_dim))] * nb_modes, axis=0)
lmbdas = np.stack([1. / 25. * np.eye(output_dim)] * nb_modes, axis=0)
ctl.controls.params = Ks, lmbdas
hbreps = hbREPS(env=env, dyn=dyn, ctl=ctl,
                kl_bound=0.1, discount=0.985,
                scale=[1., 1., 8.0], mult=0.5,
                nb_vfeat=75, vf_reg=1e-8)
ctl_mstep_kwargs = {'nb_iter': 5}
hbreps.run(nb_iter=15, nb_train_samples=5000,
           nb_eval_rollouts=25, nb_eval_steps=250,
           ctl_mstep_kwargs=ctl_mstep_kwargs,
           iterative=False)
# Final evaluation: plot each state dimension and the control per rollout.
rollouts, _ = hbreps.evaluate(nb_rollouts=25, nb_steps=250)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=hbreps.state_dim + hbreps.act_dim, figsize=(12, 4))
for roll in rollouts:
    for k, col in enumerate(ax[:-1]):
        col.plot(roll['x'][:, k])
    ax[-1].plot(roll['uc'])
plt.show()
| 29.72 | 89 | 0.677882 | import numpy as np
import gym
import scipy as sc
from scipy.special import comb
import torch
from sds.distributions.gamma import Gamma
from sds.models import HybridController
from reps.hireps import hbREPS
# np.random.seed(1337)
# torch.manual_seed(1337)
torch.set_num_threads(1)
# Custom pendulum env: 20 ms steps, small process noise, long horizon.
env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 5000
env.unwrapped.dt = 0.02
env.unwrapped.sigma = 1e-4
# env.seed(1337)
state_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Pre-trained rARHMM hybrid dynamics; the controller keeps one local
# polynomial policy per discrete mode.
dyn = torch.load(open('./rarhmm_pendulum_cart.pkl', 'rb'))
nb_modes = dyn.nb_states
# ctl type
ctl_type = 'ard'
ctl_degree = 3
# ctl_prior
# Polynomial feature count of the state up to ctl_degree, excluding the
# constant column; the +1 below re-adds a bias input.
feat_dim = int(comb(ctl_degree + state_dim, ctl_degree)) - 1
input_dim = feat_dim + 1
output_dim = act_dim
# Gamma priors for the ARD controller: one on the likelihood (noise)
# precision and one per input feature on the parameter precisions.
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1,)) + 1e-8,
                                   betas=25. * np.ones((1,)))
parameter_precision_prior = Gamma(dim=input_dim, alphas=np.ones((input_dim,)) + 1e-8,
                                  betas=1e1 * np.ones((input_dim,)))
ctl_prior = {'likelihood_precision_prior': likelihood_precision_prior,
             'parameter_precision_prior': parameter_precision_prior}
ctl_kwargs = {'degree': ctl_degree}
ctl = HybridController(dynamics=dyn, ctl_type=ctl_type,
                       ctl_prior=ctl_prior, ctl_kwargs=ctl_kwargs)
# init controller
# Zero gains with fixed precision 1/25 per mode as the starting policy.
Ks = np.stack([np.zeros((output_dim, input_dim))] * nb_modes, axis=0)
lmbdas = np.stack([1. / 25. * np.eye(output_dim)] * nb_modes, axis=0)
ctl.controls.params = Ks, lmbdas
hbreps = hbREPS(env=env, dyn=dyn, ctl=ctl,
                kl_bound=0.1, discount=0.985,
                scale=[1., 1., 8.0], mult=0.5,
                nb_vfeat=75, vf_reg=1e-8)
ctl_mstep_kwargs = {'nb_iter': 5}
hbreps.run(nb_iter=15, nb_train_samples=5000,
           nb_eval_rollouts=25, nb_eval_steps=250,
           ctl_mstep_kwargs=ctl_mstep_kwargs,
           iterative=False)
# Final evaluation: plot each state dimension and the control per rollout.
rollouts, _ = hbreps.evaluate(nb_rollouts=25, nb_steps=250)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=hbreps.state_dim + hbreps.act_dim, figsize=(12, 4))
for roll in rollouts:
    for k, col in enumerate(ax[:-1]):
        col.plot(roll['x'][:, k])
    ax[-1].plot(roll['uc'])
plt.show()
| 0 | 0 | 0 |
18c63db8476e3140408ff6aba738699a1ebfd1e2 | 3,660 | py | Python | Algorithm.Python/DividendAlgorithm.py | albertozatton/quant | 17381cb2520de0fbad328a7d7412a6e5aacb2e19 | [
"Apache-2.0"
] | 1 | 2022-03-11T09:30:09.000Z | 2022-03-11T09:30:09.000Z | Algorithm.Python/DividendAlgorithm.py | vmanjunathan/Lean | ff783a4d3abd38e6f56c8f9d55804175a421a021 | [
"Apache-2.0"
] | null | null | null | Algorithm.Python/DividendAlgorithm.py | vmanjunathan/Lean | ff783a4d3abd38e6f56c8f9d55804175a421a021 | [
"Apache-2.0"
] | 1 | 2020-08-09T21:36:47.000Z | 2020-08-09T21:36:47.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Brokerages import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
class DividendAlgorithm(QCAlgorithm):
    '''Showcases the dividend and split event of QCAlgorithm
    The data for this algorithm isn't in the github repo, so this will need to be run on the QC site'''
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # BUGFIX: leading-zero literals (01) are a SyntaxError in Python 3;
        # plain ints are the same value in Python 2, so behavior is unchanged.
        self.SetStartDate(1998, 1, 1)    #Set Start Date
        self.SetEndDate(2006, 1, 21)     #Set End Date
        self.SetCash(100000)             #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddSecurity(SecurityType.Equity, "MSFT", Resolution.Daily)
        # Raw normalization so dividends/splits arrive as discrete events
        # instead of being folded into adjusted prices.
        self.Securities["MSFT"].SetDataNormalizationMode(DataNormalizationMode.Raw)
        # this will use the Tradier Brokerage open order split behavior
        # forward split will modify open order to maintain order value
        # reverse split open orders will be cancelled
        self.SetBrokerageModel(BrokerageName.TradierBrokerage)
    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        if self.Transactions.OrdersCount == 0:
            self.SetHoldings("MSFT", .5)
            # place some orders that won't fill, when the split comes in they'll get modified to reflect the split
            self.Debug("Purchased Stock: {0}".format(self.Securities["MSFT"].Price))
            self.StopMarketOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].Low/2)
            self.LimitOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].High*2)
        for kvp in data.Dividends: # update this to Dividends dictionary
            symbol = kvp.Key
            value = kvp.Value.Distribution
            self.Log("{0} >> DIVIDEND >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Price))
        for kvp in data.Splits: # update this to Splits dictionary
            symbol = kvp.Key
            value = kvp.Value.SplitFactor
            self.Log("{0} >> SPLIT >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Quantity))
| 48.8 | 151 | 0.694809 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Brokerages import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
class DividendAlgorithm(QCAlgorithm):
    '''Showcases the dividend and split event of QCAlgorithm
    The data for this algorithm isn't in the github repo, so this will need to be run on the QC site'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # Fixed: the original `SetStartDate(1998,01,01)` / `SetEndDate(2006,01,21)`
        # used leading-zero integer literals, which are a SyntaxError in Python 3
        # (and silently octal in Python 2); the values are unchanged.
        self.SetStartDate(1998, 1, 1)    #Set Start Date
        self.SetEndDate(2006, 1, 21)     #Set End Date
        self.SetCash(100000)             #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddSecurity(SecurityType.Equity, "MSFT", Resolution.Daily)
        # Raw normalization delivers unadjusted prices, so dividends and splits
        # arrive as explicit events instead of being folded into the price series.
        self.Securities["MSFT"].SetDataNormalizationMode(DataNormalizationMode.Raw)
        # this will use the Tradier Brokerage open order split behavior
        # forward split will modify open order to maintain order value
        # reverse split open orders will be cancelled
        self.SetBrokerageModel(BrokerageName.TradierBrokerage)

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        if self.Transactions.OrdersCount == 0:
            self.SetHoldings("MSFT", .5)
            # place some orders that won't fill, when the split comes in they'll get modified to reflect the split
            self.Debug("Purchased Stock: {0}".format(self.Securities["MSFT"].Price))
            self.StopMarketOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].Low/2)
            self.LimitOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].High*2)
        for kvp in data.Dividends: # update this to Dividends dictionary
            symbol = kvp.Key
            value = kvp.Value.Distribution
            self.Log("{0} >> DIVIDEND >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Price))
        for kvp in data.Splits: # update this to Splits dictionary
            symbol = kvp.Key
            value = kvp.Value.SplitFactor
            self.Log("{0} >> SPLIT >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Quantity))

    def OnOrderEvent(self, orderEvent):
        '''Logs each order event; split-driven order adjustments surface here.'''
        # orders get adjusted based on split events to maintain order value
        order = self.Transactions.GetOrderById(orderEvent.OrderId)
        self.Log("{0} >> ORDER >> {1}".format(self.Time, order))
176d5912fea1a5af11e2bc7beb6f9b3f56c0ae8d | 2,730 | py | Python | adept/__init__.py | fictivekin/adept-python | 56dad62922badd238fd1614fc27ba3ad31380446 | [
"MIT"
] | 1 | 2019-01-14T11:50:04.000Z | 2019-01-14T11:50:04.000Z | adept/__init__.py | fictivekin/adept-python | 56dad62922badd238fd1614fc27ba3ad31380446 | [
"MIT"
] | null | null | null | adept/__init__.py | fictivekin/adept-python | 56dad62922badd238fd1614fc27ba3ad31380446 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import hashlib
import hmac
from urllib import quote as urlquote
from .errors import OperationError, AccountError
| 30.333333 | 110 | 0.545788 | # -*- coding: utf-8 -*-
import hashlib
import hmac
from urllib import quote as urlquote
from .errors import OperationError, AccountError
class Adept(object):
    """Builds signed CloudFront URLs for the Adept image-processing service.

    URLs embed the account id, the requested operations and an HMAC-SHA1
    signature of the request path computed with the secret account key.
    """

    def __init__(self, account_id, account_key, cloudfront_hostname, default_bucket=None):
        """
        Args:
            account_id: Adept account identifier (required).
            account_key: Secret key used to sign request paths (required).
            cloudfront_hostname: Hostname that serves the processed assets (required).
            default_bucket: Optional S3 bucket used when a request gives none.

        Raises:
            AccountError: If any required credential is missing.
        """
        if account_id is not None:
            self.account_id = account_id
        else:
            raise AccountError('Please provide a valid account ID')

        if account_key is not None:
            self.account_key = account_key
        else:
            raise AccountError('Please provide a valid account key')

        if cloudfront_hostname is not None:
            self.cloudfront_hostname = cloudfront_hostname
        else:
            raise AccountError('Please provide a valid CloudFront hostname')

        # Fixed: always assign the attribute. The original only set it when a
        # value was passed, so generate_url() raised AttributeError (instead of
        # the intended OperationError) whenever no default bucket was configured.
        self.default_bucket = default_bucket

    def _generate_hash(self, path):
        """Return the hex HMAC-SHA1 digest of `path` under the account key."""
        request_hash = hmac.new(
            self.account_key,
            path,
            hashlib.sha1
        )
        return request_hash.hexdigest()

    def generate_url(self, bucket=None, asset_key=None, asset_url=None, operations=['identity'], secure=True):
        """
        Given an S3 key and bucket/URL, perform given image operations on an image and return the URL.

        Args:
            bucket: S3 bucket holding the asset; falls back to `default_bucket`.
            asset_key: S3 key of the asset (takes precedence over `asset_url`).
            asset_url: Remote URL of the asset, used via the `/loader/` endpoint.
            operations: Path segments describing the image operations to apply.
                NOTE: the mutable default is safe here because the list is only
                read (joined), never mutated.
            secure: Use https when True, http otherwise.

        Returns:
            The signed Adept URL as a string.

        Raises:
            OperationError: If no operations are given, no bucket can be
                resolved for `asset_key`, or neither key nor URL is provided.
        """
        if len(operations) < 1:
            raise OperationError('You didn\'t provide any operations to perform on the image')

        protocol = 'https://' if secure else 'http://'
        base_url = '%s%s' % (protocol, self.cloudfront_hostname)

        if asset_key is not None:
            if bucket is None:
                if self.default_bucket is None:
                    raise OperationError('No S3 bucket has been provided.')
                else:
                    bucket = self.default_bucket

            # Signed path: /<bucket>/<key>/<op>/<op>/.../<account_id>
            path = '/%s/%s/%s/%s' % (
                bucket,
                asset_key,
                '/'.join(operations),
                self.account_id,
            )
            request_hash = self._generate_hash(path)
            adept_url = ('%s%s/%s' % (
                base_url,
                path,
                request_hash)
            )
        elif asset_url is not None:
            # Loader endpoint: the signature covers the path *plus* the raw
            # (unquoted) url query; the emitted URL carries the quoted url.
            path = '/loader/%s/%s' % (
                '/'.join(operations),
                self.account_id
            )
            loader_uri = '%s?url=%s' % (path, asset_url)
            request_hash = self._generate_hash(loader_uri)
            adept_url = '%s%s/%s?url=%s' % (
                base_url,
                path,
                request_hash,
                urlquote(asset_url)
            )
        else:
            raise OperationError("Asset key or URL must be provided")

        return adept_url
| 844 | 1,725 | 23 |
99714978b0b817fb700292a492f8900f1517ebec | 4,771 | py | Python | bash_to_python/tail.py | blakfeld/Bash-To-Python | d6a56dedbce1314e33e9773fa7039f25fccef1c3 | [
"MIT"
] | null | null | null | bash_to_python/tail.py | blakfeld/Bash-To-Python | d6a56dedbce1314e33e9773fa7039f25fccef1c3 | [
"MIT"
] | null | null | null | bash_to_python/tail.py | blakfeld/Bash-To-Python | d6a56dedbce1314e33e9773fa7039f25fccef1c3 | [
"MIT"
] | null | null | null | """
tail.py -- Emulate UNIX tail.
Author: Corwin Brown
E-Mail: blakfeld@gmail.com
Date: 5/24/2015
"""
import sys
import time
| 29.269939 | 76 | 0.518759 | """
tail.py -- Emulate UNIX tail.
Author: Corwin Brown
E-Mail: blakfeld@gmail.com
Date: 5/24/2015
"""
import sys
import time
class Tail(object):
BLOCK_SIZE = 512
SLEEP_INTERVAL = 1.0
def __init__(self, stdin=None, fname=None, num_lines=10, watch=False):
"""
Constructor
Args:
args (list): Any command line arguments.
"""
self.stdin = stdin
self.fname = fname
self.num_lines = num_lines
self.watch = watch
def run(self):
"""
Emulate UNIX tail
"""
if self.stdin:
self._tail_stdin(self.stdin, self.num_lines)
return
if isinstance(self.fname, list):
if len(self.fname) > 1:
for f in self.fname:
self._validate_file(f)
print('\n==> {} <=='.format(f))
self._tail_lines(f, self.num_lines)
return
else:
self._validate_file(self.fname[0])
if self.watch:
self._tail_watch(self.fname[0], self.num_lines)
else:
self._tail_lines(self.fname[0], self.num_lines)
return
else:
self._validate_file(self.fname)
if self.watch:
self._tail_watch(self.fname, self.num_lines)
else:
self._tail_lines(self.fname, self.num_lines)
return
def _tail_lines(self, fname, num_lines=10):
"""
Print the last n lines of a file.
Here we will navigate to the end of a file, then march backwards
at some interval (defined by self.BLOCK_SIZE), then count the
number of newlines we have. Once we have greater than or equal
newlines, truncate off any extra, and return.
Args:
fname (str): The file to tail.
num_lines (int): The number of lines from the bottom to display.
"""
block_number = -1
lines_to_go = num_lines
blocks = []
with open(fname, 'r') as f:
f.seek(0, 2)
# Mark the ending byte, so we don't try to read past it.
file_end_byte = f.tell()
while lines_to_go > 0 and file_end_byte > 0:
# If we aren't at the end, backup and read a new block.
if file_end_byte - self.BLOCK_SIZE > 0:
f.seek(self.BLOCK_SIZE * block_number, 2)
blocks.append(f.read(self.BLOCK_SIZE))
else:
f.seek(0, 0)
blocks.append(f.read(file_end_byte))
# Count the number of newlines to see how many lines we have
# left to find.
lines_to_go -= blocks[-1].count('\n')
block_number -= 1
# Reverse the output so we get top to bottom.
tail_text = ''.join(reversed(blocks))
# Truncate off any extra lines and return.
sys.stdout.write('\n'.join(tail_text.splitlines()[-num_lines:]))
def _tail_stdin(self, stdin, num_lines=10):
"""
Print the last n lines of stdin.
Args:
stdin (str): Essentially the output of 'sys.stdin.read()'
"""
sys.stdout.write('\n'.join(stdin.split('\n')[-num_lines:]))
def _tail_watch(self, fname, num_lines=10):
"""
Continuously watch a file and print all new lines.
We already have a function to print the last n lines, lets just call
that, then reopen that file and watch it. It could be more
efficient, but file handles are pretty cheap.
Args:
fname (str): The file to watch.
"""
self._tail_lines(fname, num_lines)
with open(fname, 'r') as f:
f.seek(0, 2)
while True:
# Find the end of the file, see if it contains any data,
# if not, sleep, then seek back to that same point, and
# check again.
file_end_byte = f.tell()
line = f.readline()
if not line:
time.sleep(self.SLEEP_INTERVAL)
f.seek(file_end_byte)
else:
sys.stdout.write(line)
def _validate_file(self, fname):
"""
Ensure that the provided file exists.
Args:
fname (str): The file to validate.
Raises:
ValueError: If fname does not exist.
"""
if not os.path.exists(fname):
raise ValueError('tail: {}: No such file or directory.'
.format(fname))
pass
| 0 | 4,620 | 23 |
e7bd25abfac8f18d2b26d78c00a893195783ee97 | 822 | py | Python | KernelGAN/blur_images.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 20 | 2020-08-24T07:21:30.000Z | 2021-07-09T04:20:06.000Z | KernelGAN/blur_images.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 1 | 2020-10-21T08:02:18.000Z | 2021-01-17T23:29:58.000Z | KernelGAN/blur_images.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 3 | 2020-11-19T05:17:10.000Z | 2021-05-04T11:01:10.000Z | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
# NOTE(review): absolute paths are hard-coded to a local checkout -- adjust before running.
img = cv.imread('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/test.jpg')
# 17x17 normalised box kernel (all ones / 17*17 = 289) -> simple averaging blur.
kernel = np.ones((17,17),np.float32)/289
dst = cv.filter2D(img,-1,kernel)
# Anisotropic Gaussian blur: sigma 1 along axis 0, 10 along axis 1, 0 across channels.
g_img = gaussian_filter(img, [1,10,0])
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/blur.png', dst)
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/g_blur.png', g_img)
# [:,:,::-1] converts OpenCV's BGR channel order to RGB for matplotlib display.
plt.subplot(131),plt.imshow(img[:,:,::-1]),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(dst[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
# NOTE(review): this panel shows the Gaussian result but reuses the
# 'Averaging' title -- likely a copy-paste slip; confirm intended label.
plt.subplot(133),plt.imshow(g_img[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show() | 37.363636 | 97 | 0.733577 | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
# NOTE(review): absolute paths are hard-coded to a local checkout -- adjust before running.
img = cv.imread('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/test.jpg')
# 17x17 normalised box kernel (all ones / 17*17 = 289) -> simple averaging blur.
kernel = np.ones((17,17),np.float32)/289
dst = cv.filter2D(img,-1,kernel)
# Anisotropic Gaussian blur: sigma 1 along axis 0, 10 along axis 1, 0 across channels.
g_img = gaussian_filter(img, [1,10,0])
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/blur.png', dst)
cv.imwrite('/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/images/g_blur.png', g_img)
# [:,:,::-1] converts OpenCV's BGR channel order to RGB for matplotlib display.
plt.subplot(131),plt.imshow(img[:,:,::-1]),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(dst[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
# NOTE(review): this panel shows the Gaussian result but reuses the
# 'Averaging' title -- likely a copy-paste slip; confirm intended label.
plt.subplot(133),plt.imshow(g_img[:,:,::-1]),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show() | 0 | 0 | 0 |
b148aa310011894324b56d871905e3d3bdc30a06 | 2,550 | py | Python | examples/sample_iso_cliques.py | AtsushiHashimoto/isolated_clique_enumeration | 9ff817f57d929b5db07265fe6d85013c13825869 | [
"MIT"
] | null | null | null | examples/sample_iso_cliques.py | AtsushiHashimoto/isolated_clique_enumeration | 9ff817f57d929b5db07265fe6d85013c13825869 | [
"MIT"
] | null | null | null | examples/sample_iso_cliques.py | AtsushiHashimoto/isolated_clique_enumeration | 9ff817f57d929b5db07265fe6d85013c13825869 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('../isoclique')
import isoclique as ic
import networkx as nx
import matplotlib.pyplot as plt
import random
import math
import time
if __name__ == '__main__':
    # Demo graph: Zachary's karate club.
    E = nx.karate_club_graph().edges()

    start = time.time()
    ic_graph = ic.IsolatedCliques(E)
    elapsed_time = time.time()-start
    print("%.5f sec. elapsed for graph sorting."%elapsed_time)

    nodes = ic_graph.nodes()
    edges = ic_graph.edges()
    for v,neigh in zip(nodes,edges):
        print(v,": ", neigh)

    isolation_factor = 2

    # Fixed: `callback` was referenced below but never defined in this copy of
    # the script (NameError); restore the isolation threshold f(k) = c*log(k),
    # closing over `isolation_factor`.
    def callback(k):
        return isolation_factor*math.log(k)

    start = time.time()
    # pivots, iso_cliques = ic_graph.enumerate(isolation_factor=isolation_factor)
    pivots, iso_cliques = ic_graph.enumerate(callback=callback)
    elapsed_time = time.time()-start
    print("%.5f sec. elapsed for enumeration."%elapsed_time)

    print("Isolated Cliques")
    for pivot, ic in zip(pivots,iso_cliques):
        stats = ic_graph.evaluate_subgraph(ic)
        print("Pivot: ",pivot, " => [",ic,"]") # ic_graph.decode(ic,1)

    # Brute-force enumeration for comparison.
    # _ics = ic_graph.enumerate_blute(isolation_factor=isolation_factor, at_most=-1)
    _ics = ic_graph.enumerate_blute(callback=callback, at_most=-1)
    for ic in _ics:
        stats = ic_graph.evaluate_subgraph(ic)
        print(ic) # ic_graph.decode(ic,1)

    sys.exit()

    # drawing
    # NOTE(review): unreachable after sys.exit(); also references undefined
    # names `cliques`, `G` and `generate_random_color_list` in this copy.
    rand_colors = generate_random_color_list(len(cliques))
    pos=nx.spring_layout(G) # positions for all nodes
    node_list = set(G.nodes())
    edge_list = set(G.edges())
    for i in range(len(cliques)):
        H = G.subgraph(cliques[i])
        nx.draw_networkx_nodes(H,pos,
                               nodelist=cliques[i],
                               node_color=rand_colors[i])
        print(H.edges())
        nx.draw_networkx_edges(H,pos,
                               edge_list=H.edges(),
                               edge_color=rand_colors[i],
                               width=4)
        node_list = node_list - set(cliques[i])
        edge_list = edge_list - set(H.edges())
    nx.draw_networkx_nodes(H,pos,nodelist=node_list,node_color="#808080")
    nx.draw_networkx_edges(H,pos,edgelist=edge_list,edge_color="#808080")
    plt.show()
| 28.651685 | 83 | 0.626275 | #!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('../isoclique')
import isoclique as ic
import networkx as nx
import matplotlib.pyplot as plt
import random
import math
import time
def generate_random_color():
    """Return a random bright hex colour string such as '#A1B2C3'.

    Each channel is drawn from [16, 255], so every component formats to
    exactly two hex digits and very dark colours are avoided.
    """
    r, g, b = (random.randint(16, 255) for _ in range(3))
    return '#%X%X%X' % (r, g, b)
def generate_random_color_list(num):
    """Return a list of `num` independently generated random hex colours."""
    return [generate_random_color() for _ in range(num)]
if __name__ == '__main__':
    # Demo graph: Zachary's karate club.
    E = nx.karate_club_graph().edges()
    start = time.time()
    ic_graph = ic.IsolatedCliques(E)
    elapsed_time = time.time()-start
    print("%.5f sec. elapsed for graph sorting."%elapsed_time)
    nodes = ic_graph.nodes()
    edges = ic_graph.edges()
    for v,neigh in zip(nodes,edges):
        print(v,": ", neigh)
    isolation_factor = 2
    # Isolation threshold f(k) = c * log(k), passed to both enumerators below.
    def callback(k):
        return isolation_factor*math.log(k)
    start = time.time()
    # pivots, iso_cliques = ic_graph.enumerate(isolation_factor=isolation_factor)
    pivots, iso_cliques = ic_graph.enumerate(callback=callback)
    elapsed_time = time.time()-start
    print("%.5f sec. elapsed for enumeration."%elapsed_time)
    print("Isolated Cliques")
    for pivot, ic in zip(pivots,iso_cliques):
        stats = ic_graph.evaluate_subgraph(ic)
        print("Pivot: ",pivot, " => [",ic,"]") # ic_graph.decode(ic,1)
    # Brute-force enumeration for comparison against the fast path above.
    # _ics = ic_graph.enumerate_blute(isolation_factor=isolation_factor, at_most=-1)
    _ics = ic_graph.enumerate_blute(callback=callback, at_most=-1)
    for ic in _ics:
        stats = ic_graph.evaluate_subgraph(ic)
        print(ic) # ic_graph.decode(ic,1)
    sys.exit()
    # drawing
    # NOTE(review): unreachable after sys.exit() above; it also references
    # undefined names `cliques` and `G` -- presumably leftovers from an
    # earlier revision. Verify before re-enabling.
    rand_colors = generate_random_color_list(len(cliques))
    pos=nx.spring_layout(G) # positions for all nodes
    node_list = set(G.nodes())
    edge_list = set(G.edges())
    for i in range(len(cliques)):
        H = G.subgraph(cliques[i])
        nx.draw_networkx_nodes(H,pos,
                               nodelist=cliques[i],
                               node_color=rand_colors[i])
        print(H.edges())
        nx.draw_networkx_edges(H,pos,
                               edge_list=H.edges(),
                               edge_color=rand_colors[i],
                               width=4)
        node_list = node_list - set(cliques[i])
        edge_list = edge_list - set(H.edges())
    nx.draw_networkx_nodes(H,pos,nodelist=node_list,node_color="#808080")
    nx.draw_networkx_edges(H,pos,edgelist=edge_list,edge_color="#808080")
    plt.show()
| 252 | 0 | 71 |
10e2930227e6a62d0a350d6fdef2abb269d54845 | 4,920 | py | Python | run_ensemble.py | gnkm/kaggle.house-prices | 5fd1fcc9fcf8e191c0729ab6244b01180d44f535 | [
"MIT"
] | null | null | null | run_ensemble.py | gnkm/kaggle.house-prices | 5fd1fcc9fcf8e191c0729ab6244b01180d44f535 | [
"MIT"
] | null | null | null | run_ensemble.py | gnkm/kaggle.house-prices | 5fd1fcc9fcf8e191c0729ab6244b01180d44f535 | [
"MIT"
] | null | null | null | import argparse
from datetime import datetime as dt
from lightgbm import LGBMRegressor
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, StackingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import yaml
# from models import lgbm as my_lgbm
from cv import r2_cv
from param_tuning.optimizer import ENetOptimizer, LassoOptimizer, LGBMRegressorOptimizer
from preprocessing import load_x, load_y
from utils import print_exit, print_float
# Don't define any function in this file,
# thus don't define main function.
# use var `now` in config file and submit file.
# `now` stamps both the submission csv and the saved config filename.
now = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.yml')
options = parser.parse_args()
# Experiment settings (features, CV folds, hyper-parameter candidates) come from YAML.
with open(options.config, 'r') as file:
    config = yaml.safe_load(file)
features = config['extracted_features']
col_id_name = config['col_id_name']
col_target_name = config['col_target_name']
dropped_ids = config['dropped_ids']
random_state = config['random_state']
n_folds = config['cv']['n_folds']
hyper_parameters = config['params']
Xs = load_x(features, dropped_ids)
X_train_all = Xs['train']
X_test = Xs['test']
y_train_all = load_y(col_id_name, col_target_name, dropped_ids)
# @todo: Modify preprocessor
# NOTE(review): mean-imputation is applied to the test set only here --
# confirm the training set is handled inside load_x/preprocessing.
X_test = X_test.fillna(X_test.mean())
# Lasso
lasso_with_param_candidates = make_pipeline(
    RobustScaler(),
    Lasso(
        random_state=random_state
    )
)
lasso_optimizer = LassoOptimizer(
    lasso_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lasso']['candidates'],
)
lasso_best_params = lasso_optimizer.optimize()
# Rebuild the pipeline with the tuned alpha.
lasso = make_pipeline(
    RobustScaler(),
    Lasso(
        alpha=lasso_best_params['lasso__alpha'],
        random_state=random_state,
    )
)
# Elasticnet
enet_with_param_candidates = make_pipeline(
    RobustScaler(),
    ElasticNet(
        random_state=random_state
    )
)
enet_optimizer = ENetOptimizer(
    enet_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['enet']['candidates']
)
enet_best_params = enet_optimizer.optimize()
# Rebuild the pipeline with the tuned alpha / l1_ratio.
ENet = make_pipeline(
    RobustScaler(),
    ElasticNet(
        alpha=enet_best_params['elasticnet__alpha'],
        l1_ratio=enet_best_params['elasticnet__l1_ratio'],
        random_state=random_state,
    )
)
# Fixed-hyper-parameter base learners (not tuned in this script).
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(
    n_estimators=3000,
    learning_rate=0.05,
    max_depth=4,
    max_features='sqrt',
    min_samples_leaf=15,
    min_samples_split=10,
    loss='huber',
    random_state =5
)
lgbm_instance_params = hyper_parameters['lgbm']['instance']
lgbm_regressor_with_param_candidates = LGBMRegressor(
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_optimizer = LGBMRegressorOptimizer(
    lgbm_regressor_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lgbm']['candidates']
)
lgbm_best_params = lgbm_optimizer.optimize()
lgbm_regressor_with_optimized_params = LGBMRegressor(
    boosting_type=lgbm_best_params['boosting_type'],
    learning_rate=lgbm_best_params['learning_rate'],
    lambda_l1=lgbm_best_params['lambda_l1'],
    lambda_l2=lgbm_best_params['lambda_l2'],
    # default params
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_regressor_with_optimized_params.fit(X_train_all, y_train_all)
# Predictions appear to be in log space (np.exp below) -- assumes load_y
# returned log-transformed targets; TODO confirm.
lgbm_y_pred_logarithmic = lgbm_regressor_with_optimized_params.predict(X_test) # error
lgbm_y_pred = np.exp(lgbm_y_pred_logarithmic)
# NOTE(review): `lgbm_y_pred` is computed but never used below -- the stacked
# model alone produces the submission.
# Stacking
estimators = [
    ('lasso', lasso),
    ('ENet', ENet),
    ('KRR', KRR),
    ('GBoost', GBoost),
    ('LGBM', lgbm_regressor_with_optimized_params),
]
stacking_regressor = StackingRegressor(
    estimators=estimators,
    final_estimator=RandomForestRegressor(
        n_estimators=10,
        random_state=42
    )
)
# Train
stacking_regressor.fit(X_train_all, y_train_all)
# Predict
y_pred_logarithmic = stacking_regressor.predict(X_test) # error
y_pred = np.exp(y_pred_logarithmic)
# Evaluate
# Cross-validated R^2 of the stacked model; the mean is embedded in filenames.
scores = r2_cv(stacking_regressor, X_train_all, y_train_all, n_folds)
score = scores.mean()
sub_df = pd.DataFrame(
    pd.read_feather('data/input/test.feather')[col_id_name]
)
sub_df[col_target_name] = y_pred
sub_df.to_csv(
    './data/output/sub_{time}_{score:.5f}.csv'.format(
        time=now,
        score=score,
    ),
    index=False
)
# Save the exact config alongside the submission for reproducibility.
config_file_name = './configs/{time}_{score:.5f}.yml'.format(
    time=now,
    score=score,
)
with open(config_file_name, 'w') as file:
    yaml.dump(config, file)
| 27.333333 | 96 | 0.74878 | import argparse
from datetime import datetime as dt
from lightgbm import LGBMRegressor
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, StackingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import yaml
# from models import lgbm as my_lgbm
from cv import r2_cv
from param_tuning.optimizer import ENetOptimizer, LassoOptimizer, LGBMRegressorOptimizer
from preprocessing import load_x, load_y
from utils import print_exit, print_float
# Don't define any function in this file,
# thus don't define main function.
# use var `now` in config file and submit file.
# `now` stamps both the submission csv and the saved config filename.
now = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.yml')
options = parser.parse_args()
# Experiment settings (features, CV folds, hyper-parameter candidates) come from YAML.
with open(options.config, 'r') as file:
    config = yaml.safe_load(file)
features = config['extracted_features']
col_id_name = config['col_id_name']
col_target_name = config['col_target_name']
dropped_ids = config['dropped_ids']
random_state = config['random_state']
n_folds = config['cv']['n_folds']
hyper_parameters = config['params']
Xs = load_x(features, dropped_ids)
X_train_all = Xs['train']
X_test = Xs['test']
y_train_all = load_y(col_id_name, col_target_name, dropped_ids)
# @todo: Modify preprocessor
# NOTE(review): mean-imputation is applied to the test set only here --
# confirm the training set is handled inside load_x/preprocessing.
X_test = X_test.fillna(X_test.mean())
# Lasso
lasso_with_param_candidates = make_pipeline(
    RobustScaler(),
    Lasso(
        random_state=random_state
    )
)
lasso_optimizer = LassoOptimizer(
    lasso_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lasso']['candidates'],
)
lasso_best_params = lasso_optimizer.optimize()
# Rebuild the pipeline with the tuned alpha.
lasso = make_pipeline(
    RobustScaler(),
    Lasso(
        alpha=lasso_best_params['lasso__alpha'],
        random_state=random_state,
    )
)
# Elasticnet
enet_with_param_candidates = make_pipeline(
    RobustScaler(),
    ElasticNet(
        random_state=random_state
    )
)
enet_optimizer = ENetOptimizer(
    enet_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['enet']['candidates']
)
enet_best_params = enet_optimizer.optimize()
# Rebuild the pipeline with the tuned alpha / l1_ratio.
ENet = make_pipeline(
    RobustScaler(),
    ElasticNet(
        alpha=enet_best_params['elasticnet__alpha'],
        l1_ratio=enet_best_params['elasticnet__l1_ratio'],
        random_state=random_state,
    )
)
# Fixed-hyper-parameter base learners (not tuned in this script).
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(
    n_estimators=3000,
    learning_rate=0.05,
    max_depth=4,
    max_features='sqrt',
    min_samples_leaf=15,
    min_samples_split=10,
    loss='huber',
    random_state =5
)
lgbm_instance_params = hyper_parameters['lgbm']['instance']
lgbm_regressor_with_param_candidates = LGBMRegressor(
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_optimizer = LGBMRegressorOptimizer(
    lgbm_regressor_with_param_candidates,
    X_train_all,
    y_train_all,
    n_folds,
    hyper_parameters['lgbm']['candidates']
)
lgbm_best_params = lgbm_optimizer.optimize()
lgbm_regressor_with_optimized_params = LGBMRegressor(
    boosting_type=lgbm_best_params['boosting_type'],
    learning_rate=lgbm_best_params['learning_rate'],
    lambda_l1=lgbm_best_params['lambda_l1'],
    lambda_l2=lgbm_best_params['lambda_l2'],
    # default params
    random_state=random_state,
    silent=lgbm_instance_params['silent'],
)
lgbm_regressor_with_optimized_params.fit(X_train_all, y_train_all)
# Predictions appear to be in log space (np.exp below) -- assumes load_y
# returned log-transformed targets; TODO confirm.
lgbm_y_pred_logarithmic = lgbm_regressor_with_optimized_params.predict(X_test) # error
lgbm_y_pred = np.exp(lgbm_y_pred_logarithmic)
# NOTE(review): `lgbm_y_pred` is computed but never used below -- the stacked
# model alone produces the submission.
# Stacking
estimators = [
    ('lasso', lasso),
    ('ENet', ENet),
    ('KRR', KRR),
    ('GBoost', GBoost),
    ('LGBM', lgbm_regressor_with_optimized_params),
]
stacking_regressor = StackingRegressor(
    estimators=estimators,
    final_estimator=RandomForestRegressor(
        n_estimators=10,
        random_state=42
    )
)
# Train
stacking_regressor.fit(X_train_all, y_train_all)
# Predict
y_pred_logarithmic = stacking_regressor.predict(X_test) # error
y_pred = np.exp(y_pred_logarithmic)
# Evaluate
# Cross-validated R^2 of the stacked model; the mean is embedded in filenames.
scores = r2_cv(stacking_regressor, X_train_all, y_train_all, n_folds)
score = scores.mean()
sub_df = pd.DataFrame(
    pd.read_feather('data/input/test.feather')[col_id_name]
)
sub_df[col_target_name] = y_pred
sub_df.to_csv(
    './data/output/sub_{time}_{score:.5f}.csv'.format(
        time=now,
        score=score,
    ),
    index=False
)
# Save the exact config alongside the submission for reproducibility.
config_file_name = './configs/{time}_{score:.5f}.yml'.format(
    time=now,
    score=score,
)
with open(config_file_name, 'w') as file:
    yaml.dump(config, file)
| 0 | 0 | 0 |
500b280c30d2110068a5ebf3ccc96e2778c5344d | 807 | py | Python | Python/HotelPrices/solution.py | cs-mshah/AlgoCode | 0d6556483c08f63166fd74d4ff80fb48d75e4a63 | [
"MIT"
] | 151 | 2020-10-01T07:38:26.000Z | 2022-03-31T10:07:55.000Z | Python/HotelPrices/solution.py | cs-mshah/AlgoCode | 0d6556483c08f63166fd74d4ff80fb48d75e4a63 | [
"MIT"
] | 285 | 2020-10-01T09:34:29.000Z | 2021-08-02T12:13:49.000Z | Python/HotelPrices/solution.py | cs-mshah/AlgoCode | 0d6556483c08f63166fd74d4ff80fb48d75e4a63 | [
"MIT"
] | 275 | 2020-10-01T09:43:51.000Z | 2022-03-30T19:30:53.000Z |
# object created and method calling
# NOTE(review): the `hotel` class definition is missing from this copy of the
# file -- these lines raise NameError unless `hotel` is defined elsewhere.
objHotel = hotel()
objHotel.calculateFee()
| 29.888889 | 69 | 0.525403 | class hotel():
    def calculateFee(self):
        """
        This Method calculates cost of hotel room based
        on described scenario.

        Reads the number of test cases from stdin, then for each case reads a
        room type, bedroom count and bathroom count, prices the room as
        5 * bedrooms + 2 * bathrooms (+200 flat surcharge for luxury) and
        prints a running total after each case.

        NOTE(review): ``totalCost`` is never reset between cases, so each
        print shows the cumulative total over all cases so far -- confirm
        this is intended (a per-case price would print ``roomCost`` instead).

        :return:
        none.
        """
        testCases = int(input("Enter no of test cases:"))
        totalCost = 0
        # NOTE(review): a negative count loops forever (decrements past zero).
        while (testCases != 0):
            roomType = input("Select room-type (standard/luxury) :")
            bedrooms = int(input("Enter no of bedrooms:"))
            bathrooms = int(input("Enter no of bathrooms:"))
            roomCost = 5 * bedrooms + 2 * bathrooms
            if (roomType == 'luxury'):
                roomCost += 200  # flat luxury surcharge
            totalCost += roomCost
            print(totalCost)
            testCases -= 1
# object created and method calling
# Script entry point: instantiate and run the interactive fee calculator.
objHotel = hotel()
objHotel.calculateFee()
| 0 | 698 | 23 |
985bde9d000f6e6ceedb6a8dd30d3f11831b4cdb | 497 | py | Python | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/plot.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | null | null | null | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/plot.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | null | null | null | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/plot.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | 1 | 2021-02-02T17:09:04.000Z | 2021-02-02T17:09:04.000Z | test=np.arange(-2,2,0.1)
v1=[]
v2=[]
v3=[]
# Evaluate the pinball (quantile) loss at tau = 0.2 / 0.5 / 0.8 for each z.
# NOTE(review): `np`, `cp` (cvxpy?), `pinball` and `plt` are not defined in
# this copy -- the import section appears to have been stripped; verify
# against the full project before running.
for i in test:
    i=cp.Constant(i)
    v1.append(pinball(i,0.2).value)
    v2.append(pinball(i,0.5).value)
    v3.append(pinball(i,0.8).value)
plt.plot(test,v1,label="$\\tau=0.2$")
plt.plot(test,v2,label="$\\tau=0.5$")
plt.plot(test,v3,label="$\\tau=0.8$")
plt.xlabel("z")
plt.ylabel("$l_\\tau (z)$")
plt.legend(loc=[1.01, 0.4])
plt.xlim(-2, 2)
plt.ylim(-0.5, 2)
plt.gca().set_aspect('equal', adjustable='box')
plt.grid()
plt.show() | 23.666667 | 48 | 0.585513 | test=np.arange(-2,2,0.1)
v1=[]
v2=[]
v3=[]
# Evaluate the pinball (quantile) loss at tau = 0.2 / 0.5 / 0.8 for each z.
# NOTE(review): `np`, `cp` (cvxpy?), `pinball` and `plt` are not defined in
# this copy -- the import section appears to have been stripped; verify
# against the full project before running.
for i in test:
    i=cp.Constant(i)
    v1.append(pinball(i,0.2).value)
    v2.append(pinball(i,0.5).value)
    v3.append(pinball(i,0.8).value)
plt.plot(test,v1,label="$\\tau=0.2$")
plt.plot(test,v2,label="$\\tau=0.5$")
plt.plot(test,v3,label="$\\tau=0.8$")
plt.xlabel("z")
plt.ylabel("$l_\\tau (z)$")
plt.legend(loc=[1.01, 0.4])
plt.xlim(-2, 2)
plt.ylim(-0.5, 2)
plt.gca().set_aspect('equal', adjustable='box')
plt.grid()
plt.show() | 0 | 0 | 0 |
005b4ddbd559ece75bff42e2ee12e7619e1eb168 | 30,771 | py | Python | venv/lib/python3.8/site-packages/gtfs_kit/routes.py | MaximilianJanetschek/Urban_Intermodal_Transportation | 632caf668636448dc9290d54cf1c7b527c68a957 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/gtfs_kit/routes.py | MaximilianJanetschek/Urban_Intermodal_Transportation | 632caf668636448dc9290d54cf1c7b527c68a957 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/gtfs_kit/routes.py | MaximilianJanetschek/Urban_Intermodal_Transportation | 632caf668636448dc9290d54cf1c7b527c68a957 | [
"MIT"
] | null | null | null | """
Functions about routes.
"""
from collections import OrderedDict
from typing import Optional, Iterable, List, Dict, TYPE_CHECKING
import json
import geopandas as gp
import pandas as pd
import numpy as np
import shapely.geometry as sg
import shapely.ops as so
import folium as fl
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def compute_route_stats_0(
    trip_stats_subset: pd.DataFrame,
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats for the given subset of trips stats (of the form output by the
    function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    Use the headway start and end times to specify the time period for computing
    headway stats.

    Return a DataFrame with the columns

    - ``'route_id'``
    - ``'route_short_name'``
    - ``'route_type'``
    - ``'direction_id'``
    - ``'num_trips'``: number of trips on the route in the subset
    - ``'num_trip_starts'``: number of trips on the route with
      nonnull start times
    - ``'num_trip_ends'``: number of trips on the route with nonnull
      end times that end before 23:59:59
    - ``'is_loop'``: 1 if at least one of the trips on the route has
      its ``is_loop`` field equal to 1; 0 otherwise
    - ``'is_bidirectional'``: 1 if the route has trips in both
      directions; 0 otherwise
    - ``'start_time'``: start time of the earliest trip on the route
    - ``'end_time'``: end time of latest trip on the route
    - ``'max_headway'``: maximum of the durations (in minutes)
      between trip starts on the route between
      ``headway_start_time`` and ``headway_end_time`` on the given
      dates
    - ``'min_headway'``: minimum of the durations (in minutes)
      mentioned above
    - ``'mean_headway'``: mean of the durations (in minutes)
      mentioned above
    - ``'peak_num_trips'``: maximum number of simultaneous trips in
      service (for the given direction, or for both directions when
      ``split_directions==False``)
    - ``'peak_start_time'``: start time of first longest period
      during which the peak number of trips occurs
    - ``'peak_end_time'``: end time of first longest period during
      which the peak number of trips occurs
    - ``'service_duration'``: total of the duration of each trip on
      the route in the given subset of trips; measured in hours
    - ``'service_distance'``: total of the distance traveled by each
      trip on the route in the given subset of trips; measured in
      whatever distance units are present in ``trip_stats_subset``;
      contains all ``np.nan`` entries if ``feed.shapes is None``
    - ``'service_speed'``: service_distance/service_duration;
      measured in distance units per hour
    - ``'mean_trip_distance'``: service_distance/num_trips
    - ``'mean_trip_duration'``: service_duration/num_trips

    If not ``split_directions``, then remove the
    direction_id column and compute each route's stats,
    except for headways, using its trips running in both directions.
    In this case, (1) compute max headway by taking the max of the
    max headways in both directions; (2) compute mean headway by
    taking the weighted mean of the mean headways in both
    directions.

    If ``trip_stats_subset`` is empty, return an empty DataFrame.

    Raise a ValueError if ``split_directions`` and no non-NaN
    direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    # Convert trip start and end times to seconds to ease calculations below
    f = trip_stats_subset.copy()
    f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
        hp.timestr_to_seconds
    )

    headway_start = hp.timestr_to_seconds(headway_start_time)
    headway_end = hp.timestr_to_seconds(headway_end_time)

    # FIX: the nested helper functions below were missing from this copy of
    # the module, leaving the groupby-apply calls referencing undefined names.
    # Restored from the canonical implementation.
    def compute_route_stats_split_directions(group):
        # Take this group of all trips stats for a single route and a single
        # direction, and compute route-level stats.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute max and mean headway over the headway time window
        stimes = group["start_time"].values
        stimes = sorted(
            [stime for stime in stimes if headway_start <= stime <= headway_end]
        )
        headways = np.diff(stimes)
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()
        return pd.Series(d)

    def compute_route_stats(group):
        # Route-level stats using trips of both directions.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute headway stats per direction, then pool the headways
        headways = np.array([])
        for direction in [0, 1]:
            stimes = group[group["direction_id"] == direction]["start_time"].values
            stimes = sorted(
                [stime for stime in stimes if headway_start <= stime <= headway_end]
            )
            headways = np.concatenate([headways, np.diff(stimes)])
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()
        return pd.Series(d)

    if split_directions:
        f = f.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if f.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )

        g = (
            f.groupby(["route_id", "direction_id"])
            .apply(compute_route_stats_split_directions)
            .reset_index()
        )

        # Add the is_bidirectional column
        def is_bidirectional(group):
            d = {}
            d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
            return pd.Series(d)

        gg = g.groupby("route_id").apply(is_bidirectional).reset_index()
        g = g.merge(gg)
    else:
        g = f.groupby("route_id").apply(compute_route_stats).reset_index()

    # Compute a few more stats
    g["service_speed"] = (g["service_distance"] / g["service_duration"]).fillna(
        g["service_distance"]
    )
    g["mean_trip_distance"] = g["service_distance"] / g["num_trips"]
    g["mean_trip_duration"] = g["service_duration"] / g["num_trips"]

    # Convert route times to time strings
    g[["start_time", "end_time", "peak_start_time", "peak_end_time"]] = g[
        ["start_time", "end_time", "peak_start_time", "peak_end_time"]
    ].applymap(lambda x: hp.timestr_to_seconds(x, inverse=True))

    return g
def compute_route_time_series_0(
    trip_stats_subset: pd.DataFrame,
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats in a 24-hour time series form for the given subset of trips (of the
    form output by the function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate each routes's stats by trip direction.
    Set the time series frequency according to the given frequency string;
    max frequency is one minute ('Min').
    Use the given YYYYMMDD date label as the date in the time series index.

    Return a DataFrame time series version the following route stats for each route.

    - ``num_trips``: number of trips in service on the route
      at any time within the time bin
    - ``num_trip_starts``: number of trips that start within
      the time bin
    - ``num_trip_ends``: number of trips that end within the
      time bin, ignoring trips that end past midnight
    - ``service_distance``: sum of the service duration accrued
      during the time bin across all trips on the route;
      measured in hours
    - ``service_distance``: sum of the service distance accrued
      during the time bin across all trips on the route; measured
      in kilometers
    - ``service_speed``: ``service_distance/service_duration``
      for the route

    The columns are hierarchical (multi-indexed) with

    - top level: name is ``'indicator'``; values are
      ``'num_trip_starts'``, ``'num_trip_ends'``, ``'num_trips'``,
      ``'service_distance'``, ``'service_duration'``, and
      ``'service_speed'``
    - middle level: name is ``'route_id'``;
      values are the active routes
    - bottom level: name is ``'direction_id'``; values are 0s and 1s

    If not ``split_directions``, then don't include the bottom level.

    The time series has a timestamp index for a 24-hour period
    sampled at the given frequency.
    The maximum allowable frequency is 1 minute.
    If ``trip_stats_subset`` is empty, then return an empty
    DataFrame with the columns ``'num_trip_starts'``,
    ``'num_trip_ends'``, ``'num_trips'``, ``'service_distance'``,
    ``'service_duration'``, and ``'service_speed'``.

    Notes
    -----
    - The time series is computed at a one-minute frequency, then
      resampled at the end to the given frequency
    - Trips that lack start or end times are ignored, so the the
      aggregate ``num_trips`` across the day could be less than the
      ``num_trips`` column of :func:`compute_route_stats_0`
    - All trip departure times are taken modulo 24 hours.
      So routes with trips that end past 23:59:59 will have all
      their stats wrap around to the early morning of the time series,
      except for their ``num_trip_ends`` indicator.
      Trip endings past 23:59:59 not binned so that resampling the
      ``num_trips`` indicator works efficiently.
    - Note that the total number of trips for two consecutive time bins
      t1 < t2 is the sum of the number of trips in bin t2 plus the
      number of trip endings in bin t1.
      Thus we can downsample the ``num_trips`` indicator by keeping
      track of only one extra count, ``num_trip_ends``, and can avoid
      recording individual trip IDs.
    - All other indicators are downsampled by summing.
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    tss = trip_stats_subset.copy()
    if split_directions:
        tss = tss.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if tss.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )

        # Alter route IDs to encode direction:
        # <route ID>-0 and <route ID>-1 or <route ID>-NA
        tss["route_id"] = (
            tss["route_id"] + "-" + tss["direction_id"].map(lambda x: str(int(x)))
        )

    routes = tss["route_id"].unique()
    # Build a dictionary of time series and then merge them all
    # at the end.
    # Assign a uniform generic date for the index
    date_str = date_label
    indicators = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
    ]

    bins = [i for i in range(24 * 60)]  # One bin for each minute
    num_bins = len(bins)

    # Bin start and end times.
    # FIX: this helper was missing from this copy of the module, leaving the
    # applymap call below referencing an undefined name. Restored from the
    # canonical implementation.
    def F(x):
        # Map a time string to its minute-of-day bin, modulo 24 hours.
        return (hp.timestr_to_seconds(x) // 60) % (24 * 60)

    tss[["start_index", "end_index"]] = tss[["start_time", "end_time"]].applymap(F)
    routes = sorted(set(tss["route_id"].values))

    # Bin each trip according to its start and end time and weight
    series_by_route_by_indicator = {
        indicator: {route: [0 for i in range(num_bins)] for route in routes}
        for indicator in indicators
    }
    for index, row in tss.iterrows():
        route = row["route_id"]
        start = row["start_index"]
        end = row["end_index"]
        distance = row["distance"]

        if start is None or np.isnan(start) or start == end:
            continue

        # Get bins to fill
        if start <= end:
            bins_to_fill = bins[start:end]
        else:
            bins_to_fill = bins[start:] + bins[:end]

        # Bin trip.
        # Do num trip starts.
        series_by_route_by_indicator["num_trip_starts"][route][start] += 1
        # Don't mark trip ends for trips that run past midnight;
        # allows for easy resampling of num_trips later
        if start <= end:
            series_by_route_by_indicator["num_trip_ends"][route][end] += 1

        # Do rest of indicators
        for indicator in indicators[2:]:
            if indicator == "num_trips":
                weight = 1
            elif indicator == "service_duration":
                weight = 1 / 60
            else:
                weight = distance / len(bins_to_fill)

            for bin in bins_to_fill:
                series_by_route_by_indicator[indicator][route][bin] += weight

    # Create one time series per indicator
    rng = pd.date_range(date_str, periods=24 * 60, freq="Min")
    series_by_indicator = {
        indicator: pd.DataFrame(
            series_by_route_by_indicator[indicator], index=rng
        ).fillna(0)
        for indicator in indicators
    }

    # Combine all time series into one time series
    g = hp.combine_time_series(
        series_by_indicator, kind="route", split_directions=split_directions
    )

    return hp.downsample(g, freq=freq)
def get_routes(
    feed: "Feed", date: Optional[str] = None, time: Optional[str] = None
) -> pd.DataFrame:
    """
    Return ``feed.routes`` or a subset thereof.

    If a YYYYMMDD date string is given, restrict the result to routes with at
    least one trip active on that date.
    If additionally a HH:MM:SS time string is given (HH may exceed 23),
    restrict further to routes active during that time.
    """
    if date is None:
        return feed.routes.copy()

    # Route IDs of the trips active on the date (and time, if given)
    active_route_ids = feed.get_trips(date, time)["route_id"].unique()
    return feed.routes[feed.routes["route_id"].isin(active_route_ids)]
def compute_route_stats(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats for all the trips that lie in the given subset
    of trip stats (of the form output by the function
    :func:`.trips.compute_trip_stats`) and that start on the given dates
    (YYYYMMDD date strings).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    Use the headway start and end times to specify the time period for computing
    headway stats.

    Return a DataFrame with the columns

    - ``'date'``
    - the columns listed in :func:``compute_route_stats_0``

    Exclude dates with no active trips, which could yield the empty DataFrame.

    Notes
    -----
    - The route stats for date d contain stats for trips that start on
      date d only and ignore trips that start on date d-1 and end on
      date d
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()

    activity = feed.compute_trip_activity(dates)

    # Many dates share the same set of active trips, so memoize the computed
    # stats by the (hashable) tuple of active trip IDs.
    cache: Dict = {}
    frames = []
    for date in dates:
        active_ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if not active_ids:
            # No trips on this date
            frames.append(pd.DataFrame())
            continue

        if active_ids not in cache:
            subset = trip_stats_subset.loc[
                lambda x: x.trip_id.isin(active_ids)
            ].copy()
            cache[active_ids] = compute_route_stats_0(
                subset,
                split_directions=split_directions,
                headway_start_time=headway_start_time,
                headway_end_time=headway_end_time,
            )

        frames.append(cache[active_ids].assign(date=date))

    # Assemble stats into a single DataFrame
    return pd.concat(frames)
def build_zero_route_time_series(
    feed: "Feed",
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Return a route time series with the same index and hierarchical columns
    as output by :func:`compute_route_time_series_0`,
    but fill it full of zero values.
    """
    # 24-hour index for the label date at the requested frequency
    start = date_label
    end = pd.to_datetime(date_label + " 23:59:00")
    rng = pd.date_range(start, end, freq=freq)

    inds = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
        "service_speed",
    ]
    rids = feed.routes.route_id
    if split_directions:
        product = [inds, rids, [0, 1]]
        names = ["indicator", "route_id", "direction_id"]
    else:
        product = [inds, rids]
        names = ["indicator", "route_id"]
    cols = pd.MultiIndex.from_product(product, names=names)

    # FIX: the previous version passed a single zero row ([[0 for c in cols]])
    # against the full-day index, which is a shape mismatch
    # (pandas raises "Shape of passed values ... indices imply ...").
    # A scalar broadcasts to the full index/columns shape instead.
    return pd.DataFrame(0, index=rng, columns=cols).sort_index(axis="columns")
def compute_route_time_series(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats in time series form for the trips that lie in
    the trip stats subset (of the form output by the function
    :func:`.trips.compute_trip_stats`) and that start on the given dates
    (YYYYMMDD date strings).

    If ``split_directions``, then separate each routes's stats by trip direction.
    Specify the time series frequency with a Pandas frequency string, e.g. ``'5Min'``;
    max frequency is one minute ('Min').

    Return a DataFrame of the same format output by the function
    :func:`compute_route_time_series_0` but with multiple dates

    Exclude dates that lie outside of the Feed's date range.
    If all dates lie outside the Feed's date range, then return an
    empty DataFrame.

    Notes
    -----
    - See the notes for :func:`compute_route_time_series_0`
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()

    activity = feed.compute_trip_activity(dates)
    ts = trip_stats_subset.copy()

    # Collect stats for each date, memoizing stats by trip ID sequence
    # to avoid unnecessary re-computations.
    # Store in dictionary of the form
    # trip ID sequence ->
    # [stats DataFrame, list of dates the stats apply to]
    stats_by_ids = {}

    # Zero-filled template reused for every date with no active trips
    zero_stats = build_zero_route_time_series(
        feed, split_directions=split_directions, freq=freq
    )
    for date in dates:
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_by_ids:
            # Append date to date list
            stats_by_ids[ids][1].append(date)
        elif not ids:
            # Null stats
            stats_by_ids[ids] = [zero_stats, [date]]
        else:
            # Compute stats
            t = ts[ts["trip_id"].isin(ids)].copy()
            stats = compute_route_time_series_0(
                t, split_directions=split_directions, freq=freq, date_label=date
            )

            # Remember stats
            stats_by_ids[ids] = [stats, [date]]

    # Assemble stats into DataFrame
    frames = []
    for stats, dates_ in stats_by_ids.values():
        for date in dates_:
            f = stats.copy()
            # Replace date: the memoized frame carries some other date's
            # timestamps, so rewrite each index entry to this date
            d = hp.datestr_to_date(date)
            f.index = f.index.map(
                lambda t: t.replace(year=d.year, month=d.month, day=d.day)
            )
            frames.append(f)

    f = pd.concat(frames).sort_index().sort_index(axis="columns")

    if len(dates) > 1:
        # Insert missing dates and zeros to complete series index
        end_datetime = pd.to_datetime(dates[-1] + " 23:59:59")
        new_index = pd.date_range(dates[0], end_datetime, freq=freq)
        f = f.reindex(new_index)
    else:
        # Set frequency explicitly, since a single-day index keeps it
        f.index.freq = pd.tseries.frequencies.to_offset(freq)

    return f.rename_axis("datetime", axis="index")
def build_route_timetable(
    feed: "Feed", route_id: str, dates: List[str]
) -> pd.DataFrame:
    """
    Return a timetable for the given route and dates (YYYYMMDD date strings).

    Return a DataFrame with whose columns are all those in ``feed.trips`` plus those in
    ``feed.stop_times`` plus ``'date'``.
    The trip IDs are restricted to the given route ID.
    The result is sorted first by date and then by grouping by
    trip ID and sorting the groups by their first departure time.

    Skip dates outside of the Feed's dates.

    If there is no route activity on the given dates, then return
    an empty DataFrame.
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()

    t = pd.merge(feed.trips, feed.stop_times)
    t = t[t["route_id"] == route_id].copy()
    a = feed.compute_trip_activity(dates)

    frames = []
    for date in dates:
        # Slice to trips active on date
        ids = a.loc[a[date] == 1, "trip_id"]
        f = t[t["trip_id"].isin(ids)].copy()
        f["date"] = date
        # Groupby trip ID and sort groups by their minimum departure time.
        # For some reason NaN departure times mess up the transform below.
        # So temporarily fill NaN departure times as a workaround.
        # FIX: use Series.ffill() instead of fillna(method="ffill"),
        # which is deprecated and removed in pandas 3.0; likewise pass
        # "min" by name to transform.
        f["dt"] = f["departure_time"].ffill()
        f["min_dt"] = f.groupby("trip_id")["dt"].transform("min")
        frames.append(f)

    f = pd.concat(frames)
    return f.sort_values(["date", "min_dt", "stop_sequence"]).drop(
        ["min_dt", "dt"], axis=1
    )
def geometrize_routes(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    use_utm: bool = False,
    split_directions: bool = False,
) -> gp.GeoDataFrame:
    """
    Given a Feed, return a GeoDataFrame with all the columns of ``feed.routes``
    plus a geometry column of (Multi)LineStrings, each of which represents the
    corresponding routes's shape.

    If an iterable of route IDs is given, then subset to those routes.
    If ``use_utm``, then use local UTM coordinates for the geometries.
    If ``split_directions``, then add the column ``direction_id`` and split each route
    route shapes into shapes in trip direction 0 and its shapes in trip direction 1.

    Raise a ValueError if the Feed has no shapes.
    """
    if feed.shapes is None:
        raise ValueError("This Feed has no shapes.")

    # Subset routes
    if route_ids is None:
        route_ids = feed.routes.route_id

    # Subset trips
    trip_ids = (
        feed.trips.loc[lambda x: x.route_id.isin(route_ids)]
        # Drop unnecessary duplicate shapes
        .drop_duplicates(subset="shape_id").loc[:, "trip_id"]
    )

    # Combine shape LineStrings within route and direction
    if split_directions:
        groupby_cols = ["route_id", "direction_id"]
    else:
        groupby_cols = ["route_id"]

    if use_utm:
        lat, lon = feed.shapes[["shape_pt_lat", "shape_pt_lon"]].values[0]
        crs = hp.get_utm_crs(lat, lon)
    else:
        crs = cs.WGS84

    # FIX: this helper was missing from this copy of the module, leaving the
    # groupby-apply below referencing an undefined name.
    # NOTE(review): reconstructed implementation — merges each group's trip
    # LineStrings into one (Multi)LineString via shapely's linemerge; confirm
    # against upstream gtfs_kit.
    def merge_lines(group):
        d = {}
        d["geometry"] = so.linemerge(group["geometry"].tolist())
        return pd.Series(d)

    return (
        feed.geometrize_trips(trip_ids)
        .filter(["route_id", "direction_id", "geometry"])
        # GeoDataFrame disappears here
        .groupby(groupby_cols)
        .apply(merge_lines)
        .reset_index()
        .merge(feed.routes)
        .pipe(gp.GeoDataFrame, crs=crs)
    )
def routes_to_geojson(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    split_directions: bool = False,
    include_stops: bool = False,
) -> Dict:
    """
    Return a GeoJSON FeatureCollection of MultiLineString features
    representing this Feed's routes.
    The coordinates reference system is the default one for GeoJSON,
    namely WGS84.

    If ``include_stops``, then also include the route stops as Point features.
    If an iterable of route IDs is given, then subset to those routes;
    an empty subset yields a FeatureCollection with an empty feature list.

    Raise a ValueError if the Feed has no shapes, or if any of the given
    route IDs are not found in the feed.
    """
    # Validate the requested route IDs up front
    if route_ids is not None:
        D = set(route_ids) - set(feed.routes.route_id)
        if D:
            raise ValueError(f"Route IDs {D} not found in feed.")

    # Build the route features
    routes_g = geometrize_routes(
        feed, route_ids=route_ids, split_directions=split_directions
    )
    if routes_g.empty:
        collection = {"type": "FeatureCollection", "features": []}
    else:
        collection = json.loads(routes_g.to_json())

    # Optionally append the stop features of the chosen routes
    if include_stops:
        stop_ids = None
        if route_ids is not None:
            stop_ids = (
                feed.stop_times.merge(feed.trips.filter(["trip_id", "route_id"]))
                .loc[lambda x: x.route_id.isin(route_ids), "stop_id"]
                .unique()
            )
        collection["features"].extend(
            feed.stops_to_geojson(stop_ids=stop_ids)["features"]
        )

    return hp.drop_feature_ids(collection)
def map_routes(
    feed: "Feed",
    route_ids: Iterable[str],
    color_palette: List[str] = cs.COLORS_SET2,
    *,
    include_stops: bool = False,
):
    """
    Return a Folium map showing the given routes and (optionally)
    their stops.

    Each route gets its own toggleable feature group, colored by cycling
    through ``color_palette``.

    If any of the given route IDs are not found in the feed, then raise a
    ValueError (via :func:`routes_to_geojson`).
    """
    # Initialize map
    my_map = fl.Map(tiles="cartodbpositron", prefer_canvas=True)

    # Create route colors by cycling through the palette
    n = len(route_ids)
    colors = [color_palette[i % len(color_palette)] for i in range(n)]

    # Collect route bounding boxes to set map zoom later
    bboxes = []

    # Create a feature group for each route and add it to the map
    for i, route_id in enumerate(route_ids):
        collection = feed.routes_to_geojson(
            route_ids=[route_id], include_stops=include_stops
        )

        # Use route short name for group name if possible; otherwise use route ID
        route_name = route_id
        for f in collection["features"]:
            if "route_short_name" in f["properties"]:
                route_name = f["properties"]["route_short_name"]
                break

        group = fl.FeatureGroup(name=f"Route {route_name}")
        color = colors[i]
        for f in collection["features"]:
            prop = f["properties"]

            # Add stop (stop features are Points; route shapes are line features)
            if f["geometry"]["type"] == "Point":
                lon, lat = f["geometry"]["coordinates"]
                fl.CircleMarker(
                    location=[lat, lon],
                    radius=8,
                    fill=True,
                    color=color,
                    weight=1,
                    popup=fl.Popup(hp.make_html(prop)),
                ).add_to(group)

            # Add path
            else:
                # Embed the color in the feature properties so the style
                # function can read it back per feature
                prop["color"] = color
                # NOTE(review): this indexes prop["route_short_name"] directly,
                # unlike the guarded lookup above — verify it is always present
                # for line features
                path = fl.GeoJson(
                    f,
                    name=prop["route_short_name"],
                    style_function=lambda x: {"color": x["properties"]["color"]},
                )
                path.add_child(fl.Popup(hp.make_html(prop)))
                path.add_to(group)
                bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))

        group.add_to(my_map)

    fl.LayerControl().add_to(my_map)

    # Fit map to bounds
    bounds = so.unary_union(bboxes).bounds
    bounds2 = [bounds[1::-1], bounds[3:1:-1]]  # Folium expects this ordering
    my_map.fit_bounds(bounds2)

    return my_map
| 35.491349 | 99 | 0.624192 | """
Functions about routes.
"""
from collections import OrderedDict
from typing import Optional, Iterable, List, Dict, TYPE_CHECKING
import json
import geopandas as gp
import pandas as pd
import numpy as np
import shapely.geometry as sg
import shapely.ops as so
import folium as fl
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def compute_route_stats_0(
    trip_stats_subset: pd.DataFrame,
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats for the given subset of trips stats (of the form output by the
    function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    Use the headway start and end times to specify the time period for computing
    headway stats.

    Return a DataFrame with the columns

    - ``'route_id'``
    - ``'route_short_name'``
    - ``'route_type'``
    - ``'direction_id'``
    - ``'num_trips'``: number of trips on the route in the subset
    - ``'num_trip_starts'``: number of trips on the route with
      nonnull start times
    - ``'num_trip_ends'``: number of trips on the route with nonnull
      end times that end before 23:59:59
    - ``'is_loop'``: 1 if at least one of the trips on the route has
      its ``is_loop`` field equal to 1; 0 otherwise
    - ``'is_bidirectional'``: 1 if the route has trips in both
      directions; 0 otherwise
    - ``'start_time'``: start time of the earliest trip on the route
    - ``'end_time'``: end time of latest trip on the route
    - ``'max_headway'``: maximum of the durations (in minutes)
      between trip starts on the route between
      ``headway_start_time`` and ``headway_end_time`` on the given
      dates
    - ``'min_headway'``: minimum of the durations (in minutes)
      mentioned above
    - ``'mean_headway'``: mean of the durations (in minutes)
      mentioned above
    - ``'peak_num_trips'``: maximum number of simultaneous trips in
      service (for the given direction, or for both directions when
      ``split_directions==False``)
    - ``'peak_start_time'``: start time of first longest period
      during which the peak number of trips occurs
    - ``'peak_end_time'``: end time of first longest period during
      which the peak number of trips occurs
    - ``'service_duration'``: total of the duration of each trip on
      the route in the given subset of trips; measured in hours
    - ``'service_distance'``: total of the distance traveled by each
      trip on the route in the given subset of trips; measured in
      whatever distance units are present in ``trip_stats_subset``;
      contains all ``np.nan`` entries if ``feed.shapes is None``
    - ``'service_speed'``: service_distance/service_duration;
      measured in distance units per hour
    - ``'mean_trip_distance'``: service_distance/num_trips
    - ``'mean_trip_duration'``: service_duration/num_trips

    If not ``split_directions``, then remove the
    direction_id column and compute each route's stats,
    except for headways, using its trips running in both directions.
    In this case, (1) compute max headway by taking the max of the
    max headways in both directions; (2) compute mean headway by
    taking the weighted mean of the mean headways in both
    directions.

    If ``trip_stats_subset`` is empty, return an empty DataFrame.

    Raise a ValueError if ``split_directions`` and no non-NaN
    direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    # Convert trip start and end times to seconds to ease calculations below
    f = trip_stats_subset.copy()
    f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
        hp.timestr_to_seconds
    )

    headway_start = hp.timestr_to_seconds(headway_start_time)
    headway_end = hp.timestr_to_seconds(headway_end_time)

    def compute_route_stats_split_directions(group):
        # Take this group of all trips stats for a single route
        # and compute route-level stats.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        # Trips ending at or past midnight (>= 24h in seconds) are excluded
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute max and mean headway within the headway time window
        stimes = group["start_time"].values
        stimes = sorted(
            [stime for stime in stimes if headway_start <= stime <= headway_end]
        )
        headways = np.diff(stimes)
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()
        return pd.Series(d)

    def compute_route_stats(group):
        # Route-level stats using the route's trips in both directions.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute headway stats: collect headways per direction, then pool
        headways = np.array([])
        for direction in [0, 1]:
            stimes = group[group["direction_id"] == direction]["start_time"].values
            stimes = sorted(
                [stime for stime in stimes if headway_start <= stime <= headway_end]
            )
            headways = np.concatenate([headways, np.diff(stimes)])
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()
        return pd.Series(d)

    if split_directions:
        f = f.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if f.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )

        g = (
            f.groupby(["route_id", "direction_id"])
            .apply(compute_route_stats_split_directions)
            .reset_index()
        )

        # Add the is_bidirectional column
        def is_bidirectional(group):
            d = {}
            d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
            return pd.Series(d)

        gg = g.groupby("route_id").apply(is_bidirectional).reset_index()
        g = g.merge(gg)
    else:
        g = f.groupby("route_id").apply(compute_route_stats).reset_index()

    # Compute a few more stats
    g["service_speed"] = (g["service_distance"] / g["service_duration"]).fillna(
        g["service_distance"]
    )
    g["mean_trip_distance"] = g["service_distance"] / g["num_trips"]
    g["mean_trip_duration"] = g["service_duration"] / g["num_trips"]

    # Convert route times to time strings
    g[["start_time", "end_time", "peak_start_time", "peak_end_time"]] = g[
        ["start_time", "end_time", "peak_start_time", "peak_end_time"]
    ].applymap(lambda x: hp.timestr_to_seconds(x, inverse=True))

    return g
def compute_route_time_series_0(
    trip_stats_subset: pd.DataFrame,
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute stats in a 24-hour time series form for the given subset of trips (of the
    form output by the function :func:`.trips.compute_trip_stats`).

    If ``split_directions``, then separate each routes's stats by trip direction.
    Set the time series frequency according to the given frequency string;
    max frequency is one minute ('Min').
    Use the given YYYYMMDD date label as the date in the time series index.

    Return a DataFrame time series version the following route stats for each route.

    - ``num_trips``: number of trips in service on the route
      at any time within the time bin
    - ``num_trip_starts``: number of trips that start within
      the time bin
    - ``num_trip_ends``: number of trips that end within the
      time bin, ignoring trips that end past midnight
    - ``service_distance``: sum of the service duration accrued
      during the time bin across all trips on the route;
      measured in hours
    - ``service_distance``: sum of the service distance accrued
      during the time bin across all trips on the route; measured
      in kilometers
    - ``service_speed``: ``service_distance/service_duration``
      for the route

    The columns are hierarchical (multi-indexed) with

    - top level: name is ``'indicator'``; values are
      ``'num_trip_starts'``, ``'num_trip_ends'``, ``'num_trips'``,
      ``'service_distance'``, ``'service_duration'``, and
      ``'service_speed'``
    - middle level: name is ``'route_id'``;
      values are the active routes
    - bottom level: name is ``'direction_id'``; values are 0s and 1s

    If not ``split_directions``, then don't include the bottom level.

    The time series has a timestamp index for a 24-hour period
    sampled at the given frequency.
    The maximum allowable frequency is 1 minute.
    If ``trip_stats_subset`` is empty, then return an empty
    DataFrame with the columns ``'num_trip_starts'``,
    ``'num_trip_ends'``, ``'num_trips'``, ``'service_distance'``,
    ``'service_duration'``, and ``'service_speed'``.

    Notes
    -----
    - The time series is computed at a one-minute frequency, then
      resampled at the end to the given frequency
    - Trips that lack start or end times are ignored, so the the
      aggregate ``num_trips`` across the day could be less than the
      ``num_trips`` column of :func:`compute_route_stats_0`
    - All trip departure times are taken modulo 24 hours.
      So routes with trips that end past 23:59:59 will have all
      their stats wrap around to the early morning of the time series,
      except for their ``num_trip_ends`` indicator.
      Trip endings past 23:59:59 not binned so that resampling the
      ``num_trips`` indicator works efficiently.
    - Note that the total number of trips for two consecutive time bins
      t1 < t2 is the sum of the number of trips in bin t2 plus the
      number of trip endings in bin t1.
      Thus we can downsample the ``num_trips`` indicator by keeping
      track of only one extra count, ``num_trip_ends``, and can avoid
      recording individual trip IDs.
    - All other indicators are downsampled by summing.
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    tss = trip_stats_subset.copy()
    if split_directions:
        tss = tss.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if tss.empty:
            raise ValueError(
                "At least one trip stats direction ID value " "must be non-NaN."
            )

        # Alter route IDs to encode direction:
        # <route ID>-0 and <route ID>-1 or <route ID>-NA
        tss["route_id"] = (
            tss["route_id"] + "-" + tss["direction_id"].map(lambda x: str(int(x)))
        )

    routes = tss["route_id"].unique()
    # Build a dictionary of time series and then merge them all
    # at the end.
    # Assign a uniform generic date for the index
    date_str = date_label
    # NOTE(review): this period_range (and day_start/day_end) appears unused —
    # rng is reassigned below via date_range before first use
    day_start = pd.to_datetime(date_str + " 00:00:00")
    day_end = pd.to_datetime(date_str + " 23:59:00")
    rng = pd.period_range(day_start, day_end, freq="Min")
    indicators = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
    ]

    bins = [i for i in range(24 * 60)]  # One bin for each minute
    num_bins = len(bins)

    # Bin start and end times
    def F(x):
        # Map a time string to its minute-of-day bin, modulo 24 hours
        return (hp.timestr_to_seconds(x) // 60) % (24 * 60)

    tss[["start_index", "end_index"]] = tss[["start_time", "end_time"]].applymap(F)
    routes = sorted(set(tss["route_id"].values))

    # Bin each trip according to its start and end time and weight
    series_by_route_by_indicator = {
        indicator: {route: [0 for i in range(num_bins)] for route in routes}
        for indicator in indicators
    }
    for index, row in tss.iterrows():
        route = row["route_id"]
        start = row["start_index"]
        end = row["end_index"]
        distance = row["distance"]

        if start is None or np.isnan(start) or start == end:
            continue

        # Get bins to fill; a trip wrapping past midnight fills the tail
        # of the day plus the head
        if start <= end:
            bins_to_fill = bins[start:end]
        else:
            bins_to_fill = bins[start:] + bins[:end]

        # Bin trip.
        # Do num trip starts.
        series_by_route_by_indicator["num_trip_starts"][route][start] += 1
        # Don't mark trip ends for trips that run past midnight;
        # allows for easy resampling of num_trips later
        if start <= end:
            series_by_route_by_indicator["num_trip_ends"][route][end] += 1

        # Do rest of indicators
        for indicator in indicators[2:]:
            if indicator == "num_trips":
                weight = 1
            elif indicator == "service_duration":
                weight = 1 / 60
            else:
                # Spread the trip's distance evenly across its bins
                weight = distance / len(bins_to_fill)

            for bin in bins_to_fill:
                series_by_route_by_indicator[indicator][route][bin] += weight

    # Create one time series per indicator
    rng = pd.date_range(date_str, periods=24 * 60, freq="Min")
    series_by_indicator = {
        indicator: pd.DataFrame(
            series_by_route_by_indicator[indicator], index=rng
        ).fillna(0)
        for indicator in indicators
    }

    # Combine all time series into one time series
    g = hp.combine_time_series(
        series_by_indicator, kind="route", split_directions=split_directions
    )

    return hp.downsample(g, freq=freq)
def get_routes(
    feed: "Feed", date: Optional[str] = None, time: Optional[str] = None
) -> pd.DataFrame:
    """
    Return ``feed.routes``, possibly restricted.

    When ``date`` (a YYYYMMDD string) is given, keep only routes with at
    least one trip active on that date; when ``time`` (an HH:MM:SS string,
    where HH may exceed 23) is also given, keep only routes with a trip
    active at that time.
    """
    routes = feed.routes
    # No date filter requested: hand back an independent copy of the table.
    if date is None:
        return routes.copy()
    # Otherwise keep only the routes served by at least one active trip.
    active_route_ids = feed.get_trips(date, time)["route_id"].unique()
    return routes[routes["route_id"].isin(active_route_ids)]
def compute_route_stats(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats for all the trips that lie in the given trip stats
    subset (of the form output by :func:`.trips.compute_trip_stats`) and
    that start on the given dates (YYYYMMDD date strings).

    If ``split_directions``, then separate the stats by trip direction (0 or 1).
    The headway start and end times delimit the period used for headway stats.

    Return a DataFrame with a ``'date'`` column plus the columns listed in
    :func:`compute_route_stats_0`.
    Dates with no active trips are excluded, which can yield an empty
    DataFrame.

    Notes
    -----
    - Stats for date d cover only trips starting on d; trips starting on
      d-1 and ending on d are ignored.
    - Raise a ValueError if ``split_directions`` and no non-NaN direction
      ID values are present.
    """
    valid_dates = feed.subset_dates(dates)
    if not valid_dates:
        return pd.DataFrame()
    activity = feed.compute_trip_activity(valid_dates)
    # Memoize stats per tuple of active trip IDs so identical service days
    # are computed only once.
    cache = {}
    daily_frames = []
    for date in valid_dates:
        active_ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if not active_ids:
            daily_frames.append(pd.DataFrame())
            continue
        if active_ids not in cache:
            subset = trip_stats_subset.loc[
                lambda x: x.trip_id.isin(active_ids)
            ].copy()
            cache[active_ids] = compute_route_stats_0(
                subset,
                split_directions=split_directions,
                headway_start_time=headway_start_time,
                headway_end_time=headway_end_time,
            )
        # Stamp the (possibly memoized) stats with this date.
        daily_frames.append(cache[active_ids].assign(date=date))
    # Assemble stats into a single DataFrame
    return pd.concat(daily_frames)
def build_zero_route_time_series(
    feed: "Feed",
    date_label: str = "20010101",
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Return a route time series with the same index and hierarchical columns
    as output by :func:`compute_route_time_series_0`,
    but fill it full of zero values.

    Parameters
    ----------
    feed : Feed
        Only ``feed.routes.route_id`` is read, to build the column index.
    date_label : str
        YYYYMMDD date string used as the generic date of the index.
    freq : str
        Pandas frequency string for the index, e.g. ``'5Min'``.
    split_directions : bool
        If True, add a ``direction_id`` column level with values 0 and 1.
    """
    start = date_label
    end = pd.to_datetime(date_label + " 23:59:00")
    rng = pd.date_range(start, end, freq=freq)
    inds = [
        "num_trip_starts",
        "num_trip_ends",
        "num_trips",
        "service_duration",
        "service_distance",
        "service_speed",
    ]
    rids = feed.routes.route_id
    if split_directions:
        product = [inds, rids, [0, 1]]
        names = ["indicator", "route_id", "direction_id"]
    else:
        product = [inds, rids]
        names = ["indicator", "route_id"]
    cols = pd.MultiIndex.from_product(product, names=names)
    # Broadcast the scalar 0 across the full index/columns grid. Scalar
    # broadcasting is documented DataFrame-constructor behavior, unlike the
    # previous single-nested-list row, which relied on pandas broadcasting a
    # one-row array — an implementation detail.
    return pd.DataFrame(0, index=rng, columns=cols).sort_index(axis="columns")
def compute_route_time_series(
    feed: "Feed",
    trip_stats_subset: pd.DataFrame,
    dates: List[str],
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> pd.DataFrame:
    """
    Compute route stats in time series form for the trips that lie in
    the trip stats subset (of the form output by the function
    :func:`.trips.compute_trip_stats`) and that start on the given dates
    (YYYYMMDD date strings).
    If ``split_directions``, then separate each routes's stats by trip direction.
    Specify the time series frequency with a Pandas frequency string, e.g. ``'5Min'``;
    max frequency is one minute ('Min').
    Return a DataFrame of the same format output by the function
    :func:`compute_route_time_series_0` but with multiple dates
    Exclude dates that lie outside of the Feed's date range.
    If all dates lie outside the Feed's date range, then return an
    empty DataFrame.
    Notes
    -----
    - See the notes for :func:`compute_route_time_series_0`
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()
    activity = feed.compute_trip_activity(dates)
    ts = trip_stats_subset.copy()
    # Collect stats for each date, memoizing stats by trip ID sequence
    # to avoid unnecessary re-computations.
    # Store in dictionary of the form
    # trip ID sequence ->
    # [stats DataFrame, list of dates the stats apply to]
    stats_by_ids = {}
    # Shared all-zero template used for every date with no active trips.
    zero_stats = build_zero_route_time_series(
        feed, split_directions=split_directions, freq=freq
    )
    for date in dates:
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_by_ids:
            # Same set of active trips seen before: reuse its stats and
            # just append this date to the date list.
            stats_by_ids[ids][1].append(date)
        elif not ids:
            # Null stats
            stats_by_ids[ids] = [zero_stats, [date]]
        else:
            # Compute stats
            t = ts[ts["trip_id"].isin(ids)].copy()
            stats = compute_route_time_series_0(
                t, split_directions=split_directions, freq=freq, date_label=date
            )
            # Remember stats
            stats_by_ids[ids] = [stats, [date]]
    # Assemble stats into DataFrame: re-date each memoized frame once per
    # date it applies to.
    frames = []
    for stats, dates_ in stats_by_ids.values():
        for date in dates_:
            f = stats.copy()
            # Replace the generic/memoized date with the actual one while
            # keeping the time-of-day part of each timestamp.
            d = hp.datestr_to_date(date)
            f.index = f.index.map(
                lambda t: t.replace(year=d.year, month=d.month, day=d.day)
            )
            frames.append(f)
    f = pd.concat(frames).sort_index().sort_index(axis="columns")
    if len(dates) > 1:
        # Insert missing dates and zeros to complete series index
        end_datetime = pd.to_datetime(dates[-1] + " 23:59:59")
        new_index = pd.date_range(dates[0], end_datetime, freq=freq)
        f = f.reindex(new_index)
    else:
        # Single date: reindexing is unnecessary, just set the frequency.
        f.index.freq = pd.tseries.frequencies.to_offset(freq)
    return f.rename_axis("datetime", axis="index")
def build_route_timetable(
    feed: "Feed", route_id: str, dates: List[str]
) -> pd.DataFrame:
    """
    Return a timetable for the given route and dates (YYYYMMDD date strings).

    Return a DataFrame whose columns are all those in ``feed.trips`` plus
    those in ``feed.stop_times`` plus ``'date'``, with trip IDs restricted
    to the given route ID.
    The result is sorted first by date and then by grouping by trip ID and
    sorting the groups by their first departure time.
    Skip dates outside of the Feed's dates.
    If there is no route activity on the given dates, then return an
    empty DataFrame.
    """
    dates = feed.subset_dates(dates)
    if not dates:
        return pd.DataFrame()
    t = pd.merge(feed.trips, feed.stop_times)
    t = t[t["route_id"] == route_id].copy()
    a = feed.compute_trip_activity(dates)
    frames = []
    for date in dates:
        # Slice to trips active on date
        ids = a.loc[a[date] == 1, "trip_id"]
        f = t[t["trip_id"].isin(ids)].copy()
        f["date"] = date
        # Group by trip ID and sort groups by their minimum departure time.
        # NaN departure times break the transform below, so forward-fill
        # them first as a workaround.  Use .ffill() and transform("min"):
        # fillna(method=...) is deprecated in modern pandas and passing the
        # builtin min is equivalent to the string alias.
        f["dt"] = f["departure_time"].ffill()
        f["min_dt"] = f.groupby("trip_id")["dt"].transform("min")
        frames.append(f)
    f = pd.concat(frames)
    # Drop the helper columns before returning.
    return f.sort_values(["date", "min_dt", "stop_sequence"]).drop(
        ["min_dt", "dt"], axis=1
    )
def geometrize_routes(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    use_utm: bool = False,
    split_directions: bool = False,
) -> gp.GeoDataFrame:
    """
    Given a Feed, return a GeoDataFrame with all the columns of ``feed.routes``
    plus a geometry column of (Multi)LineStrings, each of which represents the
    corresponding routes's shape.
    If an iterable of route IDs is given, then subset to those routes.
    If ``use_utm``, then use local UTM coordinates for the geometries.
    If ``split_directions``, then add the column ``direction_id`` and split each route
    route shapes into shapes in trip direction 0 and its shapes in trip direction 1.
    Raise a ValueError if the Feed has no shapes.
    """
    if feed.shapes is None:
        raise ValueError("This Feed has no shapes.")
    # Subset routes
    if route_ids is None:
        route_ids = feed.routes.route_id
    # Subset trips: one trip per distinct shape ID suffices to recover each
    # route's geometry.
    trip_ids = (
        feed.trips.loc[lambda x: x.route_id.isin(route_ids)]
        # Drop unnecessary duplicate shapes
        .drop_duplicates(subset="shape_id").loc[:, "trip_id"]
    )
    # Combine shape LineStrings within route and direction
    if split_directions:
        groupby_cols = ["route_id", "direction_id"]
    else:
        groupby_cols = ["route_id"]
    def merge_lines(group):
        # Fuse all of this group's LineStrings into a single
        # (Multi)LineString via shapely.ops.linemerge.
        d = {}
        d["geometry"] = so.linemerge(group.geometry.tolist())
        return pd.Series(d)
    if use_utm:
        # Derive the local UTM CRS from the first shape point's lat/lon.
        lat, lon = feed.shapes[["shape_pt_lat", "shape_pt_lon"]].values[0]
        crs = hp.get_utm_crs(lat, lon)
    else:
        crs = cs.WGS84
    return (
        feed.geometrize_trips(trip_ids)
        .filter(["route_id", "direction_id", "geometry"])
        # GeoDataFrame disappears here: groupby().apply() yields a plain
        # DataFrame, hence the .pipe back into a GeoDataFrame below.
        .groupby(groupby_cols)
        .apply(merge_lines)
        .reset_index()
        .merge(feed.routes)
        .pipe(gp.GeoDataFrame, crs=crs)
    )
def routes_to_geojson(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    split_directions: bool = False,
    include_stops: bool = False,
) -> Dict:
    """
    Return a GeoJSON FeatureCollection of MultiLineString features
    representing this Feed's routes, in the default GeoJSON coordinate
    reference system (WGS84).

    If ``include_stops``, also include the route stops as Point features.
    If an iterable of route IDs is given, subset to those routes; an empty
    subset yields a FeatureCollection with an empty feature list.
    Raise a ValueError if the Feed has no shapes or if any given route ID
    is missing from the feed.
    """
    # Validate the requested route IDs up front.
    if route_ids is not None:
        missing = set(route_ids) - set(feed.routes.route_id)
        if missing:
            raise ValueError(f"Route IDs {missing} not found in feed.")
    # Geometrize the (possibly subset) routes and serialize to GeoJSON.
    geo = geometrize_routes(feed, route_ids=route_ids, split_directions=split_directions)
    if geo.empty:
        collection = {"type": "FeatureCollection", "features": []}
    else:
        collection = json.loads(geo.to_json())
    # Optionally append the stops served by the selected routes.
    if include_stops:
        stop_ids = None
        if route_ids is not None:
            stop_ids = (
                feed.stop_times.merge(feed.trips.filter(["trip_id", "route_id"]))
                .loc[lambda x: x.route_id.isin(route_ids), "stop_id"]
                .unique()
            )
        stops_gj = feed.stops_to_geojson(stop_ids=stop_ids)
        collection["features"].extend(stops_gj["features"])
    return hp.drop_feature_ids(collection)
def map_routes(
    feed: "Feed",
    route_ids: Iterable[str],
    color_palette: List[str] = cs.COLORS_SET2,
    *,
    include_stops: bool = False,
):
    """
    Return a Folium map showing the given routes and (optionally)
    their stops.
    If any of the given route IDs are not found in the feed, then raise a ValueError.
    """
    # Initialize map
    my_map = fl.Map(tiles="cartodbpositron", prefer_canvas=True)
    # Create route colors, cycling through the palette.
    # NOTE(review): len() requires route_ids to be a sized iterable (e.g. a
    # list), despite the Iterable annotation — confirm with callers.
    n = len(route_ids)
    colors = [color_palette[i % len(color_palette)] for i in range(n)]
    # Collect route bounding boxes to set map zoom later
    bboxes = []
    # Create a feature group for each route and add it to the map
    for i, route_id in enumerate(route_ids):
        collection = feed.routes_to_geojson(
            route_ids=[route_id], include_stops=include_stops
        )
        # Use route short name for group name if possible; otherwise use route ID
        route_name = route_id
        for f in collection["features"]:
            if "route_short_name" in f["properties"]:
                route_name = f["properties"]["route_short_name"]
                break
        group = fl.FeatureGroup(name=f"Route {route_name}")
        color = colors[i]
        for f in collection["features"]:
            prop = f["properties"]
            # Add stop
            if f["geometry"]["type"] == "Point":
                # GeoJSON coordinates are (lon, lat); Folium wants (lat, lon).
                lon, lat = f["geometry"]["coordinates"]
                fl.CircleMarker(
                    location=[lat, lon],
                    radius=8,
                    fill=True,
                    color=color,
                    weight=1,
                    popup=fl.Popup(hp.make_html(prop)),
                ).add_to(group)
            # Add path
            else:
                # Stash the color on the feature so style_function can read
                # it back from the feature's own properties.
                prop["color"] = color
                path = fl.GeoJson(
                    f,
                    name=prop["route_short_name"],
                    style_function=lambda x: {"color": x["properties"]["color"]},
                )
                path.add_child(fl.Popup(hp.make_html(prop)))
                path.add_to(group)
                # Track the path's bounding box for the final zoom-to-fit.
                bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))
        group.add_to(my_map)
    fl.LayerControl().add_to(my_map)
    # Fit map to bounds
    bounds = so.unary_union(bboxes).bounds
    bounds2 = [bounds[1::-1], bounds[3:1:-1]]  # Folium expects this ordering
    my_map.fit_bounds(bounds2)
    return my_map
| 3,859 | 0 | 137 |
55261d12666c02ea8f67b72d2e1ed2b304a01b9b | 405 | py | Python | instagram_api/response/user_story_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | instagram_api/response/user_story_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | instagram_api/response/user_story_feed.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import Broadcast, PostLiveItem, Reel
__all__ = ['UserStoryFeedResponse']
| 25.3125 | 73 | 0.814815 | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import Broadcast, PostLiveItem, Reel
__all__ = ['UserStoryFeedResponse']
class UserStoryFeedResponseInterface(ApiResponseInterface):
    """Typed field declarations for the user story feed API response.

    Each annotated name maps to a payload field — presumably consumed by
    the ``ApiResponse`` mapper machinery; verify against ``.mapper``.
    """
    broadcast: Broadcast
    reel: Reel
    post_live_item: PostLiveItem
class UserStoryFeedResponse(ApiResponse, UserStoryFeedResponseInterface):
    """Concrete user story feed response: combines the ``ApiResponse``
    base with the interface's field declarations; adds no behavior."""
    pass
| 0 | 172 | 46 |
003530ab7e606f4f6731fe033677de163d5cdf63 | 2,640 | py | Python | b_tree/binary_search_tree/sum_of_common_nodes_in_bst.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | b_tree/binary_search_tree/sum_of_common_nodes_in_bst.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | 5 | 2021-03-10T11:49:39.000Z | 2022-02-27T01:35:59.000Z | b_tree/binary_search_tree/sum_of_common_nodes_in_bst.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | # Definition for a binary tree node
# @param A : root node of tree
# @param B : root node of tree
# @return an integer
# tree 1
t5 = TreeNode(5)
t2 = TreeNode(2)
t3 = TreeNode(3)
t5.left = t2
t2.right = t3
t8 = TreeNode(8)
t5.right = t8
t15 = TreeNode(15)
t8.right = t15
t7 = TreeNode(7)
t15.left = t7
# tree 2
b7 = TreeNode(7)
b1 = TreeNode(1)
b2 = TreeNode(2)
b10 = TreeNode(10)
b15 = TreeNode(15)
b8 = TreeNode(8)
b7.left = b1
b7.right = b10
b1.right = b2
b10.right = b15
b15.left = b8
sol = Solution()
print(sol.solve(t5, b7))
# class Solution:
# # @param A : root node of tree
# # @param B : root node of tree
# # @return an integer
#
# def in_order_traversal(self, root):
# current = root
# stack = []
# anser = {}
# if not current:
# return anser
# while True:
# if current:
# stack.append(current)
# current = current.left
# elif len(stack) > 0:
# pop = stack.pop(-1)
# anser[pop.val] = True
# current = pop.right
# else:
# break
# return anser
#
# def solve(self, A, B):
# traversed_a = self.in_order_traversal(A)
# traversed_b = self.in_order_traversal(B)
# summer = 0
# for i in traversed_a:
# if i in traversed_b:
# summer += i
# return summer
| 20.787402 | 50 | 0.507197 | # Definition for a binary tree node
class TreeNode:
    """A binary tree node holding a value and left/right child links."""
    def __init__(self, x):
        """Create a leaf node with value ``x`` and no children."""
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Sum the values common to two binary search trees.

    Strategy: produce each tree's in-order value list (sorted for a valid
    BST), then merge the two lists from the largest values down, adding
    values that appear in both.  Stray debug ``print`` calls from the
    original were removed.
    """
    # @param A : root node of tree
    # @param B : root node of tree
    # @return an integer

    def in_order_traversal(self, root):
        """Return the values of ``root`` in in-order sequence.

        Uses an explicit stack instead of recursion so deep trees do not
        hit the interpreter's recursion limit.
        """
        current = root
        stack = []
        values = []
        if not current:
            return values
        while True:
            if current:
                stack.append(current)
                current = current.left
            elif stack:
                node = stack.pop()
                values.append(node.val)
                current = node.right
            else:
                break
        return values

    def solve(self, A, B):
        """Return the sum of values present in both trees ``A`` and ``B``."""
        values_a = self.in_order_traversal(A)
        values_b = self.in_order_traversal(B)
        total = 0
        # Merge from the back (largest values first), advancing whichever
        # list currently ends with the larger value.
        while values_a and values_b:
            a = values_a.pop()
            b = values_b.pop()
            if a == b:
                total += a
            elif a > b:
                values_b.append(b)
            else:
                values_a.append(a)
        return total
# tree 1
# Root 5, with left subtree 2 -> (right child 3) and right subtree
# 8 -> 15 -> (left child 7).
t5 = TreeNode(5)
t2 = TreeNode(2)
t3 = TreeNode(3)
t5.left = t2
t2.right = t3
t8 = TreeNode(8)
t5.right = t8
t15 = TreeNode(15)
t8.right = t15
t7 = TreeNode(7)
t15.left = t7
# tree 2
# Root 7, left subtree 1 -> (right child 2), right subtree
# 10 -> 15 -> (left child 8).
b7 = TreeNode(7)
b1 = TreeNode(1)
b2 = TreeNode(2)
b10 = TreeNode(10)
b15 = TreeNode(15)
b8 = TreeNode(8)
b7.left = b1
b7.right = b10
b1.right = b2
b10.right = b15
b15.left = b8
# Print the sum of values the two trees have in common.
sol = Solution()
print(sol.solve(t5, b7))
# class Solution:
# # @param A : root node of tree
# # @param B : root node of tree
# # @return an integer
#
# def in_order_traversal(self, root):
# current = root
# stack = []
# anser = {}
# if not current:
# return anser
# while True:
# if current:
# stack.append(current)
# current = current.left
# elif len(stack) > 0:
# pop = stack.pop(-1)
# anser[pop.val] = True
# current = pop.right
# else:
# break
# return anser
#
# def solve(self, A, B):
# traversed_a = self.in_order_traversal(A)
# traversed_b = self.in_order_traversal(B)
# summer = 0
# for i in traversed_a:
# if i in traversed_b:
# summer += i
# return summer
| 1,055 | -12 | 125 |
2707c7185ae66c32193ea16931b8b78b32de7be0 | 1,039 | py | Python | autonomous-fleet/afm-auction_house/server/migrations/0002_auto_20200622_0946.py | CodexWorks/autonomous-fleet | 26fd71e9b2e6ee1d97e04959c3a31c094fc40bc6 | [
"BSD-3-Clause"
] | null | null | null | autonomous-fleet/afm-auction_house/server/migrations/0002_auto_20200622_0946.py | CodexWorks/autonomous-fleet | 26fd71e9b2e6ee1d97e04959c3a31c094fc40bc6 | [
"BSD-3-Clause"
] | 3 | 2020-05-14T14:30:00.000Z | 2020-06-09T15:03:12.000Z | autonomous-fleet/afm-auction_house/server/migrations/0002_auto_20200622_0946.py | CodexWorks/autonomous-fleet | 26fd71e9b2e6ee1d97e04959c3a31c094fc40bc6 | [
"BSD-3-Clause"
] | 1 | 2020-06-10T09:30:32.000Z | 2020-06-10T09:30:32.000Z | # Generated by Django 3.0.7 on 2020-06-22 06:46
from django.db import migrations, models
| 25.341463 | 74 | 0.533205 | # Generated by Django 3.0.7 on 2020-06-22 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``text`` model.

    Drops ``main_title``, ``sub_title`` and ``text_body``; adds
    ``body_text`` and ``headline``; and gives ``pub_date`` the verbose
    name "publishing date".
    """
    dependencies = [
        ('server', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='text',
            name='main_title',
        ),
        migrations.RemoveField(
            model_name='text',
            name='sub_title',
        ),
        migrations.RemoveField(
            model_name='text',
            name='text_body',
        ),
        migrations.AddField(
            model_name='text',
            name='body_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='text',
            name='headline',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='text',
            name='pub_date',
            field=models.DateField(verbose_name='publishing date'),
        ),
    ]
| 0 | 925 | 23 |
702293d9777a391ba191cfd89a932a4b0167e14b | 268 | py | Python | BeyondChaos/constants.py | emberling/BeyondChaosRandomizer | 2f69752611e25146a29aef51044d0446d4e80332 | [
"MIT"
] | 1 | 2021-06-15T03:54:53.000Z | 2021-06-15T03:54:53.000Z | BeyondChaos/constants.py | emberling/BeyondChaosRandomizer | 2f69752611e25146a29aef51044d0446d4e80332 | [
"MIT"
] | 1 | 2021-09-13T04:32:43.000Z | 2021-09-13T04:32:43.000Z | BeyondChaos/Constants.py | razzlestorm/BeyondChaosRandomizer | 04a0acdcd9d4c3991a3e42cf1bba4299adda4435 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Plan: much of the "magic string" text will move into here to make modifying it a lot easier on the fly.
UpdateFound = "Applying update..."
UpdateLaunching = "Launching the updater application. This application will now close automatically." | 44.666667 | 106 | 0.776119 | #!/usr/bin/env python3
#plans a lot of the "magic string" text will move into here to make modifying them alot easier on the fly.
UpdateFound = "Applying update..."
UpdateLaunching = "Launching the updater application. This application will now close automatically." | 0 | 0 | 0 |
5028b9618c72260c18e7e30aa727af8e41dfc7a6 | 7,222 | py | Python | app/label_maker/real/generate_labels.py | yuecideng/Misc3D | a4554b171d72e03ac8d03880523934a39a9ff515 | [
"MIT"
] | 13 | 2022-02-09T11:56:20.000Z | 2022-03-31T15:45:04.000Z | app/label_maker/real/generate_labels.py | yuecideng/Misc3D | a4554b171d72e03ac8d03880523934a39a9ff515 | [
"MIT"
] | 7 | 2022-02-26T08:58:43.000Z | 2022-03-29T11:19:05.000Z | app/label_maker/real/generate_labels.py | yuecideng/Misc3D | a4554b171d72e03ac8d03880523934a39a9ff515 | [
"MIT"
] | 5 | 2022-02-16T06:59:00.000Z | 2022-03-31T12:03:11.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import argparse
import os
import cv2
import shutil
import numpy as np
import open3d as o3d
import misc3d as m3d
import json
from utils import Colors, mask_to_bbox, rgbd_to_pointcloud
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', help='path to CAD model')
parser.add_argument("--data_path", default='dataset',
help="path to RGBD data set")
parser.add_argument("--local_refine", action='store_true',
help="use icp the refine model to the scene")
parser.add_argument("--minimum_obj_pixel", default=500, type=int,
help="the minimum number of pixel of an object in the rgb image")
parser.add_argument("--vis", action='store_true',
help="visualize the rendering results")
args = parser.parse_args()
remove_and_create_dir(args.data_path)
models, init_poses = read_model_and_init_poses(
args.model_path, args.data_path)
rgbds, file_names = read_rgbd_and_name(args.data_path)
camera = read_camera_intrinsic(args.data_path)
odometrys = read_odometry(args.data_path)
render = m3d.pose_estimation.RayCastRenderer(camera)
t0 = time.time()
data_labels = {}
for i in range(len(rgbds)):
render_mesh = []
mesh_pose = []
odom = odometrys[i]
for key, value in init_poses.items():
for arr in value:
pose = np.array(arr).reshape((4, 4))
render_mesh.append(models[key])
pose = np.linalg.inv(odom) @ pose
if args.local_refine:
pose = refine_local_pose(
models[key], rgbds[i][0], rgbds[i][1], camera, pose)
mesh_pose.append(pose)
ret = render.cast_rays(render_mesh, mesh_pose)
# rendering instance map
instance_map = render.get_instance_map().numpy()
label = generate_label_and_save_mask(
args.data_path, instance_map, init_poses, mesh_pose, file_names[i], args.minimum_obj_pixel)
data_labels[file_names[i]] = label
# create visible instance mask image
mask = np.zeros(
(instance_map.shape[0], instance_map.shape[1], 3), dtype=np.uint8)
index = np.zeros(
(instance_map.shape[0], instance_map.shape[1]), dtype=np.bool_)
color = rgbds[i][0]
for j in range(len(render_mesh)):
mask[instance_map == j] = Colors()(j, True)
index[instance_map == j] = True
color[index] = cv2.addWeighted(color, 0.6, mask, 0.3, 0)[index]
cv2.imwrite(os.path.join(args.data_path, 'mask_vis',
file_names[i] + '_vis.png'), color)
# visualization
if args.vis:
cv2.namedWindow('Instance Mask Rendering', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Instance Mask Rendering', color)
key = cv2.waitKey(0)
print('Time:', time.time() - t0)
# save reuslts inside data path
with open(os.path.join(args.data_path, 'labels.json'), 'w') as f:
json.dump(data_labels, f, indent=4)
| 34.721154 | 123 | 0.620742 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import argparse
import os
import cv2
import shutil
import numpy as np
import open3d as o3d
import misc3d as m3d
import json
from utils import Colors, mask_to_bbox, rgbd_to_pointcloud
def remove_and_create_dir(dir_path):
    """Reset the label output folders under ``dir_path``.

    Recreates the ``mask`` and ``mask_vis`` subdirectories from scratch,
    deleting any previous contents.
    """
    for sub_dir in ('mask', 'mask_vis'):
        target = os.path.join(dir_path, sub_dir)
        if os.path.exists(target):
            shutil.rmtree(target)
        os.makedirs(target)
def read_model_and_init_poses(model_path, data_path):
    """Load CAD meshes and their initial poses.

    Reads ``init_poses.json`` from ``data_path`` and, for every ``.ply``
    file in ``model_path`` whose stem appears in the pose file, loads the
    mesh with Open3D.

    Returns:
        (models, init_poses): dict mapping model name to its mesh, and the
        raw pose dictionary from the JSON file.
    """
    pose_file = os.path.join(data_path, 'init_poses.json')
    with open(pose_file, 'r') as f:
        init_poses = json.load(f)
    models = {}
    ply_files = [n for n in os.listdir(model_path) if n.endswith('.ply')]
    for file_name in ply_files:
        stem = os.path.splitext(file_name)[0]
        if stem not in init_poses:
            continue
        models[stem] = o3d.io.read_triangle_mesh(
            os.path.join(model_path, file_name))
    return (models, init_poses)
def read_rgbd_and_name(path):
    """Load the color/depth image pairs stored under ``path``.

    Returns:
        (rgbds, names): list of (color, depth) image tuples and the list
        of corresponding file stems (taken from the color file names).
    """
    rgbds = []
    names = []
    # Sort both listings: os.listdir returns entries in arbitrary order, so
    # without sorting the i-th color file is not guaranteed to correspond
    # to the i-th depth file.
    color_files = sorted(os.listdir(os.path.join(path, 'color')))
    depth_files = sorted(os.listdir(os.path.join(path, 'depth')))
    for color_name, depth_name in zip(color_files, depth_files):
        color = cv2.imread(os.path.join(path, 'color', color_name))
        depth = cv2.imread(os.path.join(
            path, 'depth', depth_name), cv2.IMREAD_UNCHANGED)
        rgbds.append((color, depth))
        names.append(os.path.splitext(color_name)[0])
    return rgbds, names
def read_camera_intrinsic(path):
    """Read the pinhole camera intrinsics from ``camera_intrinsic.json``.

    Returns an ``open3d.camera.PinholeCameraIntrinsic`` built from the
    width/height and fx/fy/cx/cy entries of the JSON file.
    """
    # Use a context manager so the file handle is closed (the original
    # left the handle open).
    with open(os.path.join(path, 'camera_intrinsic.json'), 'r') as f:
        data = json.load(f)
    camera = o3d.camera.PinholeCameraIntrinsic(
        data['width'], data['height'], data['fx'], data['fy'], data['cx'], data['cy'])
    return camera
def read_odometry(path):
    """Read the camera trajectory stored in ``scene/trajectory.json``.

    Every entry except the ``class_name`` key is interpreted as a
    flattened 4x4 pose matrix.

    Returns:
        list of (4, 4) numpy arrays, in the file's key order.
    """
    # Use a context manager so the file handle is closed (the original
    # left the handle open).
    with open(os.path.join(path, 'scene/trajectory.json'), 'r') as f:
        data = json.load(f)
    odometrys = []
    for key, value in data.items():
        if key == 'class_name':
            continue
        odometrys.append(np.array(value).reshape((4, 4)))
    return odometrys
def refine_local_pose(model, color, depth, camera, init_pose, threshold=0.005):
    """Refine ``init_pose`` of ``model`` against the RGBD scene with ICP.

    Crops the scene point cloud to a slightly enlarged oriented bounding
    box of the model placed at ``init_pose``, then runs point-to-point ICP
    with correspondence distance ``threshold`` (meters).  Returns the
    refined 4x4 transformation.
    """
    # (fx, fy, cx, cy) pulled from the intrinsic matrix.
    intrin = (camera.intrinsic_matrix[0, 0],
              camera.intrinsic_matrix[1, 1], camera.intrinsic_matrix[0, 2], camera.intrinsic_matrix[1, 2])
    # Depth scale 1000.0 — presumably depth is in millimeters; 3.0 is the
    # truncation distance. TODO confirm against rgbd_to_pointcloud.
    scene = rgbd_to_pointcloud(color, depth, intrin, 1000.0, 3.0, True)
    scene = scene.voxel_down_sample(voxel_size=0.01)
    # ICP works on point clouds, so use only the mesh vertices.
    model = o3d.geometry.PointCloud(model.vertices)
    # Place the model's oriented bbox at the initial pose and inflate it by
    # 20% so the crop keeps nearby scene points.
    bbox = model.get_oriented_bounding_box()
    bbox.rotate(init_pose[:3, :3], bbox.center)
    bbox.translate(init_pose[:3, 3])
    bbox.scale(1.2, bbox.center)
    scene = scene.crop(bbox)
    result = o3d.pipelines.registration.registration_icp(model, scene, threshold, init_pose,
                                                         o3d.pipelines.registration.TransformationEstimationPointToPoint())
    pose = result.transformation
    return pose
def generate_label_and_save_mask(data_path, instance_map, init_poses, pose_list, name, minimum_obj_pixel):
    """Build per-object labels for one frame and write its instance mask.

    ``instance_map`` assigns each pixel the flat index of the rendered
    object (same order as ``pose_list`` and the nested ``init_poses``
    iteration).  Objects covering fewer than ``minimum_obj_pixel`` pixels
    are skipped.  The 16-bit instance mask encodes ``obj_id * 1000 +
    instance`` per pixel and is saved as ``mask/<name>.png``.

    Returns:
        list of label dicts with obj_id, instance_id, pose and bbox.
    """
    instance_mask = np.zeros(instance_map.shape, dtype=np.uint16)
    labels = []
    instance = 0
    for key, value in init_poses.items():
        for _ in range(len(value)):
            pixel_index = instance_map == instance
            if np.count_nonzero(pixel_index) < minimum_obj_pixel:
                # Object (nearly) invisible in this view: skip it, but
                # STILL advance the flat counter so it stays aligned with
                # pose_list and the renderer's instance ids.  The original
                # `continue` without incrementing desynchronized all
                # subsequent objects.
                instance += 1
                continue
            mask_255 = np.zeros(instance_map.shape, dtype=np.uint8)
            mask_255[pixel_index] = 255
            label = {
                'obj_id': int(key),
                'instance_id': instance,
                'cam_R_m2c': pose_list[instance][:3, :3].tolist(),
                'cam_t_m2c': pose_list[instance][:3, 3].tolist(),
                'bbox': mask_to_bbox(mask_255),
            }
            instance_mask[pixel_index] = int(key) * 1000 + instance
            labels.append(label)
            instance += 1
    cv2.imwrite(os.path.join(data_path, 'mask', name + '.png'), instance_mask)
    return labels
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', help='path to CAD model')
    parser.add_argument("--data_path", default='dataset',
                        help="path to RGBD data set")
    parser.add_argument("--local_refine", action='store_true',
                        help="use icp the refine model to the scene")
    parser.add_argument("--minimum_obj_pixel", default=500, type=int,
                        help="the minimum number of pixel of an object in the rgb image")
    parser.add_argument("--vis", action='store_true',
                        help="visualize the rendering results")
    args = parser.parse_args()
    # Reset the mask/mask_vis output folders, then load models, images,
    # camera intrinsics and the per-frame camera odometry.
    remove_and_create_dir(args.data_path)
    models, init_poses = read_model_and_init_poses(
        args.model_path, args.data_path)
    rgbds, file_names = read_rgbd_and_name(args.data_path)
    camera = read_camera_intrinsic(args.data_path)
    odometrys = read_odometry(args.data_path)
    render = m3d.pose_estimation.RayCastRenderer(camera)
    t0 = time.time()
    data_labels = {}
    for i in range(len(rgbds)):
        render_mesh = []
        mesh_pose = []
        odom = odometrys[i]
        # Transform every object's world-frame init pose into this frame's
        # camera frame, optionally refining it with ICP.
        for key, value in init_poses.items():
            for arr in value:
                pose = np.array(arr).reshape((4, 4))
                render_mesh.append(models[key])
                pose = np.linalg.inv(odom) @ pose
                if args.local_refine:
                    pose = refine_local_pose(
                        models[key], rgbds[i][0], rgbds[i][1], camera, pose)
                mesh_pose.append(pose)
        ret = render.cast_rays(render_mesh, mesh_pose)
        # rendering instance map
        instance_map = render.get_instance_map().numpy()
        label = generate_label_and_save_mask(
            args.data_path, instance_map, init_poses, mesh_pose, file_names[i], args.minimum_obj_pixel)
        data_labels[file_names[i]] = label
        # create visible instance mask image (color overlay for inspection)
        mask = np.zeros(
            (instance_map.shape[0], instance_map.shape[1], 3), dtype=np.uint8)
        index = np.zeros(
            (instance_map.shape[0], instance_map.shape[1]), dtype=np.bool_)
        color = rgbds[i][0]
        for j in range(len(render_mesh)):
            mask[instance_map == j] = Colors()(j, True)
            index[instance_map == j] = True
        # Blend the colored mask onto the RGB image only where objects are.
        color[index] = cv2.addWeighted(color, 0.6, mask, 0.3, 0)[index]
        cv2.imwrite(os.path.join(args.data_path, 'mask_vis',
                                 file_names[i] + '_vis.png'), color)
        # visualization (blocks until a key is pressed)
        if args.vis:
            cv2.namedWindow('Instance Mask Rendering', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Instance Mask Rendering', color)
            key = cv2.waitKey(0)
    print('Time:', time.time() - t0)
    # save results inside data path
    with open(os.path.join(args.data_path, 'labels.json'), 'w') as f:
        json.dump(data_labels, f, indent=4)
8e134039300d24901c980001289804f5839ff7cf | 20,908 | py | Python | demisto_sdk/commands/doc_reviewer/doc_reviewer.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | [
"MIT"
] | null | null | null | demisto_sdk/commands/doc_reviewer/doc_reviewer.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | [
"MIT"
] | null | null | null | demisto_sdk/commands/doc_reviewer/doc_reviewer.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | [
"MIT"
] | null | null | null | import os
import re
import ssl
import string
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
import click
import nltk
from nltk.corpus import brown, webtext
from spellchecker import SpellChecker
from demisto_sdk.commands.common.constants import (PACKS_PACK_IGNORE_FILE_NAME,
FileType)
from demisto_sdk.commands.common.content import (Content, Integration,
Playbook, ReleaseNote, Script,
path_to_pack_object)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
TextObject
from demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.yaml_content_object import \
YAMLContentObject
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.tools import find_type
from demisto_sdk.commands.doc_reviewer.known_words import KNOWN_WORDS
from demisto_sdk.commands.doc_reviewer.rn_checker import ReleaseNotesChecker
class DocReviewer:
"""Perform a spell check on the given .yml or .md file.
"""
SUPPORTED_FILE_TYPES = [FileType.INTEGRATION, FileType.SCRIPT, FileType.PLAYBOOK, FileType.README,
FileType.DESCRIPTION, FileType.RELEASE_NOTES, FileType.BETA_INTEGRATION,
FileType.TEST_PLAYBOOK, FileType.TEST_SCRIPT]
@staticmethod
def find_known_words_from_pack(file_path: str) -> Tuple[str, list]:
"""Find known words in file_path's pack.
Args:
file_path: The path of the file within the pack
Return (the known words file path or '' if it was not found, list of known words)
"""
file_path_obj = Path(file_path)
if 'Packs' in file_path_obj.parts:
pack_name = file_path_obj.parts[file_path_obj.parts.index('Packs') + 1]
packs_ignore_path = os.path.join("Packs", pack_name, PACKS_PACK_IGNORE_FILE_NAME)
if os.path.isfile(packs_ignore_path):
config = ConfigParser(allow_no_value=True)
config.read(packs_ignore_path)
if 'known_words' in config.sections():
packs_known_words = [known_word for known_word in config['known_words']]
return (packs_ignore_path, packs_known_words)
else:
click.secho(f'\nNo [known_words] section was found within: {packs_ignore_path}', fg='yellow')
return (packs_ignore_path, [])
click.secho(f'\nNo .pack-ignore file was found within pack: {packs_ignore_path}', fg='yellow')
return '', []
click.secho(f'\nCould not load pack\'s known words file since no pack structure was found for {file_path}'
f'\nMake sure you are running from the content directory.', fg='bright_red')
return '', []
@staticmethod
def is_upper_case_word_plural(word):
"""check if a given word is an upper case word in plural, like: URLs, IPs, etc"""
if len(word) > 2 and word[-1] == 's':
singular_word = word[:-1]
return singular_word == singular_word.upper()
return False
def is_camel_case(self, word):
"""check if a given word is in camel case"""
if word != word.lower() and word != word.upper() and "_" not in word and word != word.title():
# check if word is an upper case plural, like IPs. If it is, then the word is not in camel case
return not self.is_upper_case_word_plural(self.remove_punctuation(word))
return False
    @staticmethod
    def camel_case_split(camel):
        """split camel case word into sub-words"""
        # one token per lower-case run, optionally preceded by a single capital
        # NOTE(review): str.replace swaps every occurrence of a repeated token at
        # once; the trailing .split() collapses the extra whitespace, so the
        # result still contains one entry per matched token.
        tokens = re.compile('([A-Z]?[a-z]+)').findall(camel)
        for token in tokens:
            # double space to handle capital words like IP/URL/DNS that not included in the regex
            camel = camel.replace(token, f' {token} ')
        return camel.split()
def get_all_md_and_yml_files_in_dir(self, dir_name):
"""recursively get all the supported files from a given dictionary"""
for root, _, files in os.walk(dir_name):
for file_name in files:
full_path = (os.path.join(root, file_name))
if find_type(
full_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(str(full_path))
    def get_files_to_run_on(self, file_path=None):
        """Get all the relevant files that the spell-check could work on"""
        if self.git_util:
            # NOTE(review): get_files_from_git is not defined anywhere in this copy of
            # the class -- confirm it exists in the full version of the file.
            self.get_files_from_git()
        elif os.path.isdir(file_path):
            # directories are expanded recursively
            self.get_all_md_and_yml_files_in_dir(file_path)
        elif find_type(
            file_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
        ) in self.SUPPORTED_FILE_TYPES:
            self.files.append(file_path)
@staticmethod
def run_doc_review(self):
"""Runs spell-check on the given file and release notes check if relevant.
Returns:
bool. True if no problematic words found, False otherwise.
"""
click.secho('\n================= Starting Doc Review =================', fg='bright_cyan')
if len(self.SUPPORTED_FILE_TYPES) == 1:
click.secho('Running only on release notes', fg='bright_cyan')
if self.file_paths:
for file_path in self.file_paths:
self.get_files_to_run_on(file_path)
else:
self.get_files_to_run_on()
# no eligible files found
if not self.files:
click.secho("Could not find any relevant files - Aborting.")
return True
self.add_known_words()
for file in self.files:
click.echo(f'\nChecking file {file}')
restarted_spellchecker = self.update_known_words_from_pack(file)
if restarted_spellchecker:
self.add_known_words()
self.unknown_words = {}
if file.endswith('.md'):
self.check_md_file(file)
elif file.endswith('.yml'):
self.check_yaml(file)
if self.unknown_words:
click.secho(f"\n - Words that might be misspelled were found in "
f"{file}:", fg='bright_red')
self.print_unknown_words(unknown_words=self.unknown_words)
self.found_misspelled = True
self.files_with_misspells.add(file)
else:
click.secho(f" - No misspelled words found in {file}", fg='green')
self.files_without_misspells.add(file)
self.print_file_report()
if self.found_misspelled and not self.no_failure:
return False
return True
def update_known_words_from_pack(self, file_path: str) -> bool:
"""Update spellchecker with the file's pack's known words.
Args:
file_path: The path of the file to update the spellchecker with the packs known words.
Return True if spellchecker was restarted, False otherwise
"""
restarted_spellchecker = False
if self.load_known_words_from_pack:
known_pack_words_file_path, known_words = self.find_known_words_from_pack(file_path)
if self.known_pack_words_file_path != known_pack_words_file_path:
click.secho(f'\nUsing known words file found within pack: {known_pack_words_file_path}', fg='yellow')
if self.known_pack_words_file_path:
# Restart Spellchecker to remove old known_words packs file
self.spellchecker = SpellChecker()
self.known_pack_words_file_path = ''
restarted_spellchecker = True
if known_pack_words_file_path:
self.known_pack_words_file_path = known_pack_words_file_path
if known_words:
# Add the new known_words packs file
self.spellchecker.word_frequency.load_words(known_words)
return restarted_spellchecker
def add_known_words(self):
"""Add known words to the spellchecker from external and internal files"""
# adding known words file if given - these words will not count as misspelled
if self.known_words_file_paths:
for known_words_file_path in self.known_words_file_paths:
self.spellchecker.word_frequency.load_text_file(known_words_file_path)
# adding the KNOWN_WORDS to the spellchecker recognized words.
self.spellchecker.word_frequency.load_words(KNOWN_WORDS)
if self.expand_dictionary:
# nltk - natural language tool kit - is a large package containing several dictionaries.
# to use it we need to download one of it's dictionaries - we will use the
# reasonably sized "brown" and "webtext" dicts.
# to avoid SSL download error we disable SSL connection.
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# downloading "brown" and "webtext" sets from nltk.
click.secho("Downloading expanded dictionary, this may take a minute...", fg='yellow')
nltk.download('brown')
nltk.download('webtext')
# adding nltk's word set to spellchecker.
self.spellchecker.word_frequency.load_words(brown.words())
self.spellchecker.word_frequency.load_words(webtext.words())
    @staticmethod
    def remove_punctuation(word):
        """Strip leading and trailing ASCII punctuation (string.punctuation) from *word*."""
        return word.strip(string.punctuation)
    def check_word(self, word):
        """Check if a word is legal.

        Misspelled words are recorded in self.unknown_words, mapped to up to
        five suggested corrections.
        """
        # check camel cases
        if not self.no_camel_case and self.is_camel_case(word):
            word = self.remove_punctuation(word)
            sub_words = self.camel_case_split(word)
            for sub_word in sub_words:
                sub_word = self.remove_punctuation(sub_word)
                if sub_word.isalpha() and self.spellchecker.unknown([sub_word]):
                    # suggestions are keyed by the full (de-punctuated) word even
                    # though they were generated for the misspelled sub-word;
                    # later sub-words overwrite earlier ones
                    self.unknown_words[word] = list(self.spellchecker.candidates(sub_word))[:5]
        else:
            word = self.remove_punctuation(word)
            if word.isalpha() and self.spellchecker.unknown([word]):
                self.unknown_words[word] = list(self.spellchecker.candidates(word))[:5]
        if word in self.unknown_words.keys() and word in self.unknown_words[word]:
            # Do not suggest the same word as a correction.
            self.unknown_words[word].remove(word)
def check_md_file(self, file_path):
"""Runs spell check on .md file. Adds unknown words to given unknown_words set.
Also if RN file will review it and add it to malformed RN file set if needed.
"""
pack_object: TextObject = path_to_pack_object(file_path)
md_file_lines = pack_object.to_str().split('\n')
if isinstance(pack_object, ReleaseNote):
good_rn = ReleaseNotesChecker(file_path, md_file_lines).check_rn()
if not good_rn:
self.malformed_rn_files.add(file_path)
for line in md_file_lines:
for word in line.split():
self.check_word(word)
def check_yaml(self, file_path):
"""Runs spell check on .yml file. Adds unknown words to given unknown_words set.
Args:
file_path (str): The file path to the yml file.
"""
pack_object: YAMLContentObject = path_to_pack_object(file_path)
yml_info = pack_object.to_dict()
if isinstance(pack_object, Integration):
self.check_spelling_in_integration(yml_info)
elif isinstance(pack_object, Script):
self.check_spelling_in_script(yml_info)
elif isinstance(pack_object, Playbook):
self.check_spelling_in_playbook(yml_info)
def check_spelling_in_integration(self, yml_file):
"""Check spelling on an integration file"""
self.check_params(yml_file.get('configuration', []))
self.check_commands(yml_file.get('script', {}).get('commands', []))
self.check_display_and_description(yml_file.get('display'), yml_file.get('description'))
def check_params(self, param_list):
"""Check spelling in integration parameters"""
for param_conf in param_list:
param_display = param_conf.get('display')
if param_display:
for word in param_display.split():
self.check_word(word)
param_toolip = param_conf.get('additionalinfo')
if param_toolip:
for word in param_toolip.split():
self.check_word(word)
def check_commands(self, command_list):
"""Check spelling in integration commands"""
for command in command_list:
command_arguments = command.get('arguments', [])
for argument in command_arguments:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
command_description = command.get('description')
if command_description:
for word in command_description.split():
self.check_word(word)
command_outputs = command.get('outputs', [])
for output in command_outputs:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_display_and_description(self, display, description):
"""check integration display name and description"""
if display:
for word in display.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_spelling_in_script(self, yml_file):
"""Check spelling in script file"""
self.check_comment(yml_file.get('comment'))
self.check_script_args(yml_file.get('args', []))
self.check_script_outputs(yml_file.get('outputs', []))
def check_script_args(self, arg_list):
"""Check spelling in script arguments"""
for argument in arg_list:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
    def check_comment(self, comment):
        """Spell-check each whitespace-separated word of a script's comment (no-op for None/empty)."""
        if comment:
            for word in comment.split():
                self.check_word(word)
def check_script_outputs(self, outputs_list):
"""Check spelling in script outputs"""
for output in outputs_list:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_spelling_in_playbook(self, yml_file):
"""Check spelling in playbook file"""
self.check_playbook_description_and_name(yml_file.get('description'), yml_file.get('name'))
self.check_tasks(yml_file.get('tasks', {}))
def check_playbook_description_and_name(self, description, name):
"""Check spelling in playbook description and name"""
if name:
for word in name.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_tasks(self, task_dict):
"""Check spelling in playbook tasks"""
for task_key in task_dict.keys():
task_info = task_dict[task_key].get('task')
if task_info:
task_description = task_info.get('description')
if task_description:
for word in task_description.split():
self.check_word(word)
task_name = task_info.get('name')
if task_name:
for word in task_name.split():
self.check_word(word)
| 43.832285 | 131 | 0.626602 | import os
import re
import ssl
import string
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
import click
import nltk
from nltk.corpus import brown, webtext
from spellchecker import SpellChecker
from demisto_sdk.commands.common.constants import (PACKS_PACK_IGNORE_FILE_NAME,
FileType)
from demisto_sdk.commands.common.content import (Content, Integration,
Playbook, ReleaseNote, Script,
path_to_pack_object)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
TextObject
from demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.yaml_content_object import \
YAMLContentObject
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.tools import find_type
from demisto_sdk.commands.doc_reviewer.known_words import KNOWN_WORDS
from demisto_sdk.commands.doc_reviewer.rn_checker import ReleaseNotesChecker
class DocReviewer:
"""Perform a spell check on the given .yml or .md file.
"""
SUPPORTED_FILE_TYPES = [FileType.INTEGRATION, FileType.SCRIPT, FileType.PLAYBOOK, FileType.README,
FileType.DESCRIPTION, FileType.RELEASE_NOTES, FileType.BETA_INTEGRATION,
FileType.TEST_PLAYBOOK, FileType.TEST_SCRIPT]
    def __init__(self, file_paths: Optional[List] = None, known_words_file_paths: Optional[List] = None,
                 no_camel_case: bool = False, no_failure: bool = False, expand_dictionary: bool = False,
                 templates: bool = False, use_git: bool = False, prev_ver: str = None, release_notes_only: bool = False,
                 load_known_words_from_pack: bool = False):
        """Configure the doc reviewer.

        Args:
            file_paths: explicit files/directories to check; empty falls back to git.
            known_words_file_paths: extra known-words files fed to the spellchecker.
            no_camel_case: skip camel-case splitting when True.
            no_failure: report success even when misspells are found.
            expand_dictionary: also load the nltk 'brown'/'webtext' word sets.
            templates: print release-notes template examples and exit the process.
            use_git: collect the files to check from git changes.
            prev_ver: git revision to diff against (defaults to 'demisto/master').
            release_notes_only: restrict the review to release-notes files.
            load_known_words_from_pack: also load each pack's .pack-ignore known words.
        """
        if templates:
            # template mode only prints examples; the process exits immediately
            ReleaseNotesChecker(template_examples=True)
            sys.exit(0)
        # if nothing entered will default to use git
        elif not file_paths and not use_git:
            use_git = True
        self.file_paths = file_paths if file_paths else []
        self.git_util = None
        if use_git:
            self.git_util = GitUtil(repo=Content.git())
            self.prev_ver = self.git_util.handle_prev_ver()[1]
        else:
            self.prev_ver = prev_ver if prev_ver else 'demisto/master'
        if release_notes_only:
            # shadow the class-level list on this instance only
            self.SUPPORTED_FILE_TYPES = [FileType.RELEASE_NOTES]
            # if running doc-review --release-notes there is no need to consider invalid schema files of yml/json
            self.ignore_invalid_schema_file = True
        else:
            self.ignore_invalid_schema_file = False
        self.known_words_file_paths = known_words_file_paths if known_words_file_paths else []
        self.load_known_words_from_pack = load_known_words_from_pack
        self.known_pack_words_file_path = ''
        self.current_pack = None
        self.files: list = []
        self.spellchecker = SpellChecker()
        self.unknown_words = {}  # type:Dict
        self.no_camel_case = no_camel_case
        self.found_misspelled = False
        self.no_failure = no_failure
        self.expand_dictionary = expand_dictionary
        self.files_with_misspells = set()  # type:Set
        self.files_without_misspells = set()  # type:Set
        self.malformed_rn_files = set()  # type:Set
@staticmethod
def find_known_words_from_pack(file_path: str) -> Tuple[str, list]:
"""Find known words in file_path's pack.
Args:
file_path: The path of the file within the pack
Return (the known words file path or '' if it was not found, list of known words)
"""
file_path_obj = Path(file_path)
if 'Packs' in file_path_obj.parts:
pack_name = file_path_obj.parts[file_path_obj.parts.index('Packs') + 1]
packs_ignore_path = os.path.join("Packs", pack_name, PACKS_PACK_IGNORE_FILE_NAME)
if os.path.isfile(packs_ignore_path):
config = ConfigParser(allow_no_value=True)
config.read(packs_ignore_path)
if 'known_words' in config.sections():
packs_known_words = [known_word for known_word in config['known_words']]
return (packs_ignore_path, packs_known_words)
else:
click.secho(f'\nNo [known_words] section was found within: {packs_ignore_path}', fg='yellow')
return (packs_ignore_path, [])
click.secho(f'\nNo .pack-ignore file was found within pack: {packs_ignore_path}', fg='yellow')
return '', []
click.secho(f'\nCould not load pack\'s known words file since no pack structure was found for {file_path}'
f'\nMake sure you are running from the content directory.', fg='bright_red')
return '', []
@staticmethod
def is_upper_case_word_plural(word):
"""check if a given word is an upper case word in plural, like: URLs, IPs, etc"""
if len(word) > 2 and word[-1] == 's':
singular_word = word[:-1]
return singular_word == singular_word.upper()
return False
def is_camel_case(self, word):
"""check if a given word is in camel case"""
if word != word.lower() and word != word.upper() and "_" not in word and word != word.title():
# check if word is an upper case plural, like IPs. If it is, then the word is not in camel case
return not self.is_upper_case_word_plural(self.remove_punctuation(word))
return False
@staticmethod
def camel_case_split(camel):
"""split camel case word into sub-words"""
tokens = re.compile('([A-Z]?[a-z]+)').findall(camel)
for token in tokens:
# double space to handle capital words like IP/URL/DNS that not included in the regex
camel = camel.replace(token, f' {token} ')
return camel.split()
def get_all_md_and_yml_files_in_dir(self, dir_name):
"""recursively get all the supported files from a given dictionary"""
for root, _, files in os.walk(dir_name):
for file_name in files:
full_path = (os.path.join(root, file_name))
if find_type(
full_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(str(full_path))
    def gather_all_changed_files(self):
        """Return the set of files modified, added, or renamed since self.prev_ver.

        Renamed files are reported under their current (new) names.
        """
        modified = self.git_util.modified_files(prev_ver=self.prev_ver)  # type: ignore[union-attr]
        added = self.git_util.added_files(prev_ver=self.prev_ver)  # type: ignore[union-attr]
        renamed = self.git_util.renamed_files(prev_ver=self.prev_ver, get_only_current_file_names=True)  # type: ignore[union-attr]
        return modified.union(added).union(renamed)  # type: ignore[arg-type]
def get_files_from_git(self):
click.secho('Gathering all changed files from git', fg='bright_cyan')
for file in self.gather_all_changed_files():
file = str(file)
if os.path.isfile(file) and find_type(
file, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(file)
def get_files_to_run_on(self, file_path=None):
"""Get all the relevant files that the spell-check could work on"""
if self.git_util:
self.get_files_from_git()
elif os.path.isdir(file_path):
self.get_all_md_and_yml_files_in_dir(file_path)
elif find_type(
file_path, ignore_invalid_schema_file=self.ignore_invalid_schema_file
) in self.SUPPORTED_FILE_TYPES:
self.files.append(file_path)
@staticmethod
def print_unknown_words(unknown_words):
for word, corrections in unknown_words.items():
click.secho(f' - {word} - did you mean: {corrections}', fg='bright_red')
click.secho('If these are not misspelled consider adding them to a known_words file:\n'
' Pack related words: content/Packs/<PackName>/.pack-ignore under the [known_words] section.\n'
' Not pack specific words: content/Tests/known_words.txt\n'
'To test locally add --use-packs-known-words or --known-words flags.', fg='yellow')
def print_file_report(self):
if self.files_without_misspells:
click.secho('\n================= Files Without Misspells =================', fg='green')
no_misspells_string = '\n'.join(self.files_without_misspells)
click.secho(no_misspells_string, fg='green')
if self.files_with_misspells:
click.secho('\n================= Files With Misspells =================', fg='bright_red')
misspells_string = '\n'.join(self.files_with_misspells)
click.secho(misspells_string, fg='bright_red')
if self.malformed_rn_files:
click.secho('\n================= Malformed Release Notes =================', fg='bright_red')
bad_rn = '\n'.join(self.malformed_rn_files)
click.secho(bad_rn, fg='bright_red')
def run_doc_review(self):
"""Runs spell-check on the given file and release notes check if relevant.
Returns:
bool. True if no problematic words found, False otherwise.
"""
click.secho('\n================= Starting Doc Review =================', fg='bright_cyan')
if len(self.SUPPORTED_FILE_TYPES) == 1:
click.secho('Running only on release notes', fg='bright_cyan')
if self.file_paths:
for file_path in self.file_paths:
self.get_files_to_run_on(file_path)
else:
self.get_files_to_run_on()
# no eligible files found
if not self.files:
click.secho("Could not find any relevant files - Aborting.")
return True
self.add_known_words()
for file in self.files:
click.echo(f'\nChecking file {file}')
restarted_spellchecker = self.update_known_words_from_pack(file)
if restarted_spellchecker:
self.add_known_words()
self.unknown_words = {}
if file.endswith('.md'):
self.check_md_file(file)
elif file.endswith('.yml'):
self.check_yaml(file)
if self.unknown_words:
click.secho(f"\n - Words that might be misspelled were found in "
f"{file}:", fg='bright_red')
self.print_unknown_words(unknown_words=self.unknown_words)
self.found_misspelled = True
self.files_with_misspells.add(file)
else:
click.secho(f" - No misspelled words found in {file}", fg='green')
self.files_without_misspells.add(file)
self.print_file_report()
if self.found_misspelled and not self.no_failure:
return False
return True
def update_known_words_from_pack(self, file_path: str) -> bool:
"""Update spellchecker with the file's pack's known words.
Args:
file_path: The path of the file to update the spellchecker with the packs known words.
Return True if spellchecker was restarted, False otherwise
"""
restarted_spellchecker = False
if self.load_known_words_from_pack:
known_pack_words_file_path, known_words = self.find_known_words_from_pack(file_path)
if self.known_pack_words_file_path != known_pack_words_file_path:
click.secho(f'\nUsing known words file found within pack: {known_pack_words_file_path}', fg='yellow')
if self.known_pack_words_file_path:
# Restart Spellchecker to remove old known_words packs file
self.spellchecker = SpellChecker()
self.known_pack_words_file_path = ''
restarted_spellchecker = True
if known_pack_words_file_path:
self.known_pack_words_file_path = known_pack_words_file_path
if known_words:
# Add the new known_words packs file
self.spellchecker.word_frequency.load_words(known_words)
return restarted_spellchecker
def add_known_words(self):
"""Add known words to the spellchecker from external and internal files"""
# adding known words file if given - these words will not count as misspelled
if self.known_words_file_paths:
for known_words_file_path in self.known_words_file_paths:
self.spellchecker.word_frequency.load_text_file(known_words_file_path)
# adding the KNOWN_WORDS to the spellchecker recognized words.
self.spellchecker.word_frequency.load_words(KNOWN_WORDS)
if self.expand_dictionary:
# nltk - natural language tool kit - is a large package containing several dictionaries.
# to use it we need to download one of it's dictionaries - we will use the
# reasonably sized "brown" and "webtext" dicts.
# to avoid SSL download error we disable SSL connection.
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# downloading "brown" and "webtext" sets from nltk.
click.secho("Downloading expanded dictionary, this may take a minute...", fg='yellow')
nltk.download('brown')
nltk.download('webtext')
# adding nltk's word set to spellchecker.
self.spellchecker.word_frequency.load_words(brown.words())
self.spellchecker.word_frequency.load_words(webtext.words())
@staticmethod
def remove_punctuation(word):
"""remove leading and trailing punctuation"""
return word.strip(string.punctuation)
def check_word(self, word):
"""Check if a word is legal"""
# check camel cases
if not self.no_camel_case and self.is_camel_case(word):
word = self.remove_punctuation(word)
sub_words = self.camel_case_split(word)
for sub_word in sub_words:
sub_word = self.remove_punctuation(sub_word)
if sub_word.isalpha() and self.spellchecker.unknown([sub_word]):
self.unknown_words[word] = list(self.spellchecker.candidates(sub_word))[:5]
else:
word = self.remove_punctuation(word)
if word.isalpha() and self.spellchecker.unknown([word]):
self.unknown_words[word] = list(self.spellchecker.candidates(word))[:5]
if word in self.unknown_words.keys() and word in self.unknown_words[word]:
# Do not suggest the same word as a correction.
self.unknown_words[word].remove(word)
def check_md_file(self, file_path):
"""Runs spell check on .md file. Adds unknown words to given unknown_words set.
Also if RN file will review it and add it to malformed RN file set if needed.
"""
pack_object: TextObject = path_to_pack_object(file_path)
md_file_lines = pack_object.to_str().split('\n')
if isinstance(pack_object, ReleaseNote):
good_rn = ReleaseNotesChecker(file_path, md_file_lines).check_rn()
if not good_rn:
self.malformed_rn_files.add(file_path)
for line in md_file_lines:
for word in line.split():
self.check_word(word)
def check_yaml(self, file_path):
"""Runs spell check on .yml file. Adds unknown words to given unknown_words set.
Args:
file_path (str): The file path to the yml file.
"""
pack_object: YAMLContentObject = path_to_pack_object(file_path)
yml_info = pack_object.to_dict()
if isinstance(pack_object, Integration):
self.check_spelling_in_integration(yml_info)
elif isinstance(pack_object, Script):
self.check_spelling_in_script(yml_info)
elif isinstance(pack_object, Playbook):
self.check_spelling_in_playbook(yml_info)
def check_spelling_in_integration(self, yml_file):
"""Check spelling on an integration file"""
self.check_params(yml_file.get('configuration', []))
self.check_commands(yml_file.get('script', {}).get('commands', []))
self.check_display_and_description(yml_file.get('display'), yml_file.get('description'))
def check_params(self, param_list):
"""Check spelling in integration parameters"""
for param_conf in param_list:
param_display = param_conf.get('display')
if param_display:
for word in param_display.split():
self.check_word(word)
param_toolip = param_conf.get('additionalinfo')
if param_toolip:
for word in param_toolip.split():
self.check_word(word)
def check_commands(self, command_list):
"""Check spelling in integration commands"""
for command in command_list:
command_arguments = command.get('arguments', [])
for argument in command_arguments:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
command_description = command.get('description')
if command_description:
for word in command_description.split():
self.check_word(word)
command_outputs = command.get('outputs', [])
for output in command_outputs:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_display_and_description(self, display, description):
"""check integration display name and description"""
if display:
for word in display.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_spelling_in_script(self, yml_file):
"""Check spelling in script file"""
self.check_comment(yml_file.get('comment'))
self.check_script_args(yml_file.get('args', []))
self.check_script_outputs(yml_file.get('outputs', []))
def check_script_args(self, arg_list):
"""Check spelling in script arguments"""
for argument in arg_list:
arg_description = argument.get('description')
if arg_description:
for word in arg_description.split():
self.check_word(word)
def check_comment(self, comment):
"""Check spelling in script comment"""
if comment:
for word in comment.split():
self.check_word(word)
def check_script_outputs(self, outputs_list):
"""Check spelling in script outputs"""
for output in outputs_list:
output_description = output.get('description')
if output_description:
for word in output_description.split():
self.check_word(word)
def check_spelling_in_playbook(self, yml_file):
"""Check spelling in playbook file"""
self.check_playbook_description_and_name(yml_file.get('description'), yml_file.get('name'))
self.check_tasks(yml_file.get('tasks', {}))
def check_playbook_description_and_name(self, description, name):
"""Check spelling in playbook description and name"""
if name:
for word in name.split():
self.check_word(word)
if description:
for word in description.split():
self.check_word(word)
def check_tasks(self, task_dict):
"""Check spelling in playbook tasks"""
for task_key in task_dict.keys():
task_info = task_dict[task_key].get('task')
if task_info:
task_description = task_info.get('description')
if task_description:
for word in task_description.split():
self.check_word(word)
task_name = task_info.get('name')
if task_name:
for word in task_name.split():
self.check_word(word)
| 4,087 | 0 | 134 |
2fc2dadded385489f36ae8202bdaf7d1a553a3eb | 1,435 | py | Python | step by step/Dominant topic.py | YuchenTan777/CCI-S2-Coding-Two-Final | 0f2faacbae741c0809f6de3f8ef2d5c16a009583 | [
"MIT"
] | null | null | null | step by step/Dominant topic.py | YuchenTan777/CCI-S2-Coding-Two-Final | 0f2faacbae741c0809f6de3f8ef2d5c16a009583 | [
"MIT"
] | null | null | null | step by step/Dominant topic.py | YuchenTan777/CCI-S2-Coding-Two-Final | 0f2faacbae741c0809f6de3f8ef2d5c16a009583 | [
"MIT"
] | null | null | null |
df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_ready)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.head(10)
def format_topics_sentences(ldamodel=None, corpus=None, texts=None):
    """Build a DataFrame with the dominant topic of every document.

    Fixes over the previous version: the defaults ``corpus=corpus, texts=data``
    were evaluated at definition time and raised NameError unless matching
    globals existed (the script actually uses ``data_ready``); ``DataFrame.append``
    was O(n^2) per row and removed in pandas 2.0; and assigning ``.columns`` on
    an empty frame crashed when the corpus was empty.

    Args:
        ldamodel: trained gensim LDA model (or compatible object).
        corpus: bag-of-words corpus the model is applied to.
        texts: original documents, appended as the last (unnamed) column.

    Returns:
        pd.DataFrame with columns
        ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords', <texts>].
    """
    records = []
    for row_list in ldamodel[corpus]:
        # with per_word_topics the model yields (topic_dist, word_topics, ...) tuples
        row = row_list[0] if ldamodel.per_word_topics else row_list
        if not row:
            # document with no topic assignment: keep row alignment with texts
            records.append((None, None, None))
            continue
        # dominant topic = highest-probability entry (first one on ties,
        # matching the old sort-descending-take-first behaviour)
        topic_num, prop_topic = max(row, key=lambda item: item[1])
        topic_keywords = ", ".join(word for word, _prop in ldamodel.show_topic(topic_num))
        records.append((int(topic_num), round(prop_topic, 4), topic_keywords))
    sent_topics_df = pd.DataFrame(
        records, columns=['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords'])
    # Add original text to the end of the output
    contents = pd.Series(texts)
    return pd.concat([sent_topics_df, contents], axis=1)
df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_ready)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.head(10)
| 1,109 | 0 | 22 |
31b014feff5c20c649627b27c2443f43f373894c | 410 | py | Python | _init_paths.py | klo9klo9kloi/win_det_heatmaps | fc427bcd593831d627698455b8917eb37add3f6e | [
"MIT"
] | 29 | 2020-07-27T10:49:09.000Z | 2022-03-17T02:15:03.000Z | _init_paths.py | klo9klo9kloi/win_det_heatmaps | fc427bcd593831d627698455b8917eb37add3f6e | [
"MIT"
] | 6 | 2020-09-30T01:51:34.000Z | 2022-01-02T08:00:22.000Z | _init_paths.py | klo9klo9kloi/win_det_heatmaps | fc427bcd593831d627698455b8917eb37add3f6e | [
"MIT"
] | 10 | 2020-07-31T00:43:38.000Z | 2022-03-07T02:45:25.000Z | import os
import sys
this_dir = os.path.dirname(__file__)
add_path(os.path.join(this_dir))
add_path(os.path.join(this_dir, 'common'))
add_path(os.path.join(this_dir, 'common_pytorch'))
print("=================SYS PATH================")
for path in sys.path:
print(path)
print("=================SYS PATH================")
| 24.117647 | 50 | 0.595122 | import os
import sys
def add_path(path):
    """Prepend *path* to ``sys.path`` unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Make the project root and its helper packages importable, then dump
# sys.path for debugging.
this_dir = os.path.dirname(__file__)
add_path(os.path.join(this_dir))
add_path(os.path.join(this_dir, 'common'))
add_path(os.path.join(this_dir, 'common_pytorch'))
print("=================SYS PATH================")
for path in sys.path:
    print(path)
print("=================SYS PATH================")
| 60 | 0 | 23 |
22d1d15046cddd021171b44b762d0bc39889be72 | 8,131 | py | Python | stephanie_protocol/step1_template.py | caseystone/ot2-protocols | ca8ae72b5298c8064d91139f266996d0cb4d03cc | [
"MIT"
] | null | null | null | stephanie_protocol/step1_template.py | caseystone/ot2-protocols | ca8ae72b5298c8064d91139f266996d0cb4d03cc | [
"MIT"
] | null | null | null | stephanie_protocol/step1_template.py | caseystone/ot2-protocols | ca8ae72b5298c8064d91139f266996d0cb4d03cc | [
"MIT"
] | null | null | null | ### start
import profile
import sys
import os
import pandas as pd
from opentrons import protocol_api, simulate, execute
import json
import argparse
### end
"""
step1_tepmplate.py
Description: Generates python protocol file that can be uploaded directly to Opentrons app
Usage: python step1_template.py -r <number of reactions> -f <output file name>
ex. pyhton step1_template.py -r 96 -f protocol1.py
Note: Make sure you have the opentrons package installed in your termial environment
General Protocol Steps:
1.) Make master mix
2.) Add 15uL master mix to each reaction well
3.) Run thermocycler
4.) Hold thermocycler plate at 4C until human intervention
After the output python file is generated, upload the output file to the Opentrons app to run the protocol
"""
# HELPER METHODS ------------------------------------------------------------------
def write_protocol(num_rxns, mm_volumes_dict, file_name):
    """ write_protocol
    Description: Copies anything between '### start' and '### end' comments in this file to new protocol file
    Writes num_rxns and mm_volumes dict variables into output protocol at '### VARIABLES' location.
    Output protocol will be in same directory with name specified by file_name variable (user provided)
    Parameters:
    num_rxns: (int) number of rxns to perform (1-96)
    mm_volumes_dict: dictionatry of master mix source wells to volumes
    file_name: (str) user specifiec output file name (ex. 'protocol_02.py')
    """
    current_file_path = __file__
    # Output file is created next to this template file.
    output_filepath = os.path.join(os.path.split(current_file_path)[0], file_name)
    try:
        with open(current_file_path, 'r') as open_this:
            with open(output_filepath, 'w+') as open_that:
                contents_this = open_this.readlines()
                for i in range(len(contents_this)):
                    # Copy every marked region of this template verbatim.
                    if contents_this[i].startswith("### start"):
                        j = i
                        # NOTE(review): an unmatched start marker makes this
                        # scan walk past the end of the list (IndexError),
                        # which the bare except below hides as a generic error.
                        while not contents_this[j].startswith("### end"):
                            j+=1
                        open_that.writelines(contents_this[i+1:j])
                    # Inject the run-specific variables at the placeholder.
                    if contents_this[i].startswith("### VARIABLES"):
                        open_that.write(f"\nnum_rxns = {str(num_rxns)}")
                        open_that.write(f"\nmm_volumes_dict = {str(mm_volumes_dict)}\n\n")
        return(f"Protocol created = {output_filepath} ")
    except:
        # NOTE(review): bare ``except:`` also swallows KeyboardInterrupt /
        # SystemExit and programming errors; ``except OSError`` is safer.
        return(f"Error: Could not write to protocol file\n{current_file_path}\n{output_filepath}")
def calculate_mm_volumes(num_rxns):
    """ calculate_mm_volumes
    Description: Calculates volumes of reagents needed to make master mix depending on number of reactions (num_rxns)
    Parameters:
    num_rxns: (int) number of rxns to perform (1-96)
    Output:
    mm_volumes_dict: dictionatry of master mix source wells to volumes
    NOTE: reagent source rack contains 5 1.5mL tubes
    A1 - RP Primer
    A2 - 5x Multimodal RT Buffer
    A3 - Nuclease-free Water
    A4 - Rnase Inhibitor
    A5 - EZ Reverse Transcriptase
    """
    # Per-reaction reagent volume (uL) for each source well; 10% overage
    # is applied below so the master mix never runs short.
    per_rxn_ul = {
        'A1': 1,  # RP Primer
        'A2': 4,  # 5x Multimodal RT Buffer
        'A3': 8,  # Nuclease-free Water
        'A4': 1,  # Rnase Inhibitor
        'A5': 1,  # EZ Reverse Transcriptase
    }
    overage = 1.1
    return {well: (num_rxns * ul) * overage for well, ul in per_rxn_ul.items()}
# MAIN METHOD --------------------------------------------------------------------
def generate_step1_from_template(num_rxns, file_name):
    """Validate the reaction count and write the step-1 protocol file.

    Parameters:
        num_rxns: (int) number of rxns to perform (1-96)
        file_name: (str) output file name (ex. 'protocol_02.py')
    """
    if num_rxns > 96 or num_rxns < 1:
        # BUG FIX: the original evaluated the bare name ``exit`` — a no-op
        # expression that never terminates the program — so an out-of-range
        # count still generated a protocol. Return early instead.
        print("number of reactions must be between 1 and 96")
        return
    mm_volumes_dict = calculate_mm_volumes(num_rxns)
    # write_protocol reports file problems via its return string; anything
    # it raises is a genuine error and is allowed to propagate.
    print(write_protocol(num_rxns, mm_volumes_dict, file_name))
if __name__ == "__main__":
# execute only if run as a script
main(sys.argv)
# ------------------------------------------ contents of protocol --------------------------------------------------
### start
# metadata
metadata = {
'protocolName': 'Step1',
'author': 'Name <email@address.com>',
'description': 'step1 of stephanies protocol',
'apiLevel': '2.12'
}
### end
### VARIABLES
### start
### end | 34.163866 | 136 | 0.64002 | ### start
import profile
import sys
import os
import pandas as pd
from opentrons import protocol_api, simulate, execute
import json
import argparse
### end
"""
step1_tepmplate.py
Description: Generates python protocol file that can be uploaded directly to Opentrons app
Usage: python step1_template.py -r <number of reactions> -f <output file name>
ex. pyhton step1_template.py -r 96 -f protocol1.py
Note: Make sure you have the opentrons package installed in your termial environment
General Protocol Steps:
1.) Make master mix
2.) Add 15uL master mix to each reaction well
3.) Run thermocycler
4.) Hold thermocycler plate at 4C until human intervention
After the output python file is generated, upload the output file to the Opentrons app to run the protocol
"""
# HELPER METHODS ------------------------------------------------------------------
def write_protocol(num_rxns, mm_volumes_dict, file_name):
    """Generate a runnable protocol file from the marked regions of this template.

    Copies anything between '### start' and '### end' comment markers in this
    file into a new protocol file, and writes the ``num_rxns`` /
    ``mm_volumes_dict`` variables into the output at the '### VARIABLES'
    marker. The output file is created in the same directory as this template.

    Parameters:
        num_rxns: (int) number of rxns to perform (1-96)
        mm_volumes_dict: dict mapping master-mix source wells to volumes (uL)
        file_name: (str) output file name (ex. 'protocol_02.py')

    Returns:
        A human-readable status string (success or error description).
    """
    current_file_path = __file__
    output_filepath = os.path.join(os.path.split(current_file_path)[0], file_name)
    try:
        with open(current_file_path, 'r') as open_this:
            with open(output_filepath, 'w+') as open_that:
                contents_this = open_this.readlines()
                n_lines = len(contents_this)
                for i in range(n_lines):
                    if contents_this[i].startswith("### start"):
                        j = i
                        # FIX: bound the scan so an unmatched start marker
                        # copies through end-of-file instead of raising
                        # IndexError (previously masked by the bare except).
                        while j < n_lines and not contents_this[j].startswith("### end"):
                            j += 1
                        open_that.writelines(contents_this[i + 1:j])
                    if contents_this[i].startswith("### VARIABLES"):
                        open_that.write(f"\nnum_rxns = {str(num_rxns)}")
                        open_that.write(f"\nmm_volumes_dict = {str(mm_volumes_dict)}\n\n")
        return f"Protocol created = {output_filepath} "
    except OSError:
        # FIX: was a bare ``except:``, which also swallowed KeyboardInterrupt,
        # SystemExit and programming errors. Only I/O failures are expected.
        return f"Error: Could not write to protocol file\n{current_file_path}\n{output_filepath}"
def calculate_mm_volumes(num_rxns):
    """ calculate_mm_volumes
    Description: Calculates volumes of reagents needed to make master mix depending on number of reactions (num_rxns)
    Parameters:
    num_rxns: (int) number of rxns to perform (1-96)
    Output:
    mm_volumes_dict: dictionatry of master mix source wells to volumes
    NOTE: reagent source rack contains 5 1.5mL tubes
    A1 - RP Primer
    A2 - 5x Multimodal RT Buffer
    A3 - Nuclease-free Water
    A4 - Rnase Inhibitor
    A5 - EZ Reverse Transcriptase
    """
    # Each volume = (reactions * uL-per-reaction) plus 10% overage (* 1.1).
    rp_primer_vol = (num_rxns * 1) * 1.1
    multi_buff_5x_vol = (num_rxns * 4) * 1.1
    nuc_free_water_volume = (num_rxns * 8) * 1.1
    rnase_inhibitor_vol = (num_rxns * 1) * 1.1
    ez_rev_trans_vol = (num_rxns * 1) * 1.1
    # Keys are the source-tube positions on the reagent rack (see NOTE above).
    mm_volumes_dict = {
        'A1': rp_primer_vol,
        'A2': multi_buff_5x_vol,
        'A3': nuc_free_water_volume,
        'A4': rnase_inhibitor_vol,
        'A5': ez_rev_trans_vol,
    }
    return mm_volumes_dict
# MAIN METHOD --------------------------------------------------------------------
def generate_step1_from_template(num_rxns, file_name):
    """ generate_step1_from_template
    Description: Handles num_rxns variable checking and pass to calculate_mm_volumes dict
    Paramerers:
    num_rxns: (int) number of rxns to perform (1-96).
    file_name: (str) user specifiec output file name (ex. 'protocol_02.py')
    """
    if num_rxns > 96 or num_rxns < 1:
        print("number of reactions must be between 1 and 96")
        # NOTE(review): BUG — bare ``exit`` is just an expression referencing
        # the site-module quitter object; it is never called, so execution
        # falls through and a protocol is still generated for out-of-range
        # counts. Should be ``return`` (or ``sys.exit(1)``).
        exit
    mm_volumes_dict = calculate_mm_volumes(num_rxns)
    try:
        # write_protocol returns a status string on success or failure.
        print(write_protocol(num_rxns, mm_volumes_dict, file_name))
    except OSError as e:
        # Re-raise unchanged; the try/except adds nothing beyond documentation.
        raise
    return
def main(args):
    """Parse the command line and generate the step-1 protocol file."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r", "--num_rxns",
        required=True,
        type=int,
        help="integer number of reactions to be completed in this step",
    )
    parser.add_argument(
        "-f", "--file_name",
        required=True,
        type=str,
        help="output filename for the protocol. include .py extension. (ex. 'step1_protocol.py')",
    )
    opts = vars(parser.parse_args())
    # Hand the validated arguments to the generator.
    generate_step1_from_template(opts["num_rxns"], opts["file_name"])
if __name__ == "__main__":
# execute only if run as a script
main(sys.argv)
# ------------------------------------------ contents of protocol --------------------------------------------------
### start
# metadata
metadata = {
'protocolName': 'Step1',
'author': 'Name <email@address.com>',
'description': 'step1 of stephanies protocol',
'apiLevel': '2.12'
}
### end
### VARIABLES
### start
def run(protocol: protocol_api.ProtocolContext):
    """Opentrons entry point: assemble the master mix, dispense 15uL per
    reaction well, then execute the RT thermocycler program.

    Relies on module-level ``num_rxns`` and ``mm_volumes_dict`` injected by
    write_protocol at the '### VARIABLES' marker.
    """
    # modules
    thermo_mod = protocol.load_module('thermocycler module')
    thermo_mod.open_lid() # opens lid and sets to 4C when it turns on
    thermo_mod.set_block_temperature(4)
    mag_mod = protocol.load_module("magnetic module gen2", '4')
    mag_mod.calibrate() # calibrate and disengage mag deck when it turns on
    mag_mod.disengage()
    # labware
    thermo_plate = thermo_mod.load_labware('nest_96_wellplate_100ul_pcr_full_skirt')
    tiprack1 = protocol.load_labware('opentrons_96_tiprack_20ul', '9')
    tiprack2 = protocol.load_labware('opentrons_96_tiprack_20ul', '6')
    tiprack3 = protocol.load_labware('opentrons_96_tiprack_1000uL', '3')
    tube_rack_1 = protocol.load_labware('opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap', '2') # 1.5mL tubes for mm ingredients
    tube_rack_2 = protocol.load_labware('opentrons_15_tuberack_nest_15ml_conical', '5') # 15 mL tubes to hold master mix
    # pipettes
    pipette_20uL_single = protocol.load_instrument('p20_single_gen2', 'right', tip_racks=[tiprack1, tiprack2])
    pipette_1000uL_single = protocol.load_instrument('p1000_single_gen2', 'left', tip_racks=[tiprack3])
    #* Protocol Commands
    # step 1: make master mix by pooling every reagent into tube A1
    mm_total_volume = 0 # track volume in mm tube
    for mm_ingredient_well in mm_volumes_dict:
        volume = round(mm_volumes_dict[mm_ingredient_well],2)
        mm_total_volume += volume # track volume in mm tube
        pipette_1000uL_single.transfer(
            volume,
            tube_rack_1[mm_ingredient_well],
            tube_rack_2['A1'],
            new_tip='always',
            mix_before=(3,volume))
    # mix master mix
    pipette_1000uL_single.pick_up_tip()
    # NOTE(review): max(500, total*0.6) mixes with AT LEAST 500uL, i.e. the
    # larger of the two — confirm min() was not intended for small batches.
    pipette_1000uL_single.mix(3, max(500,mm_total_volume*0.6),tube_rack_2['A1']) # mix with max of 500uL or 60% total mm volume in tube
    pipette_1000uL_single.drop_tip()
    # step 2: transfer 15uL mm to each rxn well on thermocycler
    for i in range(num_rxns):
        pipette_20uL_single.transfer(
            15,
            tube_rack_2['A1'],
            thermo_plate.wells(i),
            new_tip='always',
            mix_after=(3,10))
    # step 3: run thermocycler rounds (25C/10min, 42C/50min, 85C/5min)
    thermo_mod.close_lid()
    thermo_mod.set_lid_temperature(105)
    thermo_mod.set_block_temperature(25, hold_time_minutes=10, block_max_volume=20)
    thermo_mod.set_block_temperature(42, hold_time_minutes=50, block_max_volume=20)
    thermo_mod.set_block_temperature(85, hold_time_minutes=5, block_max_volume=20)
    thermo_mod.set_block_temperature(4) # hold at 4C until user opens thermocycler manually
### end | 3,248 | 0 | 46 |
544feca6b337dade97f3293e72f1e900e9812832 | 6,173 | py | Python | ceilometer/tests/db.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/db.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/db.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
import fixtures
import os
import uuid
import warnings
import six
from six.moves.urllib import parse as urlparse
import testscenarios.testcase
from testtools import testcase
from ceilometer.openstack.common.fixture import config
import ceilometer.openstack.common.fixture.mockpatch as oslo_mock
from ceilometer import storage
from ceilometer.tests import base as test_base
def run_with(*drivers):
    """Used to mark tests that are only applicable for certain db driver.
    Skips test if driver is not available.
    """
    # NOTE(review): ``decorator`` is not defined anywhere in this copy of the
    # file — the inner decorator body has been stripped — so calling
    # run_with() here raises NameError. See the complete definition later in
    # this dump for the intact implementation.
    return decorator
@six.add_metaclass(test_base.SkipNotImplementedMeta)
| 32.319372 | 79 | 0.634861 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
import fixtures
import os
import uuid
import warnings
import six
from six.moves.urllib import parse as urlparse
import testscenarios.testcase
from testtools import testcase
from ceilometer.openstack.common.fixture import config
import ceilometer.openstack.common.fixture.mockpatch as oslo_mock
from ceilometer import storage
from ceilometer.tests import base as test_base
class MongoDbManager(fixtures.Fixture):
    """Test fixture providing metering and alarm connections to a MongoDB
    (or DB2-over-Mongo) backend, using a fresh random database per test."""

    def __init__(self, url):
        # Base connection URL; a per-test database suffix is appended in url.
        self._url = url

    def setUp(self):
        super(MongoDbManager, self).setUp()
        with warnings.catch_warnings():
            # The driver warns when the URL has no credentials; irrelevant here.
            warnings.filterwarnings(
                action='ignore',
                message='.*you must provide a username and password.*')
            try:
                self.connection = storage.get_connection(
                    self.url, 'ceilometer.metering.storage')
                self.alarm_connection = storage.get_connection(
                    self.url, 'ceilometer.alarm.storage')
            except storage.StorageBadVersion as e:
                # Server too old for the driver: skip rather than fail.
                raise testcase.TestSkipped(six.text_type(e))

    @property
    def url(self):
        # Unique database name per access, isolating tests from each other.
        return '%(url)s_%(db)s' % {
            'url': self._url,
            'db': uuid.uuid4().hex
        }
class HBaseManager(fixtures.Fixture):
    """Test fixture providing metering and alarm connections to an HBase
    backend, isolated per test via a random table prefix."""

    def __init__(self, url):
        self._url = url

    def setUp(self):
        super(HBaseManager, self).setUp()
        self.connection = storage.get_connection(
            self.url, 'ceilometer.metering.storage')
        self.alarm_connection = storage.get_connection(
            self.url, 'ceilometer.alarm.storage')

    @property
    def url(self):
        # Unique table_prefix per access keeps concurrent tests apart.
        return '%s?table_prefix=%s' % (
            self._url,
            uuid.uuid4().hex
        )
class SQLiteManager(fixtures.Fixture):
    """Test fixture providing metering and alarm connections to SQLite.

    Unlike the Mongo/HBase managers, the URL is used as-is (an in-memory
    'sqlite://' database is already private to the process)."""

    def __init__(self, url):
        self.url = url

    def setUp(self):
        super(SQLiteManager, self).setUp()
        self.connection = storage.get_connection(
            self.url, 'ceilometer.metering.storage')
        self.alarm_connection = storage.get_connection(
            self.url, 'ceilometer.alarm.storage')
class TestBase(testscenarios.testcase.WithScenarios, test_base.BaseTestCase):
    """Base class for storage tests: picks a driver manager from db_url,
    upgrades both metering and alarm schemas, and patches
    storage.get_connection so code under test reuses these connections."""

    # Maps URL scheme -> fixture class that provisions that backend.
    DRIVER_MANAGERS = {
        'mongodb': MongoDbManager,
        'db2': MongoDbManager,
        'sqlite': SQLiteManager,
        'hbase': HBaseManager,
    }
    db_url = 'sqlite://'  # NOTE(Alexei_987) Set default db url

    def setUp(self):
        super(TestBase, self).setUp()
        engine = urlparse.urlparse(self.db_url).scheme
        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped(
                'Test is not applicable for %s' % engine)
        self.db_manager = self._get_driver_manager(engine)(self.db_url)
        self.useFixture(self.db_manager)
        self.conn = self.db_manager.connection
        self.conn.upgrade()
        self.alarm_conn = self.db_manager.alarm_connection
        self.alarm_conn.upgrade()
        # Any code asking for a connection gets the ones created above.
        self.useFixture(oslo_mock.Patch('ceilometer.storage.get_connection',
                                        side_effect=self._get_connection))
        self.CONF = self.useFixture(config.Config()).conf
        self.CONF([], project='ceilometer')
        # Set a default location for the pipeline config file so the
        # tests work even if ceilometer is not installed globally on
        # the system.
        self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline')
        self.CONF.set_override(
            'pipeline_cfg_file',
            self.path_get('etc/ceilometer/pipeline.yaml')
        )

    def tearDown(self):
        # Drop data and release references so the next scenario starts clean.
        self.alarm_conn.clear()
        self.alarm_conn = None
        self.conn.clear()
        self.conn = None
        super(TestBase, self).tearDown()

    def _get_connection(self, url, namespace):
        # Dispatch the patched storage.get_connection to the right fixture.
        if namespace == "ceilometer.alarm.storage":
            return self.alarm_conn
        return self.conn

    def _get_driver_manager(self, engine):
        # Resolve a URL scheme to its manager class; unknown schemes are fatal.
        manager = self.DRIVER_MANAGERS.get(engine)
        if not manager:
            raise ValueError('No manager available for %s' % engine)
        return manager
def run_with(*drivers):
    """Used to mark tests that are only applicable for certain db driver.

    Skips test if driver is not available.
    """
    def decorator(test):
        # Class target: tag every test_* method on the TestBase subclass.
        # (The isinstance check short-circuits for plain functions, so
        # TestBase is never evaluated in that case.)
        if isinstance(test, type) and issubclass(test, TestBase):
            for attr in dir(test):
                member = getattr(test, attr)
                if attr.startswith('test_') and callable(member):
                    member.__func__._run_with = drivers
            return test
        # Function target: tag the function itself.
        test._run_with = drivers
        return test
    return decorator
@six.add_metaclass(test_base.SkipNotImplementedMeta)
class MixinTestsWithBackendScenarios(object):
    """Mixin rerunning the inheriting tests once per storage backend."""

    # Backends other than sqlite read their connection URL from the
    # environment; a missing variable yields db_url=None for that scenario.
    scenarios = [
        ('sqlite', {'db_url': 'sqlite://'}),
        ('mongodb', {'db_url': os.environ.get('CEILOMETER_TEST_MONGODB_URL')}),
        ('hbase', {'db_url': os.environ.get('CEILOMETER_TEST_HBASE_URL',
                                            'hbase://__test__')}),
        ('db2', {'db_url': (os.environ.get('CEILOMETER_TEST_DB2_URL') or
                            os.environ.get('CEILOMETER_TEST_MONGODB_URL'))})
    ]
| 3,516 | 1,094 | 194 |
44e883ffb62d069e4d49b74203000491b5ab0bb5 | 535 | py | Python | adjacency.py | rikusalminen/trimuncher | bdf534fdf382c750e0ec7a6031433de88014e656 | [
"Zlib"
] | 1 | 2018-11-06T05:11:08.000Z | 2018-11-06T05:11:08.000Z | adjacency.py | rikusalminen/trimuncher | bdf534fdf382c750e0ec7a6031433de88014e656 | [
"Zlib"
] | null | null | null | adjacency.py | rikusalminen/trimuncher | bdf534fdf382c750e0ec7a6031433de88014e656 | [
"Zlib"
] | null | null | null | from winged_edge import winged_edge_lookup
| 44.583333 | 92 | 0.68972 | from winged_edge import winged_edge_lookup
def build_adjacency_triangles(triangles, mapping, reverse_mapping, edges, position_idx=0):
    """Yield a 6-tuple per input triangle: each corner followed by the vertex
    of the neighbouring triangle across the following edge, looked up through
    the winged-edge structure."""
    # TODO: This function shouldn't be hard to modify to handle triangle strips and fans
    def _opposite(u, v):
        # Map render indices back to position indices, then consult the
        # winged-edge table for the triangle across edge (u, v).
        up = reverse_mapping[u][position_idx]
        vp = reverse_mapping[v][position_idx]
        _next_edge, winged = winged_edge_lookup(edges, (up, vp))
        return mapping[winged[2]]
    for v0, v1, v2 in triangles:
        yield (v0, _opposite(v0, v1),
               v1, _opposite(v1, v2),
               v2, _opposite(v2, v0))
| 469 | 0 | 23 |
6050d946cca8c0645650b5ae57129fc0fbaca682 | 23,113 | py | Python | src/nli/modified_esim_v1_0.py | Derrors/Combine-FEVER-NSMN | e3458ee99f086e3d44c9da3ec3e2885511cd42c2 | [
"MIT"
] | 4 | 2020-10-09T16:46:56.000Z | 2022-01-03T18:42:24.000Z | src/nli/modified_esim_v1_0.py | Frankey419/combine-FEVER-NSMN | 8577ad47092c052d6c0456415cb2eebc2a392984 | [
"MIT"
] | 4 | 2020-11-02T01:00:33.000Z | 2020-11-02T01:07:45.000Z | src/nli/modified_esim_v1_0.py | Frankey419/combine-FEVER-NSMN | 8577ad47092c052d6c0456415cb2eebc2a392984 | [
"MIT"
] | 2 | 2020-12-10T12:50:45.000Z | 2021-03-06T11:26:53.000Z | import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo
from torch import nn
import os
import config
from data_util.data_readers.fever_reader import BasicReader
from data_util.exvocab import load_vocab_embeddings
from log_util import save_tool
from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from neural_modules import biDafAttn
from sample_for_nli.tf_idf_sample_v1_0 import sample_v1_0, select_sent_for_eval, convert_evidence2scoring_format
from sentence_retrieval.nn_postprocess_ablation import score_converter_scaled
from utils import c_scorer, common
# This is ESIM sequence matching model
# lstm
if __name__ == "__main__":
# train_fever()
# hidden_eval_fever()
spectrum_eval_manual_check() | 37.76634 | 198 | 0.662008 | import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo
from torch import nn
import os
import config
from data_util.data_readers.fever_reader import BasicReader
from data_util.exvocab import load_vocab_embeddings
from log_util import save_tool
from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from neural_modules import biDafAttn
from sample_for_nli.tf_idf_sample_v1_0 import sample_v1_0, select_sent_for_eval, convert_evidence2scoring_format
from sentence_retrieval.nn_postprocess_ablation import score_converter_scaled
from utils import c_scorer, common
class ESIM(nn.Module):
    # This is ESIM sequence matching model
    # lstm
    def __init__(self, rnn_size_in=(1024 + 300, 1024 + 300), rnn_size_out=(300, 300), max_l=100,
                 mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        """ESIM-style matcher over two token sequences.

        rnn_size_in / rnn_size_out: input and hidden sizes of the two BiLSTM
        layers (defaults assume ELMo 1024 + GloVe 300 features).
        max_l: maximum sequence length kept by the caller (stored only).
        """
        super(ESIM, self).__init__()
        self.dropout_layer = nn.Dropout(drop_r)
        self.lstm_1 = nn.LSTM(input_size=rnn_size_in[0], hidden_size=rnn_size_out[0],
                              num_layers=1, bidirectional=True, batch_first=True)
        self.lstm_2 = nn.LSTM(input_size=rnn_size_in[1], hidden_size=rnn_size_out[1],
                              num_layers=1, bidirectional=True, batch_first=True)
        # Projects the 4-way co-attention concat (2*hidden each) back down.
        self.projection = nn.Linear(rnn_size_out[0] * 2 * 4, rnn_size_out[0])
        self.max_l = max_l
        self.bidaf = biDafAttn(300)
        self.mlp_1 = nn.Linear(rnn_size_out[1] * 2 * 4, mlp_d)
        self.sm = nn.Linear(mlp_d, num_of_class)
        if activation_type == 'relu':
            activation = nn.ReLU()
        elif activation_type == 'tanh':
            activation = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")
        self.classifier = nn.Sequential(*[nn.Dropout(drop_r), self.mlp_1, activation, nn.Dropout(drop_r), self.sm])

    def count_params(self):
        """Print the total element count of all 2-D (matrix) parameters."""
        total_c = 0
        for param in self.parameters():
            if len(param.size()) == 2:
                d1, d2 = param.size()[0], param.size()[1]
                total_c += d1 * d2
        print("Total count:", total_c)

    def display(self):
        """Print the name and shape of every parameter."""
        for name, param in self.named_parameters():
            print(name, param.data.size())

    def forward(self, layer1_s1, layer2_s1, l1, layer1_s2, layer2_s2, l2):  # [B, T]
        # layer1_*: inputs to the first BiLSTM; layer2_*: extra features
        # concatenated before the second BiLSTM; l1/l2: sequence lengths.
        p_s1 = self.dropout_layer(layer1_s1)
        p_s2 = self.dropout_layer(layer1_s2)
        s1_layer1_out = torch_util.auto_rnn(self.lstm_1, p_s1, l1)
        s2_layer1_out = torch_util.auto_rnn(self.lstm_1, p_s2, l2)
        # BiDAF-style cross attention between the two encoded sequences.
        S = self.bidaf.similarity(s1_layer1_out, l1, s2_layer1_out, l2)
        s1_att, s2_att = self.bidaf.get_both_tile(S, s1_layer1_out, s2_layer1_out)
        # Classic ESIM enhancement: [a; b; a-b; a*b] along the feature dim.
        s1_coattentioned = torch.cat([s1_layer1_out, s1_att, s1_layer1_out - s1_att,
                                      s1_layer1_out * s1_att], dim=2)
        s2_coattentioned = torch.cat([s2_layer1_out, s2_att, s2_layer1_out - s2_att,
                                      s2_layer1_out * s2_att], dim=2)
        p_s1_coattentioned = F.relu(self.projection(s1_coattentioned))
        p_s2_coattentioned = F.relu(self.projection(s2_coattentioned))
        s1_coatt_features = torch.cat([p_s1_coattentioned, layer2_s1], dim=2)
        s2_coatt_features = torch.cat([p_s2_coattentioned, layer2_s2], dim=2)
        s1_coatt_features = self.dropout_layer(s1_coatt_features)
        s2_coatt_features = self.dropout_layer(s2_coatt_features)
        s1_layer2_out = torch_util.auto_rnn(self.lstm_2, s1_coatt_features, l1)
        s2_layer2_out = torch_util.auto_rnn(self.lstm_2, s2_coatt_features, l2)
        # Max-pool over time, then combine both sides for classification.
        s1_lay2_maxout = torch_util.max_along_time(s1_layer2_out, l1)
        s2_lay2_maxout = torch_util.max_along_time(s2_layer2_out, l2)
        features = torch.cat([s1_lay2_maxout, s2_lay2_maxout,
                              torch.abs(s1_lay2_maxout - s2_lay2_maxout),
                              s1_lay2_maxout * s2_lay2_maxout], dim=1)
        return self.classifier(features)
class Model(nn.Module):
    def __init__(self, weight, vocab_size, embedding_dim,
                 rnn_size_in=(1024 + 300, 1024 + 300),
                 rnn_size_out=(300, 300), max_l=150,
                 mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        """Wraps GloVe + ELMo embedding layers around the ESIM matcher.

        weight: pretrained GloVe embedding matrix; vocab_size/embedding_dim
        size the lookup table. Remaining args are forwarded to ESIM.
        """
        super(Model, self).__init__()
        self.glove_embd_layer = Embedding(vocab_size, embedding_dim,
                                          weight=weight, padding_index=0)
        options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
        weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
        num_of_elmo = 1
        self.max_l = max_l
        self.elmo_embd_layer = Elmo(options_file, weight_file, num_of_elmo, dropout=0)
        self.esim_layer = ESIM(rnn_size_in, rnn_size_out, max_l, mlp_d, num_of_class, drop_r, activation_type)

    def display(self, exclude=None):
        """Print trainable parameters and their total size, excluding any
        whose name contains a string in *exclude* (default: the GloVe table)."""
        total_p_size = 0
        if exclude is None:
            exclude = {'glove'}
        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.data.size())
                exclude_this = False
                for exclude_name in exclude:
                    if exclude_name in str(name):
                        exclude_this = True
                if exclude_this:
                    continue
                # NOTE: local ``nn`` shadows the torch.nn import inside this loop.
                nn = 1
                for s in list(param.size()):
                    nn = nn * s
                total_p_size += nn
        print('Total Size:', total_p_size)

    def raw_input_to_esim_input(self, s_tokens, s_elmo_chars):
        """Truncate to max_l and embed one side with GloVe and ELMo.

        Returns (glove_embeddings, elmo_embeddings, lengths) for the sequence.
        """
        s_tokens = torch_util.length_truncate(s_tokens, self.max_l)
        s1_glove_embd = self.glove_embd_layer(s_tokens)
        s1_elmo_out = self.elmo_embd_layer(s_elmo_chars)
        s1_elmo_embd = torch_util.length_truncate(s1_elmo_out, self.max_l, is_elmo=True)
        s1_mask, s1_len = torch_util.get_length_and_mask(s_tokens)
        # Both embedding paths must agree on which positions are padding.
        assert torch.equal(s1_elmo_embd['mask'], s1_mask)
        return s1_glove_embd, s1_elmo_embd['elmo_representations'][0], s1_len

    def forward(self, batch):
        # batch carries AllenNLP-indexed 'premise'/'hypothesis' fields with
        # 'tokens' (GloVe ids) and 'elmo_chars' (character ids).
        s1_tokens = batch['premise']['tokens']
        s1_elmo_chars = batch['premise']['elmo_chars']
        s2_tokens = batch['hypothesis']['tokens']
        s2_elmo_chars = batch['hypothesis']['elmo_chars']
        s1_glove_embd, s1_elmo_embd, s1_len = self.raw_input_to_esim_input(s1_tokens, s1_elmo_chars)
        s2_glove_embd, s2_elmo_embd, s2_len = self.raw_input_to_esim_input(s2_tokens, s2_elmo_chars)
        # Layer 1 sees GloVe+ELMo; layer 2 re-injects ELMo alongside the
        # co-attention projection (see ESIM.forward).
        s1_layer1_in = torch.cat((s1_glove_embd, s1_elmo_embd), dim=2)
        s1_layer2_in = s1_elmo_embd
        s2_layer1_in = torch.cat((s2_glove_embd, s2_elmo_embd), dim=2)
        s2_layer2_in = s2_elmo_embd
        # print(s1_layer1_in.size())
        # print(s1_layer2_in.size())
        # print(s2_layer1_in.size())
        # print(s2_layer2_in.size())
        esim_out = self.esim_layer(s1_layer1_in, s1_layer2_in, s1_len,
                                   s2_layer1_in, s2_layer2_in, s2_len)
        return esim_out
def eval_model(model, data_iter, criterion):
    """Compute (accuracy %, average per-example loss) of *model* on *data_iter*.

    model(batch) must return class logits and batch['label'] the gold labels.
    criterion is an averaging loss (e.g. CrossEntropyLoss); it is re-weighted
    by batch size so the returned loss is a true per-example average.
    """
    print("Evaluating ...")
    model.eval()
    n_correct = 0
    loss = 0.0
    total_size = 0
    # FIX: evaluate under no_grad, consistent with full_eval_model /
    # hidden_eval in this file (the original built the autograd graph and
    # wasted memory; results are unchanged).
    with torch.no_grad():
        for batch in data_iter:
            out = model(batch)
            y = batch['label']
            pred = torch.max(out, 1)[1].view(y.size())
            n_correct += (pred == y).sum().item()
            loss += criterion(out, y).item() * y.size(0)
            total_size += y.size(0)
            # (dead y_pred_list / y_true_list accumulators removed — they
            # were filled but never used or returned)
    print('n_correct:', n_correct)
    print('total_size:', total_size)
    avg_acc = 100. * n_correct / total_size
    avg_loss = loss / total_size
    return avg_acc, avg_loss
def full_eval_model(model, data_iter, criterion, dev_data_list):
    """Evaluate on FEVER dev data and score with the official FEVER metric.

    Writes 'predicted_label' into each element of dev_data_list (aligned by
    id with the batches) and returns (strict_score, avg_loss).
    """
    # SUPPORTS < (-.-) > 0
    # REFUTES < (-.-) > 1
    # NOT ENOUGH INFO < (-.-) > 2
    with torch.no_grad():
        id2label = {
            0: "SUPPORTS",
            1: "REFUTES",
            2: "NOT ENOUGH INFO"
        }
        print("Evaluating ...")
        model.eval()
        n_correct = loss = 0
        totoal_size = 0
        y_pred_list = []
        y_true_list = []
        y_id_list = []
        for batch_idx, batch in enumerate(data_iter):
            out = model(batch)
            y = batch['label']
            y_id_list.extend(list(batch['pid']))
            n_correct += (torch.max(out, 1)[1].view(y.size()) == y).sum().item()
            y_pred_list.extend(torch.max(out, 1)[1].view(y.size()).tolist())
            y_true_list.extend(y.tolist())
            loss += criterion(out, y).item() * y.size(0)
            totoal_size += y.size(0)
        # Every dev example must have been seen exactly once, in order.
        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_list) == len(dev_data_list)
        assert len(y_true_list) == len(dev_data_list)
        for i in range(len(dev_data_list)):
            assert str(y_id_list[i]) == str(dev_data_list[i]['id'])
            # Matching id
            dev_data_list[i]['predicted_label'] = id2label[y_pred_list[i]]
            # Reset neural set: no retrieved evidence forces NEI regardless
            # of the classifier output.
            if len(dev_data_list[i]['predicted_sentids']) == 0:
                dev_data_list[i]['predicted_label'] = "NOT ENOUGH INFO"
        # This has been done
        # dev_data_list[i]['predicted_evidence'] = convert_evidence2scoring_format(dev_data_list[i]['predicted_sentids'])
        print('n_correct:', n_correct)
        print('total_size:', totoal_size)
        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_data_list, dev_data_list, mode=eval_mode, verbose=False)
        print("Fever Score(Strict/Acc./Precision/Recall/F1):", strict_score, acc_score, pr, rec, f1)
        avg_acc = 100. * n_correct / totoal_size
        avg_loss = loss / totoal_size
        return strict_score, avg_loss
def hidden_eval(model, data_iter, dev_data_list):
    """Run label prediction without gold labels ("hidden" evaluation).

    Writes 'predicted_label' into each element of dev_data_list (aligned by
    id with the batches) and returns the mutated list.
    """
    # SUPPORTS < (-.-) > 0
    # REFUTES < (-.-) > 1
    # NOT ENOUGH INFO < (-.-) > 2
    id2label = {
        0: "SUPPORTS",
        1: "REFUTES",
        2: "NOT ENOUGH INFO"
    }
    print("Evaluating ...")
    with torch.no_grad():
        model.eval()
        totoal_size = 0
        y_pred_list = []
        y_id_list = []
        for batch_idx, batch in enumerate(data_iter):
            out = model(batch)
            y_id_list.extend(list(batch['pid']))
            y_pred_list.extend(torch.max(out, 1)[1].view(out.size(0)).tolist())
            totoal_size += out.size(0)
        # Predictions must align one-to-one with the dev examples.
        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_list) == len(dev_data_list)
        for i in range(len(dev_data_list)):
            assert str(y_id_list[i]) == str(dev_data_list[i]['id'])
            # Matching id
            dev_data_list[i]['predicted_label'] = id2label[y_pred_list[i]]
            # Reset neural set: no retrieved evidence forces NEI.
            if len(dev_data_list[i]['predicted_sentids']) == 0:
                dev_data_list[i]['predicted_label'] = "NOT ENOUGH INFO"
    print('total_size:', totoal_size)
    return dev_data_list
def eval_fever():
    """
    Evaluate a saved ESIM+ELMo checkpoint on the MNLI matched and
    unmatched dev sets (cs-score-filtered AMR variants) and print the
    score/loss of both splits.
    """
    # save_path = "/home/easonnie/projects/MiscEnc/saved_models/06-07-21:58:06_esim_elmo/i(60900)_epoch(4)_um_dev(80.03458096013019)_m_dev(79.174732552216)_seed(12)"
    save_path = "/home/easonnie/projects/MiscEnc/saved_models/07-02-14:40:01_esim_elmo_linear_amr_cs_score_filtering_0.5/i(5900)_epoch(3)_um_dev(39.73759153783564)_m_dev(40.18339276617422)_seed(12)"
    # save_path = "/home/easonnie/projects/MiscEnc/saved_models/07-02-14:42:34_esim_elmo_cs_score_filtering_0.7/i(1300)_epoch(4)_um_dev(32.55695687550855)_m_dev(32.42995415180846)_seed(12)"
    batch_size = 32

    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'), # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters') # This is the elmo_characters
    }

    # Keep only examples whose 'cs_score' is at least 0.7.
    csnli_dataset_reader = CNLIReader(token_indexers=token_indexers,
                                      example_filter=lambda x: float(x['cs_score']) >= 0.7)

    # mnli_train_data_path = config.DATA_ROOT / "mnli/multinli_1.0_train.jsonl"
    mnli_m_dev_data_path = config.DATA_ROOT / "amrs/mnli_amr_ln/mnli_mdev.jsonl.cs"
    mnli_um_dev_data_path = config.DATA_ROOT / "amrs/mnli_amr_ln/mnli_umdev.jsonl.cs"

    # mnli_train_instances = csnli_dataset_reader.read(mnli_train_data_path)
    mnli_m_dev_instances = csnli_dataset_reader.read(mnli_m_dev_data_path)
    mnli_um_dev_instances = csnli_dataset_reader.read(mnli_um_dev_data_path)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli")
    # Register the extra 'hidden' token in the labels namespace at index -2.
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    # AllenNLP iterator convention: -1 means CPU, otherwise the GPU index.
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300)
    model.load_state_dict(torch.load(save_path))
    model.display()
    model.to(device)

    # Create Log File
    criterion = nn.CrossEntropyLoss()

    # Matched dev split.
    eval_iter = biterator(mnli_m_dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    m_dev_score, m_dev_loss = eval_model(model, eval_iter, criterion)
    # Unmatched dev split.
    eval_iter = biterator(mnli_um_dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    um_dev_score, um_dev_loss = eval_model(model, eval_iter, criterion)

    print(f"Dev(M):{m_dev_score}/{m_dev_loss}")
    print(f"Dev(UM):{um_dev_score}/{um_dev_loss}")
def get_sampled_data(tokenized_data_file, additional_data_file):
    """Sample training examples from the tokenized data plus the upstream
    (additional) data file; inputs are already tokenized."""
    return sample_v1_0(tokenized_data_file, additional_data_file, tokenized=True)
def get_actual_data(tokenized_data_file, additional_data_file):
    """Build the actual evaluation examples by selecting sentences for
    eval from the tokenized data plus the upstream (additional) file."""
    return select_sent_for_eval(tokenized_data_file, additional_data_file, tokenized=True)
def train_fever():
    """
    Train the MESIM+ELMo claim-verification model on FEVER.

    Every epoch re-samples a training set from the upstream
    sentence-retrieval output, trains with cross-entropy, and checkpoints
    the model whenever the dev score improves.
    """
    num_epoch = 8
    seed = 12
    batch_size = 32
    experiment_name = "mesim_elmo"
    lazy = True

    # Upstream sentence-retrieval results used to build the NLI examples.
    dev_upstream_file = config.RESULT_PATH / "sent_retri/2018_07_05_17:17:50_r/dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "sent_retri/2018_07_05_17:17:50_r/train.jsonl"

    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'), # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters') # This is the elmo_characters
    }

    train_fever_data_reader = BasicReader(token_indexers=token_indexers, lazy=lazy, max_l=360)
    dev_fever_data_reader = BasicReader(token_indexers=token_indexers, lazy=lazy, max_l=360)

    # The dev set is fixed for the whole run; only training data is resampled.
    complete_upstream_dev_data = get_actual_data(config.T_FEVER_DEV_JSONL, dev_upstream_file)
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # Register the extra 'hidden' token in the labels namespace at index -2.
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    # AllenNLP iterator convention: -1 means CPU, otherwise the GPU index.
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300)
    model.display()
    model.to(device)

    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")

    best_dev = -1
    iteration = 0

    start_lr = 0.0002
    # Only optimize parameters that require gradients (frozen embeddings stay put).
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()

    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling: draw a fresh training sample from the upstream
        # retrieval output at the start of every epoch.
        complete_upstream_train_data = get_sampled_data(config.T_FEVER_TRAIN_JSONL, train_upstream_file)
        sampled_train_instances = train_fever_data_reader.read(complete_upstream_train_data)

        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1, cuda_device=device_num)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['label']

            loss = criterion(out, y)

            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            # Evaluate rarely during early epochs, much more often afterwards.
            if i_epoch <= 4:
                mod = 5000
            else:
                mod = 200

            if iteration % mod == 0:
                eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
                dev_score, dev_loss = full_eval_model(model, eval_iter, criterion, complete_upstream_dev_data)

                print(f"Dev:{dev_score}/{dev_loss}")

                # Checkpoint only when the dev score sets a new best.
                need_save = False
                if dev_score > best_dev:
                    best_dev = dev_score
                    need_save = True

                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_dev({dev_score})_loss({dev_loss})_seed({seed})'
                    )

                    torch.save(model.state_dict(), save_path)
def hidden_eval_fever():
    """
    Load a saved MESIM+ELMo checkpoint, label the FEVER dev set with
    hidden_eval() and print the official FEVER score.
    """
    batch_size = 64
    lazy = True

    SAVE_PATH = "/home/easonnie/projects/FunEver/saved_models/07-08-19:04:33_mesim_elmo/i(39700)_epoch(6)_dev(0.5251525152515252)_loss(1.5931938096682707)_seed(12)"

    # Upstream sentence-retrieval results for the dev set.
    dev_upstream_file = config.RESULT_PATH / "sent_retri/2018_07_05_17:17:50_r/dev.jsonl"

    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'), # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters') # This is the elmo_characters
    }

    dev_fever_data_reader = BasicReader(token_indexers=token_indexers, lazy=lazy)
    complete_upstream_dev_data = get_actual_data(config.T_FEVER_DEV_JSONL, dev_upstream_file)
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # Register the extra 'hidden' token in the labels namespace at index -2.
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    # AllenNLP iterator convention: -1 means CPU, otherwise the GPU index.
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    model.to(device)

    # Attach predicted labels to every dev example, then score.
    eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
    builded_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

    eval_mode = {'check_sent_id_correct': True, 'standard': True}
    print(c_scorer.fever_score(builded_dev_data, config.T_FEVER_DEV_JSONL, mode=eval_mode))
    # print(f"Dev:{dev_score}/{dev_loss}")
def spectrum_eval_manual_check():
    """
    Sweep the sentence-retrieval scaling probability and report the FEVER
    score of a fixed checkpoint at each threshold.
    """
    batch_size = 64
    lazy = True

    SAVE_PATH = "/home/easonnie/projects/FunEver/saved_models/07-17-12:10:35_mesim_elmo/i(34800)_epoch(5)_dev(0.5563056305630563)_loss(1.6648460462434564)_seed(12)"

    # IN_FILE = config.RESULT_PATH / "sent_retri_nn/2018_07_17_15:52:19_r/dev_sent.jsonl"
    IN_FILE = config.RESULT_PATH / "sent_retri_nn/2018_07_17_16:34:19_r/dev_sent.jsonl"
    # IN_FILE = config.RESULT_PATH / "sent_retri_nn/2018_07_17_16-34-19_r/dev_sent.jsonl"
    dev_sent_result_lsit = common.load_jsonl(IN_FILE)

    # Prepare Data
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'), # This is the raw tokens
        'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters') # This is the elmo_characters
    }

    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    # Register the extra 'hidden' token in the labels namespace at index -2.
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab.get_vocab_size('tokens'))
    biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    # AllenNLP iterator convention: -1 means CPU, otherwise the GPU index.
    device_num = -1 if device.type == 'cpu' else 0
    model = Model(weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  embedding_dim=300, max_l=300)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    model.to(device)

    # Re-threshold the retrieved sentences and re-score at each probability.
    for sc_prob in [0.5, 0.7, 0.8, 0.9, 0.95, 0.98]:
        upstream_dev_list = score_converter_scaled(config.T_FEVER_DEV_JSONL, dev_sent_result_lsit, scale_prob=sc_prob,
                                                   delete_prob=False)

        dev_fever_data_reader = BasicReader(token_indexers=token_indexers, lazy=lazy)
        complete_upstream_dev_data = get_actual_data(config.T_FEVER_DEV_JSONL, upstream_dev_list)
        dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

        eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
        builded_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

        print("------------------------------------")
        print("Scaling_prob:", sc_prob)
        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        print(c_scorer.fever_score(builded_dev_data, config.T_FEVER_DEV_JSONL, mode=eval_mode))
        # del upstream_dev_list
        # del complete_upstream_dev_data
        # Drop the per-threshold reader/instances before the next round.
        del dev_fever_data_reader
        del dev_instances
        print("------------------------------------")
if __name__ == "__main__":
    # Entry point: uncomment the routine to run.
    # train_fever()
    # hidden_eval_fever()
    spectrum_eval_manual_check()
3ccf1d20e9550ceb4e18ff07cf68d4676f4502b9 | 537 | py | Python | educative/course1/arrays/ch3_add_upto_n_1.py | liveroot/ambition2020 | f312aa684caab5da82504c2bcd98bef64d102caf | [
"MIT"
] | null | null | null | educative/course1/arrays/ch3_add_upto_n_1.py | liveroot/ambition2020 | f312aa684caab5da82504c2bcd98bef64d102caf | [
"MIT"
] | null | null | null | educative/course1/arrays/ch3_add_upto_n_1.py | liveroot/ambition2020 | f312aa684caab5da82504c2bcd98bef64d102caf | [
"MIT"
] | 1 | 2019-12-27T01:08:08.000Z | 2019-12-27T01:08:08.000Z | array = [1, 21, 3, 14, 5, 60, 7, 6]
array = [1, 21, 3, 14, 5, 60, 7, 6]
n = 27
result = "[21, 6] or [6, 21]"

# iterate both arrays in a nested loop, check for sum == n, return when pair is found
def add_upto_n_1(arr, n):
    """Return [x, y] for the first ordered pair of DISTINCT elements of
    arr (different indices) with x + y == n, or None when no pair exists.

    Fix: the helper was missing from this copy (NameError at the final
    print), and the naive pairing must not reuse the same index twice.
    """
    for i, x in enumerate(arr):
        for j, y in enumerate(arr):
            if i != j and x + y == n:
                return [x, y]
    return None

print("Input: " + "array = " + str(array) + ", " + "n = " + str(n))
print("Expected: " + str(result))
print("Output: " + str(add_upto_n_1(array, n)))
| 22.375 | 85 | 0.510242 | array = [1, 21, 3, 14, 5, 60, 7, 6]
array = [1, 21, 3, 14, 5, 60, 7, 6]
n = 27
result = "[21, 6] or [6, 21]"

# iterate both arrays in a nested loop, check for sum == n, return when pair is found
def add_upto_n_1(arr, n):
    """Return [x, y] for the first ordered pair of DISTINCT elements of
    arr (different indices) with x + y == n, or None when no pair exists.

    Fix: the original paired an element with itself (same index used for
    both x and y), so e.g. [5, 3] with n=10 wrongly returned [5, 5].
    """
    for i, x in enumerate(arr):
        for j, y in enumerate(arr):
            if i != j and x + y == n:
                return [x, y]
    return None

print("Input: " + "array = " + str(array) + ", " + "n = " + str(n))
print("Expected: " + str(result))
print("Output: " + str(add_upto_n_1(array, n)))
| 201 | 0 | 23 |
90ef0d2c16b49b287d1c5badd6ad2ca0f4bad954 | 7,959 | py | Python | nlp_toolkit/tools/farasa.py | abdelrahman-t/nlp-toolkit | 2617c5c91d369279fe73ea213709a4c29ffc6605 | [
"MIT"
] | 1 | 2019-05-22T12:12:28.000Z | 2019-05-22T12:12:28.000Z | nlp_toolkit/tools/farasa.py | abdelrahman-t/nlp-toolkit | 2617c5c91d369279fe73ea213709a4c29ffc6605 | [
"MIT"
] | null | null | null | nlp_toolkit/tools/farasa.py | abdelrahman-t/nlp-toolkit | 2617c5c91d369279fe73ea213709a4c29ffc6605 | [
"MIT"
] | 1 | 2020-04-04T14:30:29.000Z | 2020-04-04T14:30:29.000Z | """
Pythonic and thread-safe wrapper around Farasa.
Farasa is developed at QCRI and can be found at http://qatsdemo.cloudapp.net/farasa/
Paper can be found at http://www.aclweb.org/anthology/N16-3003
"""
import logging
from collections import defaultdict
from operator import concat, itemgetter
from threading import RLock
from typing import Dict, List, Optional, Tuple
from functional import seq
from py4j.java_gateway import GatewayParameters, JavaGateway, launch_gateway
import nlp_toolkit.dependencies as dependencies
from .utils import break_input_into_chuncks, setup_logger
# Module-level logger for this wrapper.
LOGGER = setup_logger('farasa', logging.INFO)

# Jars for the three Farasa components; joined with ':' (POSIX classpath
# separator) and handed to the JVM when the py4j gateway is launched.
FARASA_JARS = [
    dependencies.get_language_model_path('ner'),
    dependencies.get_language_model_path('pos'),
    dependencies.get_language_model_path('diacritizer')
]
CLASS_PATH = ':'.join(FARASA_JARS)
class Farasa:
    """
    Pythonic wrapper around Farasa.
    Supports Farasa Segmenter, POS and NER taggers.

    The Java side is reached through a py4j gateway.  By default a single
    shared (singleton) instance is used so only one JVM gateway is
    launched per process.
    """

    # POS/segment tag set emitted by the Farasa tagger.
    SEGMENT_TYPES = ['S', 'E',
                     'V', 'NOUN', 'ADJ', 'NUM',
                     'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
                     'DET', 'PREP', 'ABBREV', 'PUNC']

    # IOB-style NER tags (B-/I- for LOC, ORG, PERS) kept during NE extraction.
    NER_TOKEN_TYPES = ['B-LOC', 'B-ORG', 'B-PERS',
                       'I-LOC', 'I-ORG', 'I-PERS']

    # Shared singleton instance and the lock guarding its creation.
    __instance: Optional['Farasa'] = None
    __global_lock: RLock = RLock()

    def __new__(cls, singelton: bool) -> 'Farasa':
        """
        Create a Farasa instance.
        :param singelton: whether to create a single shared instance of Farasa.
        """
        if singelton:
            with cls.__global_lock:
                # Reuse the shared instance when it already exists.
                return cls.__instance or super(Farasa, cls).__new__(cls) # type: ignore
        return super(Farasa, cls).__new__(cls) # type: ignore

    def __init__(self, singelton: bool = True) -> None:
        """
        Initialize Farasa.
        :param singelton: whether to create a single shared instance of Farasa.
        """
        # __init__ also runs when __new__ returned the existing singleton,
        # so the expensive gateway setup is skipped in that case.
        if not self.__class__.__instance or not singelton:
            self.gateway = self.__launch_java_gateway()
            base = self.gateway.jvm.com.qcri.farasa
            self.segmenter = base.segmenter.Farasa()
            self.pos_tagger = base.pos.FarasaPOSTagger(self.segmenter)
            self.ner = base.ner.ArabicNER(self.segmenter, self.pos_tagger)
            self.diacritizer = base.diacritize.DiacritizeText(self.segmenter, self.pos_tagger)
        if singelton:
            self.__class__.__instance = self
            self.__lock = self.__global_lock
        else:
            self.__lock = RLock()
        self.is_singelton = singelton

    @break_input_into_chuncks(concat=concat)
    def tag_pos(self, text: str) -> List[Tuple[str, str]]:
        """
        Tag part of speech.
        :param text: text to process.
        :returns: List of (token, token_type) pairs.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        result = []
        segments = self.segment(text)
        for segment in self.pos_tagger.tagLine(segments).clitics:
            result.append(
                (segment.surface, segment.guessPOS)
            )
        return result

    def merge_iffix(self, tags):
        """
        Merge '+'-marked clitic fragments into their neighbouring token.

        A fragment starting with '+' is appended to the previous token; one
        ending with '+' is prepended to the next token.
        :param tags: list of (token, pos) pairs from tag_pos; mutated in place.
        :returns: the (mutated) list.
        """
        # NOTE(review): assumes a '+'-prefixed fragment is never first and a
        # '+'-suffixed one never last, otherwise the neighbour indexing below
        # goes out of range -- confirm against Farasa segmenter output.
        length = len(tags)
        for i in range(length):
            word, pos = tags[i]
            if word.startswith('+'):
                tags[i-1] = (tags[i-1][0] + word.replace('+', ''),
                             tags[i-1][1])
            elif word.endswith('+'):
                tags[i+1] = (word.replace('+', '') + tags[i+1][0],
                             tags[i+1][1])
        return tags

    @break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
    def filter_pos(self, text: str, parts_of_speech_to_keep: List[str]) -> str:
        """
        Break text into chuncks and then calls _filter_pos.
        :param text: text to process.
        :param parts_of_speech_to_keep: list of parts of speech to keep
        SEGMENT_TYPES = ['S', 'E',
                 'V', 'NOUN', 'PRON', 'ADJ', 'NUM',
                 'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
                 'DET', 'PREP', 'ABBREV', 'PUNC'].
        :returns: filtered text.
        """
        # Callers may pass 'VERB'; Farasa itself tags verbs as 'V'.
        if 'VERB' in parts_of_speech_to_keep:
            parts_of_speech_to_keep = parts_of_speech_to_keep + ['V']
        pos = self.merge_iffix(self.tag_pos(text))
        # Keep wanted tags only, and drop any fragment still carrying a '+'.
        return ' '.join(seq(pos)
                        .filter(lambda x: x[1] in parts_of_speech_to_keep and '+' not in x[0])
                        .map(itemgetter(0))
                        )

    @break_input_into_chuncks(concat=concat)
    def lemmetize(self, text: str) -> str:
        """
        Lemmetize text.
        :param text: text to process.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        return ' '.join(self.segmenter.lemmatizeLine(text))

    @break_input_into_chuncks(concat=concat)
    def segment(self, text: str) -> List[str]:
        """
        Segment piece of text.
        :param text: text to process.
        :returns: Unaltered Farasa segmenter output.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        return self.segmenter.segmentLine(text)

    @break_input_into_chuncks(concat=concat)
    def _get_named_entities(self, text: str, lemmatize: bool) -> List[Tuple[str, str]]:
        """
        Get named entities.
        :param text: text to process.
        :param lemmatize: whether to lemmatize results.
        :returns: List of (token, token_type) pairs.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        # tagLine yields "token/TAG" strings; keep only NER-tagged tokens.
        tokens = (seq(self.ner.tagLine(text))
                  .map(lambda token: token.split('/'))
                  .filter(lambda token: token[1] in self.NER_TOKEN_TYPES)
                  )
        result: Dict[Tuple[int, str], List[str]] = defaultdict(list)
        entities: List[Tuple[str, str]] = []
        index = -1
        # Farasa returns named entities in IOB Style (Inside, Outside and Begninning).
        # Related Entities are grouped together.
        for token, info in tokens:
            position, token_type = info.split('-')
            if position == 'B':
                index += 1
            result[(index, token_type)].append(token)
        # Return NE as a name and type pairs, i.e. ('Egypt', 'LOC').
        for key in sorted(result.keys(), key=lambda value: value[0]):
            entity = ' '.join(result[key])
            if lemmatize:
                entity = self.lemmetize(entity)
            entities.append(
                (entity, key[1])
            )
        return seq(entities).to_list()

    def get_named_entities(self, text: str, lemmatize: bool = False) -> List[Tuple[str, str]]:
        """
        Wrap _get_named_entities.
        :param text: text to process.
        :param lemmatize: whether to lemmatize results.
        :returns: List of (token, token_type) pairs.
        """
        return seq(self._get_named_entities(text, lemmatize=lemmatize)).to_list()

    @break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
    def diacritize(self, text: str, keep_original_diacritics: bool = False) -> str:
        """
        Diacritize.
        :param text: text to process.
        :param keep_original_diacritics: whether to keep original diacritics.
        """
        # Feature intentionally disabled; the call below is unreachable.
        raise NotImplementedError('This feature is currently disabled')
        return self.diacritizer.diacritize(text, keep_original_diacritics)

    @classmethod
    def __launch_java_gateway(cls) -> JavaGateway:
        """Launch a py4j gateway with the Farasa jars on the JVM classpath."""
        LOGGER.info('Initializing Farasa..')
        port = launch_gateway(classpath=CLASS_PATH, die_on_exit=True)
        params = GatewayParameters(
            port=port, auto_convert=True, auto_field=True, eager_load=True
        )
        return JavaGateway(gateway_parameters=params)
| 31.963855 | 94 | 0.582108 | """
Pythonic and thread-safe wrapper around Farasa.
Farasa is developed at QCRI and can be found at http://qatsdemo.cloudapp.net/farasa/
Paper can be found at http://www.aclweb.org/anthology/N16-3003
"""
import logging
from collections import defaultdict
from operator import concat, itemgetter
from threading import RLock
from typing import Dict, List, Optional, Tuple
from functional import seq
from py4j.java_gateway import GatewayParameters, JavaGateway, launch_gateway
import nlp_toolkit.dependencies as dependencies
from .utils import break_input_into_chuncks, setup_logger
# Module-level logger for this wrapper.
LOGGER = setup_logger('farasa', logging.INFO)

# Jars for the three Farasa components; joined with ':' (POSIX classpath
# separator) and handed to the JVM when the py4j gateway is launched.
FARASA_JARS = [
    dependencies.get_language_model_path('ner'),
    dependencies.get_language_model_path('pos'),
    dependencies.get_language_model_path('diacritizer')
]
CLASS_PATH = ':'.join(FARASA_JARS)
class Farasa:
    """
    Pythonic wrapper around Farasa.
    Supports Farasa Segmenter, POS and NER taggers.

    The Java side is reached through a py4j gateway.  By default a single
    shared (singleton) instance is used so only one JVM gateway is
    launched per process.
    """

    # POS/segment tag set emitted by the Farasa tagger.
    SEGMENT_TYPES = ['S', 'E',
                     'V', 'NOUN', 'ADJ', 'NUM',
                     'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
                     'DET', 'PREP', 'ABBREV', 'PUNC']

    # IOB-style NER tags (B-/I- for LOC, ORG, PERS) kept during NE extraction.
    NER_TOKEN_TYPES = ['B-LOC', 'B-ORG', 'B-PERS',
                       'I-LOC', 'I-ORG', 'I-PERS']

    # Shared singleton instance and the lock guarding its creation.
    __instance: Optional['Farasa'] = None
    __global_lock: RLock = RLock()

    def __new__(cls, singelton: bool) -> 'Farasa':
        """
        Create a Farasa instance.
        :param singelton: whether to create a single shared instance of Farasa.
        """
        if singelton:
            with cls.__global_lock:
                # Reuse the shared instance when it already exists.
                return cls.__instance or super(Farasa, cls).__new__(cls) # type: ignore
        return super(Farasa, cls).__new__(cls) # type: ignore

    def __init__(self, singelton: bool = True) -> None:
        """
        Initialize Farasa.
        :param singelton: whether to create a single shared instance of Farasa.
        """
        # __init__ also runs when __new__ returned the existing singleton,
        # so the expensive gateway setup is skipped in that case.
        if not self.__class__.__instance or not singelton:
            self.gateway = self.__launch_java_gateway()
            base = self.gateway.jvm.com.qcri.farasa
            self.segmenter = base.segmenter.Farasa()
            self.pos_tagger = base.pos.FarasaPOSTagger(self.segmenter)
            self.ner = base.ner.ArabicNER(self.segmenter, self.pos_tagger)
            self.diacritizer = base.diacritize.DiacritizeText(self.segmenter, self.pos_tagger)
        if singelton:
            self.__class__.__instance = self
            self.__lock = self.__global_lock
        else:
            self.__lock = RLock()
        self.is_singelton = singelton

    @break_input_into_chuncks(concat=concat)
    def tag_pos(self, text: str) -> List[Tuple[str, str]]:
        """
        Tag part of speech.
        :param text: text to process.
        :returns: List of (token, token_type) pairs.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        result = []
        segments = self.segment(text)
        for segment in self.pos_tagger.tagLine(segments).clitics:
            result.append(
                (segment.surface, segment.guessPOS)
            )
        return result

    def merge_iffix(self, tags):
        """
        Merge '+'-marked clitic fragments into their neighbouring token.

        A fragment starting with '+' is appended to the previous token; one
        ending with '+' is prepended to the next token.
        :param tags: list of (token, pos) pairs from tag_pos; mutated in place.
        :returns: the (mutated) list.
        """
        # NOTE(review): assumes a '+'-prefixed fragment is never first and a
        # '+'-suffixed one never last, otherwise the neighbour indexing below
        # goes out of range -- confirm against Farasa segmenter output.
        length = len(tags)
        for i in range(length):
            word, pos = tags[i]
            if word.startswith('+'):
                tags[i-1] = (tags[i-1][0] + word.replace('+', ''),
                             tags[i-1][1])
            elif word.endswith('+'):
                tags[i+1] = (word.replace('+', '') + tags[i+1][0],
                             tags[i+1][1])
        return tags

    @break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
    def filter_pos(self, text: str, parts_of_speech_to_keep: List[str]) -> str:
        """
        Break text into chuncks and then calls _filter_pos.
        :param text: text to process.
        :param parts_of_speech_to_keep: list of parts of speech to keep
        SEGMENT_TYPES = ['S', 'E',
                 'V', 'NOUN', 'PRON', 'ADJ', 'NUM',
                 'CONJ', 'PART', 'NSUFF', 'CASE', 'FOREIGN',
                 'DET', 'PREP', 'ABBREV', 'PUNC'].
        :returns: filtered text.
        """
        # Callers may pass 'VERB'; Farasa itself tags verbs as 'V'.
        if 'VERB' in parts_of_speech_to_keep:
            parts_of_speech_to_keep = parts_of_speech_to_keep + ['V']
        pos = self.merge_iffix(self.tag_pos(text))
        # Keep wanted tags only, and drop any fragment still carrying a '+'.
        return ' '.join(seq(pos)
                        .filter(lambda x: x[1] in parts_of_speech_to_keep and '+' not in x[0])
                        .map(itemgetter(0))
                        )

    @break_input_into_chuncks(concat=concat)
    def lemmetize(self, text: str) -> str:
        """
        Lemmetize text.
        :param text: text to process.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        return ' '.join(self.segmenter.lemmatizeLine(text))

    @break_input_into_chuncks(concat=concat)
    def segment(self, text: str) -> List[str]:
        """
        Segment piece of text.
        :param text: text to process.
        :returns: Unaltered Farasa segmenter output.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        return self.segmenter.segmentLine(text)

    @break_input_into_chuncks(concat=concat)
    def _get_named_entities(self, text: str, lemmatize: bool) -> List[Tuple[str, str]]:
        """
        Get named entities.
        :param text: text to process.
        :param lemmatize: whether to lemmatize results.
        :returns: List of (token, token_type) pairs.
        """
        text = text.replace(';', ' ') # to handle a bug in FARASA.
        # tagLine yields "token/TAG" strings; keep only NER-tagged tokens.
        tokens = (seq(self.ner.tagLine(text))
                  .map(lambda token: token.split('/'))
                  .filter(lambda token: token[1] in self.NER_TOKEN_TYPES)
                  )
        result: Dict[Tuple[int, str], List[str]] = defaultdict(list)
        entities: List[Tuple[str, str]] = []
        index = -1
        # Farasa returns named entities in IOB Style (Inside, Outside and Begninning).
        # Related Entities are grouped together.
        for token, info in tokens:
            position, token_type = info.split('-')
            if position == 'B':
                index += 1
            result[(index, token_type)].append(token)
        # Return NE as a name and type pairs, i.e. ('Egypt', 'LOC').
        for key in sorted(result.keys(), key=lambda value: value[0]):
            entity = ' '.join(result[key])
            if lemmatize:
                entity = self.lemmetize(entity)
            entities.append(
                (entity, key[1])
            )
        return seq(entities).to_list()

    def get_named_entities(self, text: str, lemmatize: bool = False) -> List[Tuple[str, str]]:
        """
        Wrap _get_named_entities.
        :param text: text to process.
        :param lemmatize: whether to lemmatize results.
        :returns: List of (token, token_type) pairs.
        """
        return seq(self._get_named_entities(text, lemmatize=lemmatize)).to_list()

    @break_input_into_chuncks(concat=lambda x, y: x + ' ' + y)
    def diacritize(self, text: str, keep_original_diacritics: bool = False) -> str:
        """
        Diacritize.
        :param text: text to process.
        :param keep_original_diacritics: whether to keep original diacritics.
        """
        # Feature intentionally disabled; the call below is unreachable.
        raise NotImplementedError('This feature is currently disabled')
        return self.diacritizer.diacritize(text, keep_original_diacritics)

    @classmethod
    def __launch_java_gateway(cls) -> JavaGateway:
        """Launch a py4j gateway with the Farasa jars on the JVM classpath."""
        LOGGER.info('Initializing Farasa..')
        port = launch_gateway(classpath=CLASS_PATH, die_on_exit=True)
        params = GatewayParameters(
            port=port, auto_convert=True, auto_field=True, eager_load=True
        )
        return JavaGateway(gateway_parameters=params)
| 0 | 0 | 0 |
e151479c93a744211f6cfbfa208a26ae6d6d7fb4 | 597 | py | Python | tests/cars.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | tests/cars.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | tests/cars.py | ganmodokix/vaetc | 866b79677b4f06603203376d967989dedadbffae | [
"MIT"
] | null | null | null | import os, sys
import yaml
import torch
sys.path.append(os.path.dirname(__file__) + '/../')
import vaetc
if __name__ == "__main__":
    # Smoke test: train and evaluate a plain VAE on the "cars" dataset
    # with a 2-dimensional latent space; artifacts go to runs.tests/cars.
    checkpoint = vaetc.Checkpoint(options={
        "model_name": "vae",
        "dataset": "cars",
        "epochs": 512,
        "batch_size": 256,
        "logger_path": "runs.tests/cars",
        # Model hyperparameters are passed to vaetc as a YAML string.
        "hyperparameters": yaml.safe_dump({
            "lr": 1e-4,
            "z_dim": 2,
        }),
        "cuda_sync": True,
        "very_verbose": True,
    })
    # Sanity-check that the test split is readable before training starts.
    print(checkpoint.dataset.test_set[0])
    vaetc.fit(checkpoint)
vaetc.evaluate(checkpoint) | 20.586207 | 51 | 0.564489 | import os, sys
import yaml
import torch
sys.path.append(os.path.dirname(__file__) + '/../')
import vaetc
if __name__ == "__main__":
    # Smoke test: train and evaluate a plain VAE on the "cars" dataset
    # with a 2-dimensional latent space; artifacts go to runs.tests/cars.
    checkpoint = vaetc.Checkpoint(options={
        "model_name": "vae",
        "dataset": "cars",
        "epochs": 512,
        "batch_size": 256,
        "logger_path": "runs.tests/cars",
        # Model hyperparameters are passed to vaetc as a YAML string.
        "hyperparameters": yaml.safe_dump({
            "lr": 1e-4,
            "z_dim": 2,
        }),
        "cuda_sync": True,
        "very_verbose": True,
    })
    # Sanity-check that the test split is readable before training starts.
    print(checkpoint.dataset.test_set[0])
    vaetc.fit(checkpoint)
    vaetc.evaluate(checkpoint)
15c8c9a3ffaf7fa04e27d51c7f0037d2ac150f9b | 1,224 | py | Python | rex/exploit/shellcodes/linux_amd64_connectback.py | tiedaoxiaotubie/rex | 049bbce3ab2717cbb4d2f0fc10fe8c0433b39c1d | [
"BSD-2-Clause"
] | 1 | 2021-01-22T11:25:40.000Z | 2021-01-22T11:25:40.000Z | rex/exploit/shellcodes/linux_amd64_connectback.py | tiedaoxiaotubie/rex | 049bbce3ab2717cbb4d2f0fc10fe8c0433b39c1d | [
"BSD-2-Clause"
] | null | null | null | rex/exploit/shellcodes/linux_amd64_connectback.py | tiedaoxiaotubie/rex | 049bbce3ab2717cbb4d2f0fc10fe8c0433b39c1d | [
"BSD-2-Clause"
] | 1 | 2020-09-16T07:07:28.000Z | 2020-09-16T07:07:28.000Z | import socket
import struct
import logging
from rex.exploit.shellcode import Shellcode
l = logging.getLogger("rex.exploit.shellcodes.linux_amd64_connectback")
| 33.081081 | 245 | 0.716503 | import socket
import struct
import logging
from rex.exploit.shellcode import Shellcode
l = logging.getLogger("rex.exploit.shellcodes.linux_amd64_connectback")
class LinuxAMD64Connectback(Shellcode):
# TODO: should be 'linux' once CLE can identify linux files
supported_platforms = ["unix"]
arch = "AMD64"
name = "connectback"
hex_code = "4831c04831ff4831f64831d24d31c06a025f6a015e6a065a6a29580f054989c04831f64d31d24152c604240266c7442402%sc7442404%s4889e66a105a41505f6a2a580f054831f66a035e48ffce6a21580f0575f64831ff57575e5a48bf2f2f62696e2f736848c1ef0857545f6a3b580f05"
def to_raw(self, host, port):
#pylint:disable=arguments-differ
'''
:param ip: string representing the ip address or domain name to connect back to
:param port: port to connect to on the remote host
'''
l.debug("Connecting back to %s:%d", host, port)
target_ip = socket.gethostbyname(host)
raw_ip = socket.inet_aton(target_ip).encode('hex')
if port < 0 or port >= 65535:
raise ValueError("invalid port specified")
raw_port = struct.pack("!H", port).encode('hex')
return (self.hex_code % (raw_port, raw_ip)).decode('hex')
| 0 | 1,041 | 23 |
6fdb2a7342567f7fdb0c92740396a30b4402c9c7 | 100 | py | Python | app/idealers/api/v1/views/__init__.py | leydson-vieira/dealers | 14f2f307f0f4497eec92f65d01ef111b42d528b9 | [
"MIT"
] | null | null | null | app/idealers/api/v1/views/__init__.py | leydson-vieira/dealers | 14f2f307f0f4497eec92f65d01ef111b42d528b9 | [
"MIT"
] | null | null | null | app/idealers/api/v1/views/__init__.py | leydson-vieira/dealers | 14f2f307f0f4497eec92f65d01ef111b42d528b9 | [
"MIT"
] | null | null | null | from .cashback import cashback_view
from .dealers import dealer_view
from .orders import order_view
| 25 | 35 | 0.85 | from .cashback import cashback_view
from .dealers import dealer_view
from .orders import order_view
| 0 | 0 | 0 |
f6c2251d9ad5a3d88156559765bec4c7ee6737d7 | 25,182 | py | Python | gluon/packages/dal/pydal/adapters/mongo.py | crania/dockconfig | ed981b9efefd6de8a00e0ec9d6c2530e1452fd17 | [
"BSD-3-Clause"
] | 2 | 2015-07-05T12:25:08.000Z | 2015-07-05T15:39:32.000Z | gluon/packages/dal/pydal/adapters/mongo.py | crania/dockconfig | ed981b9efefd6de8a00e0ec9d6c2530e1452fd17 | [
"BSD-3-Clause"
] | null | null | null | gluon/packages/dal/pydal/adapters/mongo.py | crania/dockconfig | ed981b9efefd6de8a00e0ec9d6c2530e1452fd17 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import re
from .._globals import IDENTITY
from .._compat import integer_types, basestring
from ..objects import Table, Query, Field, Expression
from ..helpers.classes import SQLALL, Reference
from ..helpers.methods import use_common_filters, xorify
from .base import NoSQLAdapter
# bson ships with pymongo; fall back to inert placeholders so this module
# can still be imported when the driver is absent.  (The previous version
# left `Binary` undefined on the fallback path, so any later reference to
# it raised NameError; the bare `except:` is also narrowed to ImportError.)
try:
    from bson import Binary
    from bson.binary import USER_DEFINED_SUBTYPE
except ImportError:
    class Binary(object):
        """Placeholder used when bson is unavailable."""
        pass

    USER_DEFINED_SUBTYPE = 0
long = integer_types[-1]
| 37.529061 | 80 | 0.560797 | # -*- coding: utf-8 -*-
import datetime
import re
from .._globals import IDENTITY
from .._compat import integer_types, basestring
from ..objects import Table, Query, Field, Expression
from ..helpers.classes import SQLALL, Reference
from ..helpers.methods import use_common_filters, xorify
from .base import NoSQLAdapter
# bson ships with pymongo; fall back to inert placeholders so this module
# can still be imported when the driver is absent.  The bare `except:` is
# narrowed to ImportError so unrelated errors are not silently swallowed.
try:
    from bson import Binary
    from bson.binary import USER_DEFINED_SUBTYPE
except ImportError:
    class Binary(object):
        """Placeholder used when bson is unavailable."""
        pass

    USER_DEFINED_SUBTYPE = 0
long = integer_types[-1]
class MongoDBAdapter(NoSQLAdapter):
drivers = ('pymongo',)
driver_auto_json = ['loads', 'dumps']
uploads_in_blob = False
types = {
'boolean': bool,
'string': str,
'text': str,
'json': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'bigint': long,
'float': float,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
error_messages = {"javascript_needed": "This must yet be replaced" +
" with javascript in order to work."}
def __init__(self, db, uri='mongodb://127.0.0.1:5984/db',
pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
import random
from bson.objectid import ObjectId
from bson.son import SON
import pymongo.uri_parser
from pymongo.write_concern import WriteConcern
m = pymongo.uri_parser.parse_uri(uri)
self.SON = SON
self.ObjectId = ObjectId
self.random = random
self.WriteConcern = WriteConcern
self.dbengine = 'mongodb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = pool_size
self.find_or_make_work_folder()
#this is the minimum amount of replicates that it should wait
# for on insert/update
self.minimumreplication = adapter_args.get('minimumreplication', 0)
# by default all inserts and selects are performand asynchronous,
# but now the default is
# synchronous, except when overruled by either this default or
# function parameter
self.safe = 1 if adapter_args.get('safe', True) else 0
if isinstance(m, tuple):
m = {"database": m[1]}
if m.get('database') is None:
raise SyntaxError("Database is required!")
def connector(uri=self.uri, m=m):
return self.driver.MongoClient(uri, w=self.safe)[m.get('database')]
self.reconnect(connector, cursor=False)
def object_id(self, arg=None):
""" Convert input to a valid Mongodb ObjectId instance
self.object_id("<random>") -> ObjectId (not unique) instance """
if not arg:
arg = 0
if isinstance(arg, basestring):
# we assume an integer as default input
rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
if arg.isdigit() and (not rawhex):
arg = int(arg)
elif arg == "<random>":
arg = int("0x%sL" % \
"".join([self.random.choice("0123456789abcdef") \
for x in range(24)]), 0)
elif arg.isalnum():
if not arg.startswith("0x"):
arg = "0x%s" % arg
try:
arg = int(arg, 0)
except ValueError as e:
raise ValueError(
"invalid objectid argument string: %s" % e)
else:
raise ValueError("Invalid objectid argument string. " +
"Requires an integer or base 16 value")
elif isinstance(arg, self.ObjectId):
return arg
if not isinstance(arg, (int, long)):
raise TypeError("object_id argument must be of type " +
"ObjectId or an objectid representable integer")
hexvalue = hex(arg)[2:].rstrip('L').zfill(24)
return self.ObjectId(hexvalue)
def parse_reference(self, value, field_type):
# here we have to check for ObjectID before base parse
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_reference(value, field_type)
def parse_id(self, value, field_type):
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_id(value, field_type)
def represent(self, obj, fieldtype):
# the base adapter does not support MongoDB ObjectId
if isinstance(obj, self.ObjectId):
value = obj
else:
value = NoSQLAdapter.represent(self, obj, fieldtype)
# reference types must be convert to ObjectID
if fieldtype =='date':
if value is None:
return value
# this piece of data can be stripped off based on the fieldtype
t = datetime.time(0, 0, 0)
# mongodb doesn't has a date object and so it must datetime,
# string or integer
return datetime.datetime.combine(value, t)
elif fieldtype == 'time':
if value is None:
return value
# this piece of data can be stripped of based on the fieldtype
d = datetime.date(2000, 1, 1)
# mongodb doesn't has a time object and so it must datetime,
# string or integer
return datetime.datetime.combine(d, value)
elif fieldtype == "blob":
return MongoBlob(value)
elif isinstance(fieldtype, basestring):
if fieldtype.startswith('list:'):
if fieldtype.startswith('list:reference'):
newval = []
for v in value:
newval.append(self.object_id(v))
value = newval
elif fieldtype.startswith("reference") or fieldtype=="id":
value = self.object_id(value)
elif fieldtype == "string":
value = str(value)
elif isinstance(fieldtype, Table):
value = self.object_id(value)
return value
def parse_blob(self, value, field_type):
return MongoBlob.decode(value)
def _expand_query(self, query, tablename=None, safe=None):
""" Return a tuple containing query and ctable """
if not tablename:
tablename = self.get_table(query)
ctable = self._get_collection(tablename, safe)
_filter = None
if query:
if use_common_filters(query):
query = self.common_filter(query,[tablename])
_filter = self.expand(query)
return (ctable, _filter)
def _get_collection(self, tablename, safe=None):
ctable = self.connection[tablename]
if safe is not None and safe != self.safe:
wc = self.WriteConcern(w=self._get_safe(safe))
ctable = ctable.with_options(write_concern=wc)
return ctable
def _get_safe(self, val=None):
if val is None:
return self.safe
return 1 if val else 0
def create_table(self, table, migrate=True, fake_migrate=False,
polymodel=None, isCapped=False):
if isCapped:
raise RuntimeError("Not implemented")
table._dbt = None
def expand(self, expression, field_type=None):
if isinstance(expression, Query):
# any query using 'id':=
# set name as _id (as per pymongo/mongodb primary key)
# convert second arg to an objectid field
# (if its not already)
# if second arg is 0 convert to objectid
if isinstance(expression.first,Field) and \
((expression.first.type == 'id') or \
("reference" in expression.first.type)):
if expression.first.type == 'id':
expression.first.name = '_id'
# cast to Mongo ObjectId
if isinstance(expression.second, (tuple, list, set)):
expression.second = [self.object_id(item) for
item in expression.second]
else:
expression.second = self.object_id(expression.second)
if isinstance(expression, Field):
if expression.type=='id':
result = "_id"
else:
result = expression.name
elif isinstance(expression, (Expression, Query)):
first = expression.first
second = expression.second
op = expression.op
optional_args = expression.optional_args or {}
if not second is None:
result = op(first, second, **optional_args)
elif not first is None:
result = op(first, **optional_args)
else:
result = op if isinstance(op, str) else op(**optional_args)
elif field_type:
result = self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
result = [self.represent(item,field_type) for
item in expression]
else:
result = expression
return result
def drop(self, table, mode=''):
ctable = self.connection[table._tablename]
ctable.drop()
self._drop_cleanup(table)
return
def truncate(self, table, mode, safe=None):
ctable = self.connection[table._tablename]
ctable.remove(None, w=self._get_safe(safe))
def count(self, query, distinct=None, snapshot=True):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
if not isinstance(query, Query):
raise SyntaxError("Not Supported")
(ctable, _filter) = self._expand_query(query)
result = ctable.count(filter=_filter)
return result
def select(self, query, fields, attributes, snapshot=False):
mongofields_dict = self.SON()
new_fields, mongosort_list = [], []
# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
# distinct = attributes.get('distinct', False)
if 'for_update' in attributes:
self.db.logger.warning('mongodb does not support for_update')
for key in set(attributes.keys())-set(('limitby', 'orderby',
'for_update')):
if attributes[key] is not None:
self.db.logger.warning(
'select attribute not implemented: %s' % key)
if limitby:
limitby_skip, limitby_limit = limitby[0], int(limitby[1]) - 1
else:
limitby_skip = limitby_limit = 0
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:], -1))
else:
mongosort_list.append((f, 1))
for item in fields:
if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if isinstance(query, Query):
tablename = self.get_table(query)
elif len(fields) != 0:
tablename = fields[0].tablename
else:
raise SyntaxError("The table name could not be found in " +
"the query nor from the select statement.")
if query:
if use_common_filters(query):
query = self.common_filter(query,[tablename])
mongoqry_dict = self.expand(query)
fields = fields or self.db[tablename]
for field in fields:
mongofields_dict[field.name] = 1
ctable = self.connection[tablename]
modifiers={'snapshot':snapshot}
mongo_list_dicts = ctable.find(
mongoqry_dict, mongofields_dict, skip=limitby_skip,
limit=limitby_limit, sort=mongosort_list, modifiers=modifiers)
rows = []
# populate row in proper order
# Here we replace ._id with .id to follow the standard naming
colnames = []
newnames = []
for field in fields:
colname = str(field)
colnames.append(colname)
tablename, fieldname = colname.split(".")
if fieldname == "_id":
# Mongodb reserved uuid key
field.name = "id"
newnames.append(".".join((tablename, field.name)))
for record in mongo_list_dicts:
row = []
for colname in colnames:
tablename, fieldname = colname.split(".")
# switch to Mongo _id uuids for retrieving
# record id's
if fieldname == "id": fieldname = "_id"
if fieldname in record:
value = record[fieldname]
else:
value = None
row.append(value)
rows.append(row)
processor = attributes.get('processor', self.parse)
result = processor(rows, fields, newnames, blob_decode=True)
return result
def insert(self, table, fields, safe=None):
"""Safe determines whether a asynchronous request is done or a
synchronous action is done
For safety, we use by default synchronous requests"""
values = {}
ctable = self._get_collection(table._tablename, safe)
for k, v in fields:
if not k.name in ["id", "safe"]:
fieldname = k.name
fieldtype = table[k.name].type
values[fieldname] = self.represent(v, fieldtype)
result = ctable.insert_one(values)
if result.acknowledged:
Oid = result.inserted_id
rid = Reference(long(str(Oid), 16))
(rid._table, rid._record) = (table, None)
return rid
else:
return None
def update(self, tablename, query, fields, safe=None):
# return amount of adjusted rows or zero, but no exceptions
# @ related not finding the result
if not isinstance(query, Query):
raise RuntimeError("Not implemented")
amount = self.count(query, False)
if not isinstance(query, Query):
raise SyntaxError("Not Supported")
(ctable, _filter) = self._expand_query(query, tablename, safe)
# do not try to update id fields to avoid backend errors
modify = {'$set': dict((k.name, self.represent(v, k.type)) for
k, v in fields if (not k.name in ("_id", "id")))}
try:
result = ctable.update_many(filter=_filter,
update=modify)
if result.acknowledged:
amount = result.matched_count
return amount
except Exception as e:
# TODO Reverse update query to verifiy that the query succeded
raise RuntimeError("uncaught exception when updating rows: %s" % e)
def delete(self, tablename, query, safe=None):
amount = self.count(query, False)
if not isinstance(query, Query):
raise RuntimeError("query type %s is not supported" % \
type(query))
(ctable, _filter) = self._expand_query(query, safe)
result = ctable.delete_many(_filter)
if result.acknowledged:
return result.deleted_count
else:
return amount
return amount
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
## OPERATORS
def INVERT(self, first):
#print "in invert first=%s" % first
return '-%s' % self.expand(first)
def NOT(self, first):
op = self.expand(first)
op_k = list(op)[0]
op_body = op[op_k]
r = None
if type(op_body) is list:
# apply De Morgan law for and/or
# not(A and B) -> not(A) or not(B)
# not(A or B) -> not(A) and not(B)
not_op = '$and' if op_k == '$or' else '$or'
r = {not_op: [self.NOT(first.first), self.NOT(first.second)]}
else:
try:
sub_ops = list(op_body.keys())
if len(sub_ops) == 1 and sub_ops[0] == '$ne':
r = {op_k: op_body['$ne']}
except:
r = {op_k: {'$ne': op_body}}
if r == None:
r = {op_k: {'$not': op_body}}
return r
def AND(self,first,second):
# pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]})
return {'$and': [self.expand(first),self.expand(second)]}
def OR(self,first,second):
# pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
return {'$or': [self.expand(first),self.expand(second)]}
def BELONGS(self, first, second):
if isinstance(second, str):
# this is broken, the only way second is a string is if it has
# been converted to SQL. This no worky. This might be made to
# work if _select did not return SQL.
raise RuntimeError("nested queries not supported")
items = [self.expand(item, first.type) for item in second]
return {self.expand(first) : {"$in" : items} }
def EQ(self,first,second=None):
result = {}
result[self.expand(first)] = self.expand(second, first.type)
return result
def NE(self, first, second=None):
result = {}
result[self.expand(first)] = {'$ne': self.expand(second, first.type)}
return result
def LT(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
result = {}
result[self.expand(first)] = {'$lt': self.expand(second, first.type)}
return result
def LE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
result = {}
result[self.expand(first)] = {'$lte': self.expand(second, first.type)}
return result
def GT(self,first,second):
result = {}
result[self.expand(first)] = {'$gt': self.expand(second, first.type)}
return result
def GE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
result = {}
result[self.expand(first)] = {'$gte': self.expand(second, first.type)}
return result
def ADD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s + %s' % (self.expand(first),
self.expand(second, first.type))
def SUB(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s - %s)' % (self.expand(first),
self.expand(second, first.type))
def MUL(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s * %s)' % (self.expand(first),
self.expand(second, first.type))
def DIV(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s / %s)' % (self.expand(first),
self.expand(second, first.type))
def MOD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s %% %s)' % (self.expand(first),
self.expand(second, first.type))
def AS(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s AS %s' % (self.expand(first), second)
# We could implement an option that simulates a full featured SQL
# database. But I think the option should be set explicit or
# implemented as another library.
def ON(self, first, second):
raise NotImplementedError("This is not possible in NoSQL" +
" but can be simulated with a wrapper.")
return '%s ON %s' % (self.expand(first), self.expand(second))
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
#TODO verify full compatibilty with official SQL Like operator
def _build_like_regex(self, arg,
case_sensitive=True,
ends_with=False,
starts_with=False,
whole_string=True,
like_wildcards=False):
import re
base = self.expand(arg,'string')
need_regex = (whole_string or not case_sensitive
or starts_with or ends_with
or like_wildcards and ('_' in base or '%' in base))
if not need_regex:
return base
else:
expr = re.escape(base)
if like_wildcards:
expr = expr.replace('\\%','.*')
expr = expr.replace('\\_','.').replace('_','.')
if starts_with:
pattern = '^%s'
elif ends_with:
pattern = '%s$'
elif whole_string:
pattern = '^%s$'
else:
pattern = '%s'
regex = { '$regex': pattern % expr }
if not case_sensitive:
regex['$options'] = 'i'
return regex
def LIKE(self, first, second, case_sensitive=True):
regex = self._build_like_regex(
second, case_sensitive=case_sensitive, like_wildcards=True)
return { self.expand(first): regex }
def ILIKE(self, first, second):
return self.LIKE(first, second, case_sensitive=False)
def STARTSWITH(self, first, second):
regex = self._build_like_regex(second, starts_with=True)
return { self.expand(first): regex }
def ENDSWITH(self, first, second):
regex = self._build_like_regex(second, ends_with=True)
return { self.expand(first): regex }
#TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=True):
ret = None
if isinstance(second, self.ObjectId):
val = second
elif isinstance(first, Field) and first.type == 'list:string':
if isinstance(second, Field) and second.type == 'string':
ret = {
'$where' :
"this.%s.indexOf(this.%s) > -1" % (first.name, second.name)
}
else:
val = self._build_like_regex(
second, case_sensitive=case_sensitive, whole_string=True)
else:
val = self._build_like_regex(
second, case_sensitive=case_sensitive, whole_string=False)
if not ret:
ret = {self.expand(first): val}
return ret
class MongoBlob(Binary):
MONGO_BLOB_BYTES = USER_DEFINED_SUBTYPE
MONGO_BLOB_NON_UTF8_STR = USER_DEFINED_SUBTYPE + 1
def __new__(cls, value):
# return None and Binary() unmolested
if value is None or isinstance(value, Binary):
return value
# bytearray is marked as MONGO_BLOB_BYTES
if isinstance(value, bytearray):
return Binary.__new__(cls, bytes(value), MongoBlob.MONGO_BLOB_BYTES)
# return non-strings as Binary(), eg: PY3 bytes()
if not isinstance(value, basestring):
return Binary(value)
# if string is encodable as UTF-8, then return as string
try:
value.encode('utf-8')
return value
except:
# string which can not be UTF-8 encoded, eg: pickle strings
return Binary.__new__(cls, value, MongoBlob.MONGO_BLOB_NON_UTF8_STR)
def __repr__(self):
return repr(MongoBlob.decode(self))
@staticmethod
def decode(value):
if isinstance(value, Binary):
if value.subtype == MongoBlob.MONGO_BLOB_BYTES:
return bytearray(value)
if value.subtype == MongoBlob.MONGO_BLOB_NON_UTF8_STR:
return str(value)
return value
| 19,479 | 5,164 | 72 |
54ed7db99e67625149aca3b5d1418ff1460c4d3f | 279 | py | Python | harness/determined/experimental/__init__.py | brain-good/determined | 4a80514f514b1cc2aa5dcd604e17c18876b3b76e | [
"Apache-2.0"
] | null | null | null | harness/determined/experimental/__init__.py | brain-good/determined | 4a80514f514b1cc2aa5dcd604e17c18876b3b76e | [
"Apache-2.0"
] | null | null | null | harness/determined/experimental/__init__.py | brain-good/determined | 4a80514f514b1cc2aa5dcd604e17c18876b3b76e | [
"Apache-2.0"
] | null | null | null | from determined_common.experimental import (
Checkpoint,
Determined,
ExperimentReference,
TrialReference,
)
from determined.experimental._native import (
create,
create_trial_instance,
test_one_batch,
init_native,
_local_execution_manager,
)
| 18.6 | 45 | 0.741935 | from determined_common.experimental import (
Checkpoint,
Determined,
ExperimentReference,
TrialReference,
)
from determined.experimental._native import (
create,
create_trial_instance,
test_one_batch,
init_native,
_local_execution_manager,
)
| 0 | 0 | 0 |
751978667c9b933dfa751e93be53aa234dc1a2f2 | 897 | py | Python | ogs5py/fileclasses/asc/core.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | 3 | 2018-05-27T15:39:07.000Z | 2018-10-29T17:02:11.000Z | ogs5py/fileclasses/asc/core.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | 1 | 2018-11-12T11:32:12.000Z | 2018-11-12T13:07:48.000Z | ogs5py/fileclasses/asc/core.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Class for the ogs ASC file."""
from ogs5py.fileclasses.base import LineFile
class ASC(LineFile):
"""
Class for the ogs ASC file.
Parameters
----------
lines : list of str, optional
content of the file as a list of lines
Default: None
name : str, optional
name of the file without extension
Default: "textfile"
task_root : str, optional
Path to the destiny folder.
Default: cwd+"ogs5model"
task_id : str, optional
Name for the ogs task. (a place holder)
Default: "model"
Notes
-----
This is just handled as a line-wise file. You can access the data by line
with:
ASC.lines
This file type comes either from .tim .pcs or .gem
"""
| 23.605263 | 77 | 0.595318 | # -*- coding: utf-8 -*-
"""Class for the ogs ASC file."""
from ogs5py.fileclasses.base import LineFile
class ASC(LineFile):
"""
Class for the ogs ASC file.
Parameters
----------
lines : list of str, optional
content of the file as a list of lines
Default: None
name : str, optional
name of the file without extension
Default: "textfile"
task_root : str, optional
Path to the destiny folder.
Default: cwd+"ogs5model"
task_id : str, optional
Name for the ogs task. (a place holder)
Default: "model"
Notes
-----
This is just handled as a line-wise file. You can access the data by line
with:
ASC.lines
This file type comes either from .tim .pcs or .gem
"""
def __init__(self, **OGS_Config):
super().__init__(**OGS_Config)
self.file_ext = ".asc"
| 82 | 0 | 27 |
d2ba4b0ec04f23a3715468b1721b22e8bd62bf9f | 4,361 | py | Python | Newcastle University/FewCloudComputing/Load.py | haoranD/Postgraduate-Master | b297211eeb20e3dc5535fefc84e1e0c2b71ddaa3 | [
"MIT"
] | null | null | null | Newcastle University/FewCloudComputing/Load.py | haoranD/Postgraduate-Master | b297211eeb20e3dc5535fefc84e1e0c2b71ddaa3 | [
"MIT"
] | null | null | null | Newcastle University/FewCloudComputing/Load.py | haoranD/Postgraduate-Master | b297211eeb20e3dc5535fefc84e1e0c2b71ddaa3 | [
"MIT"
] | 1 | 2019-12-08T03:14:15.000Z | 2019-12-08T03:14:15.000Z | import sys
import numpy
import time
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import urllib
import csv
import time
#Generate a list of random number from Normal distribution
#Generate a list of random number from Poisson distribution
# Visualize the number we generated
#Connect to url
if __name__ == '__main__':
# How many parameters we used
# 0: this .py file
# 1: url
# 2: mean for Normal Distribution
# 3: standard distribution for Normal Distribution
# 4: Lambda for Poisson Distribution
#Open the csv file
#with open('./CSV_Data/infor.csv', 'a', newline='') as f:
# fieldnames = ['Time', 'infor']
# writer = csv.DictWriter(f, fieldnames=fieldnames)
# writer.writeheader()
if len(sys.argv) == 5:#Judge we have 4 parameters
if sys.argv[4] == 'None':
with open('inforN.csv', 'a', newline='') as f:
fieldnames = ['Time', 'inforN']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# When Lambda equal None means using Normal Distribution
timelistN = gettimelistbyN(float(sys.argv[2]), float(sys.argv[3]), 1000)
Show(timelistN)
length = len(timelistN)
whlieConnect(length, sys.argv[1], 'N', timelistN)
elif (sys.argv[3] == 'None') or (sys.argv[2] == 'None'):
with open('inforP.csv', 'a', newline='') as f:
fieldnames = ['Time', 'inforP']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
#When mean or sd equal to None means using Poisson Distribution
timelistP = gettimelistbyP(float(sys.argv[4]), 1000)
Show(timelistP)
length = len(timelistP)
whlieConnect(length, sys.argv[1], 'P', timelistP)
else:#check the input
print("para2 or para3 or para4 must be one None")
else:#check the input
print("Please input four parameters: url; µ(int&None); σ(int&None); λ(int&None)")
| 39.645455 | 90 | 0.575327 | import sys
import numpy
import time
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import urllib
import csv
import time
#Generate a list of random number from Normal distribution
def gettimelistbyN(para1, para2, size):
timelistN = numpy.random.normal(loc=para1, scale=para2, size=size)
return timelistN
#Generate a list of random number from Poisson distribution
def gettimelistbyP(para1, size):
timelistP = numpy.random.poisson(lam=para1, size=size)
return timelistP
# Visualize the number we generated
def Show(datas):
plt.hist(datas, bins=100, normed=True)
plt.show()
#Connect to url
def whlieConnect(length, url, Type, timelist):
count = 0
while count != length:#Choose Normal or Poisson
if Type == 'N':#When it is Normal Distribution
#Generate a array of number
#timelistN = gettimelistbyN(float(sys.argv[2]), float(sys.argv[3]), None)
print('Start:' + str(count) + 'time')
print('the number is:' + str(abs(timelist[count])))
print('wait secends:' + str(timelist[count]))
time.sleep(abs(timelist[count]))
response = urllib.request.urlopen('http://' + url)
data = response.read()
_time = time.strftime('%Y.%m.%d %H:%M:%S',time.localtime(time.time()))
data={'Time':_time,'infor':data}
fieldnames = ['Time', 'infor']
with open('inforN.csv', 'a', newline='',encoding='gb18030') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerows([data])
elif Type == 'P':#When it is Poisson Distribution
#Generate a array of number
#timelistP = gettimelistbyP(float(sys.argv[4]), None)
print('Start:' + str(count) + 'time')
print('the number is:' + str(abs(timelist[count])))
print('wait secends:' + str(timelist[count]))
time.sleep(abs(timelist[count]))
response = urllib.request.urlopen('http://' + url)
data = response.read()
_time = time.strftime('%Y.%m.%d %H:%M:%S',time.localtime(time.time()))
data={'Time':_time,'infor':data}
fieldnames = ['Time', 'infor']
#Save as csv
with open('inforP.csv', 'a', newline='',encoding='gb18030') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerows([data])
count += 1
if __name__ == '__main__':
# How many parameters we used
# 0: this .py file
# 1: url
# 2: mean for Normal Distribution
# 3: standard distribution for Normal Distribution
# 4: Lambda for Poisson Distribution
#Open the csv file
#with open('./CSV_Data/infor.csv', 'a', newline='') as f:
# fieldnames = ['Time', 'infor']
# writer = csv.DictWriter(f, fieldnames=fieldnames)
# writer.writeheader()
if len(sys.argv) == 5:#Judge we have 4 parameters
if sys.argv[4] == 'None':
with open('inforN.csv', 'a', newline='') as f:
fieldnames = ['Time', 'inforN']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# When Lambda equal None means using Normal Distribution
timelistN = gettimelistbyN(float(sys.argv[2]), float(sys.argv[3]), 1000)
Show(timelistN)
length = len(timelistN)
whlieConnect(length, sys.argv[1], 'N', timelistN)
elif (sys.argv[3] == 'None') or (sys.argv[2] == 'None'):
with open('inforP.csv', 'a', newline='') as f:
fieldnames = ['Time', 'inforP']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
#When mean or sd equal to None means using Poisson Distribution
timelistP = gettimelistbyP(float(sys.argv[4]), 1000)
Show(timelistP)
length = len(timelistP)
whlieConnect(length, sys.argv[1], 'P', timelistP)
else:#check the input
print("para2 or para3 or para4 must be one None")
else:#check the input
print("Please input four parameters: url; µ(int&None); σ(int&None); λ(int&None)")
| 2,124 | 0 | 92 |
1ebb0e2aefaa78274c44b66323485a579a2f7e3a | 3,675 | py | Python | src/python/nimbusml/examples/EnsembleClassifier.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 134 | 2018-11-01T22:15:24.000Z | 2019-05-04T11:30:08.000Z | src/python/nimbusml/examples/EnsembleClassifier.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 226 | 2019-05-07T19:00:44.000Z | 2021-01-06T07:59:48.000Z | src/python/nimbusml/examples/EnsembleClassifier.py | michaelgsharp/NimbusML | 50031157265f49eec85d27fe67582d9ddaf01ef9 | [
"MIT"
] | 43 | 2019-05-15T20:19:42.000Z | 2022-03-30T10:26:07.000Z | ###############################################################################
# EnsembleClassifier
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.ensemble import EnsembleClassifier
from nimbusml.ensemble.feature_selector import RandomFeatureSelector
from nimbusml.ensemble.output_combiner import ClassifierVoting
from nimbusml.ensemble.subset_selector import RandomPartitionSelector
from nimbusml.ensemble.sub_model_selector import ClassifierBestDiverseSelector
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path)
print(data.head())
# age case education induced parity ... row_num spontaneous ...
# 0 26 1 0-5yrs 1 6 ... 1 2 ...
# 1 42 1 0-5yrs 1 1 ... 2 0 ...
# 2 39 1 0-5yrs 2 6 ... 3 0 ...
# 3 34 1 0-5yrs 2 4 ... 4 0 ...
# 4 35 1 6-11yrs 1 3 ... 5 1 ...
# define the training pipeline using default sampling and ensembling parameters
pipeline_with_defaults = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleClassifier(feature=['age', 'edu', 'parity'],
label='induced',
num_models=3)
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_defaults.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# PredictedLabel Score.0 Score.1 Score.2
# 0 2 0.202721 0.186598 0.628115
# 1 0 0.716737 0.190289 0.092974
# 2 2 0.201026 0.185602 0.624761
# 3 0 0.423328 0.235074 0.365649
# 4 0 0.577509 0.220827 0.201664
# print evaluation metrics
print(metrics)
# Accuracy(micro-avg) Accuracy(macro-avg) Log-loss ... (class 0) ...
# 0 0.612903 0.417519 0.846467 ... 0.504007 ...
# (class 1) (class 2)
# 1.244033 1.439364
# define the training pipeline with specific sampling and ensembling options
pipeline_with_options = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleClassifier(feature=['age', 'edu', 'parity'],
label='induced',
num_models=3,
sampling_type = RandomPartitionSelector(
feature_selector=RandomFeatureSelector(
features_selction_proportion=0.7)),
sub_model_selector_type=ClassifierBestDiverseSelector(),
output_combiner=ClassifierVoting())
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_options.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# PredictedLabel Score.0 Score.1 Score.2
# 0 2 0.0 0.0 1.0
# 1 0 1.0 0.0 0.0
# 2 2 0.0 0.0 1.0
# 3 0 1.0 0.0 0.0
# 4 0 1.0 0.0 0.0
# print evaluation metrics
# note that accuracy metrics are lower than with defaults as this is a small
# dataset that we partition into 3 chunks for each classifier, which decreases
# model quality.
print(metrics)
# Accuracy(micro-avg) Accuracy(macro-avg) Log-loss ... (class 0) ...
# 0 0.596774 0.38352 13.926926 ... 0.48306 ...
# (class 1) (class 2)
# 33.52293 29.871374 | 43.235294 | 86 | 0.578503 | ###############################################################################
# EnsembleClassifier
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.ensemble import EnsembleClassifier
from nimbusml.ensemble.feature_selector import RandomFeatureSelector
from nimbusml.ensemble.output_combiner import ClassifierVoting
from nimbusml.ensemble.subset_selector import RandomPartitionSelector
from nimbusml.ensemble.sub_model_selector import ClassifierBestDiverseSelector
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path)
print(data.head())
# age case education induced parity ... row_num spontaneous ...
# 0 26 1 0-5yrs 1 6 ... 1 2 ...
# 1 42 1 0-5yrs 1 1 ... 2 0 ...
# 2 39 1 0-5yrs 2 6 ... 3 0 ...
# 3 34 1 0-5yrs 2 4 ... 4 0 ...
# 4 35 1 6-11yrs 1 3 ... 5 1 ...
# define the training pipeline using default sampling and ensembling parameters
pipeline_with_defaults = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleClassifier(feature=['age', 'edu', 'parity'],
label='induced',
num_models=3)
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_defaults.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# PredictedLabel Score.0 Score.1 Score.2
# 0 2 0.202721 0.186598 0.628115
# 1 0 0.716737 0.190289 0.092974
# 2 2 0.201026 0.185602 0.624761
# 3 0 0.423328 0.235074 0.365649
# 4 0 0.577509 0.220827 0.201664
# print evaluation metrics
print(metrics)
# Accuracy(micro-avg) Accuracy(macro-avg) Log-loss ... (class 0) ...
# 0 0.612903 0.417519 0.846467 ... 0.504007 ...
# (class 1) (class 2)
# 1.244033 1.439364
# define the training pipeline with specific sampling and ensembling options
pipeline_with_options = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
EnsembleClassifier(feature=['age', 'edu', 'parity'],
label='induced',
num_models=3,
sampling_type = RandomPartitionSelector(
feature_selector=RandomFeatureSelector(
features_selction_proportion=0.7)),
sub_model_selector_type=ClassifierBestDiverseSelector(),
output_combiner=ClassifierVoting())
])
# train, predict, and evaluate
metrics, predictions = pipeline_with_options.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# PredictedLabel Score.0 Score.1 Score.2
# 0 2 0.0 0.0 1.0
# 1 0 1.0 0.0 0.0
# 2 2 0.0 0.0 1.0
# 3 0 1.0 0.0 0.0
# 4 0 1.0 0.0 0.0
# print evaluation metrics
# note that accuracy metrics are lower than with defaults as this is a small
# dataset that we partition into 3 chunks for each classifier, which decreases
# model quality.
print(metrics)
# Accuracy(micro-avg) Accuracy(macro-avg) Log-loss ... (class 0) ...
# 0 0.596774 0.38352 13.926926 ... 0.48306 ...
# (class 1) (class 2)
# 33.52293 29.871374 | 0 | 0 | 0 |
c50880c9f81ec471512cd26e3870a7cb328aba45 | 2,365 | py | Python | lte/gateway/python/integ_tests/s1aptests/test_s1setup_incorrect_plmn.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | 2 | 2020-12-09T11:42:30.000Z | 2021-09-26T03:28:33.000Z | lte/gateway/python/integ_tests/s1aptests/test_s1setup_incorrect_plmn.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | 151 | 2020-09-03T20:44:13.000Z | 2022-03-31T20:28:52.000Z | lte/gateway/python/integ_tests/s1aptests/test_s1setup_incorrect_plmn.py | kkahrs/magma | 73e666627dc28e0c492feab7321bb7d6dd433b09 | [
"BSD-3-Clause"
] | 2 | 2021-05-27T18:15:16.000Z | 2021-05-27T18:41:39.000Z | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import ctypes
import s1ap_types
from integ_tests.s1aptests.s1ap_utils import S1ApUtil
if __name__ == "__main__":
unittest.main()
| 36.953125 | 78 | 0.668922 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import ctypes
import s1ap_types
from integ_tests.s1aptests.s1ap_utils import S1ApUtil
class TestS1SetupFailureIncorrectPlmn(unittest.TestCase):
    """Integration test: the S1 Setup procedure must fail when the eNB
    advertises a PLMN that does not match the MME's configuration."""

    def setUp(self):
        """Create the S1AP test-framework utility used to drive the eNB."""
        self._s1_util = S1ApUtil()

    def tearDown(self):
        """Shut down the SCTP association and release framework resources."""
        print("************************* Sending SCTP SHUTDOWN")
        self._s1_util.issue_cmd(s1ap_types.tfwCmd.SCTP_SHUTDOWN_REQ, None)
        self._s1_util.cleanup()

    def test_s1setup_incorrect_plmn(self):
        """ S1 Setup with incorrect plmn ID """
        print("************************* Enb tester configuration")
        req = s1ap_types.FwNbConfigReq_t()
        req.cellId_pr.pres = True
        req.cellId_pr.cell_id = 10
        req.plmnId_pr.pres = True
        # Convert PLMN to ASCII character array of MCC and MNC digits
        # For 5 digit PLMN add \0 in the end, e.g., "00101\0"
        # "333333" is deliberately a PLMN the MME is not configured with.
        req.plmnId_pr.plmn_id = (ctypes.c_ubyte * 6).from_buffer_copy(
            bytearray(b"333333")
        )
        print("************************* Sending ENB configuration Request")
        # issue_cmd returns 0 on success.
        assert self._s1_util.issue_cmd(s1ap_types.tfwCmd.ENB_CONFIG, req) == 0
        response = self._s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.ENB_CONFIG_CONFIRM.value
        res = response.cast(s1ap_types.FwNbConfigCfm_t)
        assert res.status == s1ap_types.CfgStatus.CFG_DONE.value
        print("************************* Sending S1-setup Request")
        req = None
        assert (
            self._s1_util.issue_cmd(s1ap_types.tfwCmd.ENB_S1_SETUP_REQ, req)
            == 0
        )
        response = self._s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.ENB_S1_SETUP_RESP.value
        res = response.cast(s1ap_types.FwNbS1setupRsp_t)
        # The mismatching PLMN must cause the MME to reject the S1 Setup.
        assert res.res == s1ap_types.S1_setp_Result.S1_SETUP_FAILED.value
if __name__ == "__main__":
unittest.main()
| 200 | 1,513 | 23 |
ec7e896333522e77965ed3aa0a3b8527b6fb7d82 | 2,714 | py | Python | api/views/estate.py | sirghiny/Real-Estate-Manager | 10272feec22c40da7f927219225b8d2e27a20e38 | [
"MIT"
] | null | null | null | api/views/estate.py | sirghiny/Real-Estate-Manager | 10272feec22c40da7f927219225b8d2e27a20e38 | [
"MIT"
] | 1 | 2018-05-09T13:17:41.000Z | 2018-05-09T13:17:41.000Z | api/views/estate.py | sirghiny/Real-Estate-Manager | 10272feec22c40da7f927219225b8d2e27a20e38 | [
"MIT"
] | 2 | 2018-05-01T15:03:13.000Z | 2019-10-28T13:59:29.000Z | """Estate manipulation functionality."""
from flask import request
from flask_restful import Resource
from api.helpers.modelops import get_boards, get_estates
from api.helpers.validation import validate_json
from api.models import Estate
class EstateResource(Resource):
    """View functions for estates."""

    def get(self, estate_id=None):
        """View an estate(s)."""
        outcome = get_estates(estate_id)
        if isinstance(outcome, dict):
            # Lookup failed; the helper already built the error payload.
            return outcome, 404
        if isinstance(outcome, list):
            # No id given: list every estate.
            views = [item.view() for item in outcome]
            return {
                'status': 'success',
                'data': {'estates': views}
            }, 200
        # Single estate found.
        return {
            'status': 'success',
            'data': {'estate': outcome.view()}
        }, 200

    def post(self):
        """Create an estate."""
        payload = request.get_json()
        check = validate_json(['address', 'board_id'], payload)
        # validate_json returns a bool on success and the list of missing
        # fields otherwise.
        if not isinstance(check, bool):
            return {
                'status': 'fail',
                'message': 'Not all fields were provided.',
                'missing': check
            }, 400
        board = get_boards(payload['board_id'])
        if isinstance(board, dict):
            # Board lookup failed; propagate the error payload.
            return board, 404
        estate = Estate(address=payload['address'])
        estate_id = estate.save()
        board.insert('estates_owned', [Estate.get(id=estate_id)])
        return {
            'status': 'success',
            'message': 'Estate with id {} created.'.format(estate_id)
        }, 201

    def patch(self, estate_id):
        """Edit an estate."""
        pass

    def delete(self, estate_id):
        """Delete an estate."""
        pass
class EstatePaymentResource(Resource):
    """View functions for estate payments."""

    def get(self, estate_id):
        """View an estate's payment details."""
        result = get_estates(estate_id)
        if isinstance(result, dict):
            # get_estates returns an error dict when the estate is missing.
            return result, 404
        else:
            payment = result.payment.view()
            return {
                'status': 'success',
                'data': {'payment': payment}
            }, 200

    def delete(self, estate_id):
        """Clear an estate's payment history."""
        # NOTE(review): the docstring says "clear payment history", but this
        # deletes the estate itself and the response message says so too —
        # confirm whether result.payment should be deleted instead.
        result = get_estates(estate_id)
        if isinstance(result, dict):
            return result, 404
        else:
            result.delete()
            return {
                'status': 'success',
                'message': 'The estate has been deleted.'
            }, 200
| 30.155556 | 74 | 0.517686 | """Estate manipulation functionality."""
from flask import request
from flask_restful import Resource
from api.helpers.modelops import get_boards, get_estates
from api.helpers.validation import validate_json
from api.models import Estate
class EstateResource(Resource):
"""View functions for estates."""
def get(self, estate_id=None):
"""View an estate(s)."""
result = get_estates(estate_id)
if isinstance(result, dict):
return result, 404
elif isinstance(result, list):
return {
'status': 'success',
'data': {'estates': [estate.view() for estate in result]}
}, 200
else:
return {
'status': 'success',
'data': {'estate': result.view()}
}, 200
def post(self):
"""Create an estate."""
payload = request.get_json()
required = ['address', 'board_id']
result = validate_json(required, payload)
if isinstance(result, bool) is True:
board = get_boards(payload['board_id'])
if isinstance(board, dict):
return board, 404
else:
new_estate = Estate(
address=payload['address'])
new_id = new_estate.save()
board.insert('estates_owned', [Estate.get(id=new_id)])
return {
'status': 'success',
'message': 'Estate with id {} created.'.format(new_id)
}, 201
else:
return {
'status': 'fail',
'message': 'Not all fields were provided.',
'missing': result
}, 400
def patch(self, estate_id):
"""Edit an estate."""
pass
def delete(self, estate_id):
"""Delete an estate."""
pass
class EstatePaymentResource(Resource):
"""View functions for estate payments."""
def get(self, estate_id):
"""View an estate's payment details."""
result = get_estates(estate_id)
if isinstance(result, dict):
return result, 404
else:
payment = result.payment.view()
return {
'status': 'success',
'data': {'payment': payment}
}, 200
def delete(self, estate_id):
"""Clear an estate's payment history."""
result = get_estates(estate_id)
if isinstance(result, dict):
return result, 404
else:
result.delete()
return {
'status': 'success',
'message': 'The estate has been deleted.'
}, 200
| 0 | 0 | 0 |
fed235140b76762ddbbdc8dc73d0f0099b3df56a | 1,119 | py | Python | proto_1/ddq/topics/set_theory/topic.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | proto_1/ddq/topics/set_theory/topic.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | 6 | 2021-03-19T12:06:56.000Z | 2022-03-12T00:23:09.000Z | proto_1/ddq/topics/set_theory/topic.py | jadnohra/connect | 8eb21e6f122898094447bc3d5edb3053d5a2adf2 | [
"Unlicense"
] | null | null | null | from typing import List
from ddq.universe import Universe
from ddq.builder import Builder
from ddq.topic import Topic, Predicate, Constant, Axiom, Definition
from ddq.topic import Topic as BaseTopic
from .membership import Membership
from .empty_set import EmptySetConstant, EmptySetAxiom
from .non_membership import NonMembeshipDefinition
| 30.243243 | 67 | 0.710456 | from typing import List
from ddq.universe import Universe
from ddq.builder import Builder
from ddq.topic import Topic, Predicate, Constant, Axiom, Definition
from ddq.topic import Topic as BaseTopic
from .membership import Membership
from .empty_set import EmptySetConstant, EmptySetAxiom
from .non_membership import NonMembeshipDefinition
class SetTheoryTopic(BaseTopic):
    """Topic describing basic set theory: membership, the empty-set
    constant and its defining axiom, and the non-membership definition."""

    def __init__(self):
        # The Builder registers named constants so they can be referenced
        # again when the axioms are constructed.
        build = Builder()
        self._predicates = [Membership()]
        self._constants = [build.put("empty", EmptySetConstant())]
        # The empty-set axiom is stated about the registered constant.
        self._axioms = [EmptySetAxiom(build.get("empty"))]
        self._definitions = [NonMembeshipDefinition()]

    def get_references(self) -> List[str]:
        """Bibliographic references backing this topic."""
        return ["Elements of Set Theory (Henle)"]

    def get_predicates(self) -> List[Predicate]:
        """Predicates introduced by this topic."""
        return self._predicates

    def get_constants(self) -> List[Constant]:
        """Constants introduced by this topic."""
        return self._constants

    def get_axioms(self) -> List[Axiom]:
        """Axioms introduced by this topic."""
        return self._axioms

    def get_definitions(self) -> List[Definition]:
        """Definitions introduced by this topic."""
        return self._definitions
return self._definitions
def topic() -> SetTheoryTopic:
    """Factory returning a fresh :class:`SetTheoryTopic` instance."""
    return SetTheoryTopic()
| 559 | 11 | 207 |
ef257b6c367a828748bef558db92abca08e69c06 | 326 | py | Python | locustfile.py | vandmo/threlos-spring-example-1 | f95771c5936189b7b966c6baf42595e30fb45c75 | [
"Apache-2.0"
] | null | null | null | locustfile.py | vandmo/threlos-spring-example-1 | f95771c5936189b7b966c6baf42595e30fb45c75 | [
"Apache-2.0"
] | null | null | null | locustfile.py | vandmo/threlos-spring-example-1 | f95771c5936189b7b966c6baf42595e30fb45c75 | [
"Apache-2.0"
] | null | null | null | import random
from locust import HttpUser, task, between
| 19.176471 | 52 | 0.619632 | import random
from locust import HttpUser, task, between
def randomUser():
    """Pick a random (username, password) pair from the known test accounts."""
    credentials = [
        ('admin', 'verysecret'),
        ('normaluser', 'secret'),
    ]
    return random.choice(credentials)
class QuickstartUser(HttpUser):
    """Locust user that repeatedly hits /hello as a random test account."""

    # Pause 5-9 seconds between consecutive tasks.
    wait_time = between(5, 9)

    @task
    def hello(self):
        """GET /hello using HTTP basic auth from a random test user."""
        self.client.get("/hello", auth=randomUser())
| 145 | 77 | 46 |
5ae4dfd0e7e700b5e5ca748a5ebfbe8d81ff66ab | 1,192 | py | Python | ensemble.py | piotrmirowski/DependencyTreeRnn | 8211a4ff4d0708dab83079295f9b768ebaeae68a | [
"BSD-3-Clause"
] | 39 | 2015-07-07T06:17:06.000Z | 2021-05-02T23:14:41.000Z | ensemble.py | zjh-nudger/DependencyRNN | c48c25c153551f340de704edf35b8831eab6e7a2 | [
"BSD-3-Clause"
] | 3 | 2016-06-21T02:58:04.000Z | 2016-07-05T17:27:55.000Z | ensemble.py | zjh-nudger/DependencyRNN | c48c25c153551f340de704edf35b8831eab6e7a2 | [
"BSD-3-Clause"
] | 11 | 2015-07-07T17:43:30.000Z | 2019-09-18T03:39:52.000Z | # first arg gold, following ones files with scores to ensemble
import sys
goldFile = sys.argv[1]
answers = []
for line in open(goldFile).readlines():
answers.append(int(line.strip()))
print "loaded " + str(len(answers)) + " answers"
# an array with an array per model to be ensebled
individualSentencePredictions = []
for file in sys.argv[2:]:
sentencePredictions = []
for line in open(file).readlines():
sentencePredictions.append(float(line.strip()))
individualSentencePredictions.append(sentencePredictions)
# now for each answer
# take the scores for 5 sentence predictions
# add them
# pick the highest one and compare
correct = 0.0
indiCounter= 0
for answer in answers:
maxScore = float("-inf")
bestAnswer = None
for i in xrange(5):
scoreSum = 0.0
for preds in individualSentencePredictions:
scoreSum += preds[indiCounter]
#print scoreSum
if scoreSum > maxScore:
maxScore = scoreSum
bestAnswer = i
indiCounter += 1
#print bestAnswer
#print maxScore
if answer == bestAnswer:
correct += 1
print "accuracy: " + str(correct/len(answers))
| 23.84 | 62 | 0.661074 | # first arg gold, following ones files with scores to ensemble
import sys
goldFile = sys.argv[1]
answers = []
for line in open(goldFile).readlines():
answers.append(int(line.strip()))
print "loaded " + str(len(answers)) + " answers"
# an array with an array per model to be ensebled
individualSentencePredictions = []
for file in sys.argv[2:]:
sentencePredictions = []
for line in open(file).readlines():
sentencePredictions.append(float(line.strip()))
individualSentencePredictions.append(sentencePredictions)
# now for each answer
# take the scores for 5 sentence predictions
# add them
# pick the highest one and compare
correct = 0.0
indiCounter= 0
for answer in answers:
maxScore = float("-inf")
bestAnswer = None
for i in xrange(5):
scoreSum = 0.0
for preds in individualSentencePredictions:
scoreSum += preds[indiCounter]
#print scoreSum
if scoreSum > maxScore:
maxScore = scoreSum
bestAnswer = i
indiCounter += 1
#print bestAnswer
#print maxScore
if answer == bestAnswer:
correct += 1
print "accuracy: " + str(correct/len(answers))
| 0 | 0 | 0 |
444c86d26250a7f57c0ea4bb01d3e3286fcbf2ee | 54,114 | py | Python | animation/face_editor.py | jzboylxj/XDLibs | 76ab640502d7e254bc98930d6ebb9e870476ed9a | [
"MIT"
] | 1 | 2021-03-11T02:24:08.000Z | 2021-03-11T02:24:08.000Z | animation/face_editor.py | jzboylxj/XDLibs | 76ab640502d7e254bc98930d6ebb9e870476ed9a | [
"MIT"
] | null | null | null | animation/face_editor.py | jzboylxj/XDLibs | 76ab640502d7e254bc98930d6ebb9e870476ed9a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# @Time : 2020/9/7 9:39
# @Author : Li XiaoJun
# @Site :
# @File : face_editor.py
import json
import os
from imp import reload
from animation import common
from animation import test_node
from animation.helper import manager_version
from pymel import core as pm
reload(common)
reload(test_node)
def get_channel_values(jnt, pre=5):
    """Collect a joint's channel-box values as a flat 9-element list.

    Translation values are scaled by 0.01 to bridge the unit difference
    between Maya and Unity when round-tripping FBX data; every value is
    rounded to ``pre`` decimal places.

    :param jnt: name of the target joint
    :param pre: number of decimal places to keep
    :return: [tx, ty, tz, rx, ry, rz, sx, sy, sz]
    """
    node = pm.PyNode(jnt)
    values = []
    # Translation is scaled down 100x; rotation and scale are kept as-is.
    for channel, factor in (("translate", 0.01),
                            ("rotate", 1.0),
                            ("scale", 1.0)):
        for axis in "XYZ":
            raw = node.attr(channel + axis).get()
            values.append(round(raw * factor, pre))
    return values
class FaceEditor(common.Singleton):
"""
Json数据文件管理工具
"""
def show(self):
"""
显示工具窗口
:return: window
"""
if pm.window("faceEditorWnd", ex=True):
pm.deleteUI("faceEditorWnd")
pm.window(
"faceEditorWnd",
t=u"Face Editor %s" % manager_version,
mb=True,
cc=lambda *args: self._closed_window_cmd())
self.menu_list()
pm.formLayout("editor_main_layout")
pm.textFieldButtonGrp(
"config_file_widget",
label="Config",
bl="Setting",
cw3=[50, 100, 50],
adj=2,
p="editor_main_layout",
bc=lambda *args: self.action_json_folder())
self.widget_module_selector()
pm.frameLayout(
"controller_list_grp", label="Controller List:", w=180, h=180)
pm.textScrollList(
"controller_list_widget",
sc=lambda *args: self.action_selected_controller())
pm.popupMenu()
pm.menuItem(
label=u"创建新的控制器",
c=lambda *args: self.new_controller_wnd())
pm.menuItem(
label=u"删除选择控制器")
# pm.menuItem(divider=True)
# pm.menuItem(
# label=u"恢复初始状态")
pm.setParent("..")
# Controller basic info frame
pm.frameLayout(
"controller_info_grp",
label="Controller Basic info:", bgs=True, mh=6)
pm.textFieldGrp(
"controller_name_widget",
label=u"控制器名",
cw2=[60, 200])
pm.textFieldGrp(
"controller_group_widget",
label=u"控制器组",
cw2=[60, 200],
# tcc=lambda *args: self.action_controller_group_widget()
)
pm.textFieldButtonGrp(
"controller_bone_widget",
label=u"挂点位置",
cw3=[60, 200, 140],
bl=u"更新",
# bc=lambda *args: self.action_controller_bone_widget(
# method="button"),
# tcc=lambda *args: self.action_controller_bone_widget(
# method="text")
)
pm.floatFieldGrp(
"controller_offset_widget",
numberOfFields=3,
pre=3,
label=u'挂点偏移',
cw4=[60, 50, 50, 50],
# cc=lambda *args: self.action_controller_off_widget()
)
pm.checkBoxGrp(
"axis_group_widget",
label=u"Axis:",
labelArray3=['X', 'Y', 'Z'],
cw4=[60, 50, 50, 50],
# cc=lambda *args: self.action_change_axis_state(),
numberOfCheckBoxes=3)
pm.setParent("..")
separator1 = pm.separator(style="in", h=10)
pm.tabLayout("axis_setting_grp")
axis_x_tab = self.axis_attr_tab(attr="x")
axis_y_tab = self.axis_attr_tab(attr="y")
axis_z_tab = self.axis_attr_tab(attr="z")
pm.tabLayout(
"axis_setting_grp", e=True,
tl=[(axis_x_tab, 'XAxis'),
(axis_y_tab, 'YAxis'),
(axis_z_tab, 'ZAxis')])
pm.setParent("..")
pm.formLayout(
"editor_main_layout", edit=True,
attachForm=[
("config_file_widget", 'left', 2),
("config_file_widget", 'right', 2),
("config_file_widget", 'top', 5),
("module_selector_widget", 'left', 2),
("module_selector_widget", 'right', 2),
("controller_list_grp", 'left', 2),
("controller_info_grp", 'right', 2),
(separator1, 'left', 2),
(separator1, 'right', 2),
("axis_setting_grp", 'left', 2),
("axis_setting_grp", 'right', 2),
("axis_setting_grp", 'bottom', 5),
],
attachControl=[
("module_selector_widget", 'top', 5,
"config_file_widget"),
("controller_list_grp", 'top', 5,
"module_selector_widget"),
("controller_info_grp", 'top', 5,
"module_selector_widget"),
("controller_info_grp", 'left', 2,
"controller_list_grp"),
(separator1, 'top', 5, "controller_list_grp"),
("axis_setting_grp", 'top', 5, separator1),
])
pm.showWindow("faceEditorWnd")
    def action_change_module(self):
        """
        Callback invoked when the module option menu changes.

        Loads the newly selected module's data, clears every panel widget
        and repopulates the UI via init_system().
        :return:
        """
        selected_module = pm.optionMenuGrp(
            "module_selector_widget", q=True, value=True)
        # print(selected_module)
        self.current_module = self.face_data[selected_module]
        # print(self.select_module)
        # Remove every entry from the controller list before refilling it.
        pm.textScrollList("controller_list_widget", e=True, ra=True)
        self.clean_controller_widget_data()
        self.clean_axis_widget("x")
        self.clean_axis_widget("y")
        self.clean_axis_widget("z")
        self.init_system()
        return
def menu_list(self):
"""
工具菜单栏
:return:
"""
pm.menu(label=u"文件", tearOff=False)
pm.menuItem(
label=u"保存数据",
c=lambda *args: self.save_face_data())
pm.menuItem(divider=True)
pm.menuItem(
label=u"创建新模块",
c=lambda *args: self.new_module())
pm.menuItem(
label=u"创建控制器",
c=lambda *args: self.new_controller_wnd())
# pm.menu(label=u"设置", tearOff=False)
# pm.menuItem(
# label=u"设置Json存放目录",
# c=lambda *args: self.setting_json_folder())
# pm.menuItem(
# label=u"调试模式", cb=False)
pm.menu(label=u"测试", tearOff=False)
pm.menuItem(
label=u"创建测试用控制器",
c=lambda *args: self.new_test_controller())
return
def init_system(self):
"""
初始化,将配置信息填充到面板上
:return:
"""
pm.textFieldButtonGrp(
"config_file_widget", e=True, text=self.json_folder)
# 填充controller list
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
self.current_module = self.face_data[current_module]
if len(self.current_module) > 0:
for controller in self.current_module:
pm.textScrollList(
"controller_list_widget",
e=True,
a=controller["ControllerName"])
# textScrollList这个份控件的下标默认为1,和python列表默认下标为0不同
pm.textScrollList("controller_list_widget", e=True, sii=1)
self.update_controller_widget_data(
controller_data=self.face_data[current_module][0])
# def generate_custom_data(self):
# dict_data = {}
# self.dict_data = list()
#
# for index in range(0, pm.scrollLayout(
# "controllerListLayout", q=True, nch=True)):
# dict_data = {}
# axis_control = {}
#
# dict_data["controllerName"] = pm.textFieldButtonGrp(
# "controllerNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerGroupName"] = pm.textFieldButtonGrp(
# "controllerGrpNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerBoneName"] = pm.textFieldButtonGrp(
# "controllerBoneNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerPositionOffset"] = pm.floatFieldGrp(
# "controllerBoneOffsetWidget%s" % index, q=True, value=True)
#
# axis_control["XAxis"] = pm.textFieldGrp(
# "controller%sAxisX" % index, q=True, text=True)
# axis_control["YAxis"] = pm.textFieldGrp(
# "controller%sAxisY" % index, q=True, text=True)
# axis_control["ZAxis"] = pm.textFieldGrp(
# "controller%sAxisZ" % index, q=True, text=True)
#
# dict_data["AxisControl"] = axis_control
#
# self.dict_data.append(dict_data)
# def save_custom_data(self):
# """
# 保存自定义捏脸数据
#
# :return: None
# """
# menu_item_selected = pm.optionMenuGrp(
# "faceModuleOptionsWidget", q=True, value=True)
#
# self.generate_custom_data()
# control_file_path = "%s/%s/%sController.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
#
# controller_data = {}
# controller_data["%sController" % menu_item_selected] = self.dict_data
# with open(control_file_path, "w") as f:
# json.dump(controller_data, f, indent=4)
#
# self.generate_custom_detail_data()
# detail_file_path = "%s/%s/%sControlGroup.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
# with open(detail_file_path, "w") as f:
# json.dump(self.detail_data, f, indent=4)
#
# print(u"保存成功")
#
# return
def new_module(self):
"""
模块创建引导窗口
:return:
"""
if pm.window("moduleBuilderWnd", ex=True):
pm.deleteUI("moduleBuilderWnd")
pm.window("moduleBuilderWnd", title="Module Builder")
main_layout = pm.columnLayout(adj=1)
base_frame = pm.frameLayout(
label="Module Base", p=main_layout, mw=5, mh=5,
cll=False, cl=True)
module_selector = pm.optionMenuGrp(
label="Module:", p=base_frame, cw2=[48, 150])
pm.menuItem(label="eye")
pm.menuItem(label="brow")
pm.menuItem(label="nose")
pm.menuItem(label="mouth")
pm.menuItem(label="ear")
pm.menuItem(label="feature")
pm.button(
label="Build Module",
p=base_frame,
c=lambda *args: self.build_module(
module=pm.optionMenuGrp(module_selector, q=True, value=True)))
pm.showWindow("moduleBuilderWnd")
    def clean_controller_widget_data(self):
        """
        Reset every widget of the controller panel to an empty state.
        :return:
        """
        # Controller Basic info frame
        pm.textFieldGrp("controller_name_widget", e=True, text="")
        pm.textFieldGrp("controller_group_widget", e=True, text="")
        pm.textFieldButtonGrp("controller_bone_widget", e=True, text="")
        pm.floatFieldGrp("controller_offset_widget", e=True,
                         value=[0, 0, 0, 0])
        pm.checkBoxGrp("axis_group_widget", e=True, v1=False, v2=False,
                       v3=False)
        # Clear the joint lists inside the per-axis panels.
        pm.textScrollList("axis_x_joint_list", e=True, ra=True)
        pm.textScrollList("axis_y_joint_list", e=True, ra=True)
        pm.textScrollList("axis_z_joint_list", e=True, ra=True)
        # Clear the bone motion-range data of each axis panel.
        self.clean_axis_widget("x")
        self.clean_axis_widget("y")
        self.clean_axis_widget("z")
        return
def update_controller_widget_data(self, controller_data):
"""
为控制器面板部件填充数据
:param controller_data: 单个控制器的字典类型数据
:return:
"""
pm.textFieldGrp(
"controller_name_widget",
e=True, text=controller_data["ControllerName"])
pm.textFieldGrp(
"controller_group_widget", e=True,
text=controller_data["ControllerGroupName"])
pm.textFieldButtonGrp(
"controller_bone_widget", e=True,
text=controller_data["ControllerBoneName"])
pm.floatFieldGrp(
"controller_offset_widget",
e=True,
v1=controller_data["ControllerPositionOffset"][0] * 100,
v2=controller_data["ControllerPositionOffset"][1] * 100,
v3=controller_data["ControllerPositionOffset"][2] * 100)
if (controller_data["AxisControl"]["XAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v1=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v1=True)
if (controller_data["AxisControl"]["YAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v2=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v2=True)
if (controller_data["AxisControl"]["ZAxis"]["GroupName"]) == "":
pm.checkBoxGrp(
"axis_group_widget", e=True, v3=False)
else:
pm.checkBoxGrp(
"axis_group_widget", e=True, v3=True)
# 为Axis部分填充数据
axis_x_joints_grp = controller_data["AxisControl"]["XAxis"]
for axis_x_joint in axis_x_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_x_joint_list", e=True, a=axis_x_joint["BoneName"])
if len(axis_x_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_x_joint_list", e=True, sii=1)
# 为XAxis骨骼的控制范围填充数据
self.update_axis_widget("x", controller_data)
axis_y_joints_grp = controller_data["AxisControl"]["YAxis"]
for axis_y_joint in axis_y_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_y_joint_list", e=True, a=axis_y_joint["BoneName"])
if len(axis_y_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_y_joint_list", e=True, sii=1)
# 为YAxis骨骼的控制范围填充数据
self.update_axis_widget("y", controller_data)
axis_z_joints_grp = controller_data["AxisControl"]["ZAxis"]
for axis_z_joint in axis_z_joints_grp["BoneRange"]:
pm.textScrollList(
"axis_z_joint_list", e=True, a=axis_z_joint["BoneName"])
if len(axis_z_joints_grp["BoneRange"]) > 0:
pm.textScrollList("axis_z_joint_list", e=True, sii=1)
# 为ZAxis骨骼的控制范围填充数据
self.update_axis_widget("z", controller_data)
return
    def action_selected_controller(self):
        """
        Callback fired when a controller is picked in the controller list.
        :return:
        """
        current_module = pm.optionMenuGrp(
            "module_selector_widget", q=True, value=True)
        # textScrollList indices are 1-based, python lists are 0-based.
        selected_index = int(pm.textScrollList(
            "controller_list_widget", q=True, sii=True)[0])
        # Clear the data currently shown on the panel.
        self.clean_controller_widget_data()
        # Fill in the data of the newly selected controller.
        controller_data = self.face_data[current_module][selected_index - 1]
        self.update_controller_widget_data(controller_data=controller_data)
def new_test_controller(self):
"""
创建测试用控制器
"""
if not pm.objExists("TestGeoGrp"):
pm.createNode("transform", name="TestGeoGrp")
selected_controller = pm.textScrollList(
"controller_list_widget", q=True, si=True)[0]
print(selected_controller)
if pm.objExists(selected_controller):
pm.error("The {} was exists in scene".format(selected_controller))
else:
test_controller = pm.polySphere(
r=0.5, sx=20, sy=20, ax=[0, 1, 0], cuv=2, ch=1,
name=selected_controller)[0]
test_grp = pm.createNode("transform",
name="{}Grp".format(selected_controller))
pm.parent(test_controller, test_grp)
pm.parent(test_grp, "TestGeoGrp")
controller_bone_name = pm.textFieldButtonGrp(
"controller_bone_widget", q=True, text=True)
pm.parentConstraint(controller_bone_name, test_grp, mo=False)
controller_offset = pm.floatFieldGrp(
"controller_offset_widget", q=True, value=True)
common.lock_and_hide_attr(
test_controller, translate=False, vis=True)
test_controller.translate.set([
controller_offset[0],
controller_offset[1],
controller_offset[2]])
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
current_controller = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_data = self.face_data[current_module][
current_controller - 1]["AxisControl"]
# 为测试控制器添加测试属性
if pm.checkBoxGrp("axis_group_widget", q=True, v1=True):
pm.addAttr(test_controller,
ln="sliderX", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderX".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["XAxis"]["BoneRange"]
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderX")
pm.floatSliderGrp("axis_x_test_widget", e=True, en=True)
pm.connectControl(
"axis_x_test_widget",
test_controller.attr("sliderX"))
if pm.checkBoxGrp("axis_group_widget", q=True, v2=True):
pm.addAttr(test_controller,
ln="sliderY", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderY".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["YAxis"]["BoneRange"]
# print(driver_data_list)
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderY")
pm.floatSliderGrp("axis_y_test_widget", e=True, en=True)
pm.connectControl(
"axis_y_test_widget",
test_controller.attr("sliderY"))
if pm.checkBoxGrp("axis_group_widget", q=True, v3=True):
pm.addAttr(test_controller,
ln="sliderZ", at="double",
min=-1, max=1,
dv=0)
pm.setAttr("{}.sliderZ".format(test_controller), e=True,
keyable=True)
driver_data_list = axis_data["ZAxis"]["BoneRange"]
for driver_data in driver_data_list:
build_driven(
driver=test_controller,
axis_data=driver_data,
driver_attr="sliderZ")
pm.floatSliderGrp("axis_z_test_widget", e=True, en=True)
pm.connectControl(
"axis_z_test_widget",
test_controller.attr("sliderZ"))
# print(controller_offset)
print("Done!")
# pm.deleteUI("controllerBuilderWnd")
return
def axis_list_signal(self, attr="", method="",
update="", source="", target="select"):
"""
XAxis内的控件的信号
:return:
"""
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_tab_list = pm.tabLayout("axis_setting_grp", q=True, tl=True)
axis_tab_index = pm.tabLayout("axis_setting_grp", q=True, sti=True)
axis_tab_label = axis_tab_list[axis_tab_index - 1]
if method == "delete":
if target == "select":
current_selected = int(pm.textScrollList(
"axis_{}_joint_list".format(attr),
q=True, sii=True)[0])
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].pop(
current_selected - 1)
pm.textScrollList(
"axis_{}_joint_list".format(attr),
e=True, rii=current_selected)
elif target == "all":
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"] = []
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True, ra=True)
if method == "post":
for new_jnt in pm.ls(sl=True):
# new_jnt = pm.ls(sl=True)[0]
if new_jnt.controller_name() not in (pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)):
new_jnt_default_value = get_channel_values(new_jnt.controller_name())
new_jnt_data = {
"BoneName": new_jnt.controller_name(),
"Max": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"Min": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"def": new_jnt_default_value,
}
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].append(
new_jnt_data)
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
a=new_jnt.controller_name())
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
si=new_jnt.controller_name())
if method == "update":
update_joints_index = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, sii=True)
all_controller = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)
for update_joint_index in update_joints_index:
current_selected_index = int(update_joint_index)
current_selected = all_controller[current_selected_index - 1]
default_jnt_value = self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1]["def"]
offset_value = None
if update == "Max":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label]["BoneRange"][
current_selected_index - 1][
"Max"] = offset_value
elif source == "panel":
name = "axis_{}_max_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Max"] = offset_value
if update == "Min":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
elif source == "panel":
name = "axis_{}_min_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Min"] = offset_value
if update == "Default":
current_jnt_value = get_channel_values(
current_selected)
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"def"] = current_jnt_value
common.write_json(dict_data=self.face_data, file_path=self.json_folder)
axis_x_data = self.face_data[current_module][controller_index - 1]
self.clean_axis_widget(attr)
self.update_axis_widget(attr=attr, data=axis_x_data)
return
| 38.460554 | 89 | 0.533023 | #!/usr/bin/env python
# coding: utf-8
# @Time : 2020/9/7 9:39
# @Author : Li XiaoJun
# @Site :
# @File : face_editor.py
import json
import os
from imp import reload
from animation import common
from animation import test_node
from animation.helper import manager_version
from pymel import core as pm
reload(common)
reload(test_node)
def get_channel_values(jnt, pre=5):
    """
    Return the channel-box values of a joint as a 9-element list.

    The list is ordered [tx, ty, tz, rx, ry, rz, sx, sy, sz], each value
    rounded to *pre* decimal places.  Translation values are scaled down
    by 100 (multiplied by 0.01) to compensate for the unit-scale mismatch
    between Maya and Unity when the rig goes through FBX.

    :param jnt: name of the target joint
    :param pre: number of decimal places to keep
    :return: list of 9 rounded channel values
    """
    # Resolve the node once instead of building a PyNode per channel.
    node = pm.PyNode(jnt)
    jnt_value = [
        round(node.translateX.get() * 0.01, pre),
        round(node.translateY.get() * 0.01, pre),
        round(node.translateZ.get() * 0.01, pre),
        round(node.rotateX.get(), pre),
        round(node.rotateY.get(), pre),
        round(node.rotateZ.get(), pre),
        round(node.scaleX.get(), pre),
        round(node.scaleY.get(), pre),
        round(node.scaleZ.get(), pre),
    ]
    return jnt_value
def build_driven(driver, axis_data, driver_attr):
    """
    Create set-driven-key relationships from *driver_attr* onto one joint.

    For every transform channel of the joint named in
    ``axis_data["BoneName"]`` three keys are set against
    ``driver.driver_attr``: the rest pose at driver value 0, the "Max"
    pose at value 1 and the "Min" pose at value -1.  Translation entries
    in ``axis_data`` are stored at 1/100 scale and multiplied by 100
    before being applied; scale entries are absolute values, not deltas.
    The driver attribute is reset to 0 afterwards.

    :param driver: node carrying the driving attribute
    :param axis_data: dict with "BoneName", "Max" and "Min" entries
    :param driver_attr: name of the driving attribute on *driver*
    """
    joint_name = axis_data["BoneName"]
    # Remember the rest pose so it can be restored before keying each channel.
    translate_dv = pm.PyNode(joint_name).translate.get()
    rotate_dv = pm.PyNode(joint_name).rotate.get()
    scale_dv = pm.PyNode(joint_name).scale.get()
    attr_list = ["tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"]
    for dv_attr in attr_list:
        # Restore the full rest pose, then key this channel at driver == 0.
        pm.PyNode(joint_name).translate.set(translate_dv)
        pm.PyNode(joint_name).rotate.set(rotate_dv)
        pm.PyNode(joint_name).scale.set(scale_dv)
        # pm.setDrivenKeyframe(
        #     "{}.{}".format(joint_name, dv_attr),
        #     cd="{}.{}".format(driver, driver_attr),
        #     dv=1)
        pm.setDrivenKeyframe(
            "{}.{}".format(joint_name, dv_attr),
            cd="{}.{}".format(driver, driver_attr),
            dv=0)
        # Max value
        pm.PyNode(joint_name).translate.set([
            axis_data["Max"][0] * 100 + translate_dv[0],
            axis_data["Max"][1] * 100 + translate_dv[1],
            axis_data["Max"][2] * 100 + translate_dv[2]])
        pm.PyNode(joint_name).rotate.set([
            axis_data["Max"][3] + rotate_dv[0],
            axis_data["Max"][4] + rotate_dv[1],
            axis_data["Max"][5] + rotate_dv[2]])
        pm.PyNode(joint_name).scale.set([
            axis_data["Max"][6],
            axis_data["Max"][7],
            axis_data["Max"][8]])
        pm.setDrivenKeyframe(
            "{}.{}".format(joint_name, dv_attr),
            cd="{}.{}".format(driver, driver_attr),
            dv=1)
        # Min value
        pm.PyNode(joint_name).translate.set([
            axis_data["Min"][0] * 100 + translate_dv[0],
            axis_data["Min"][1] * 100 + translate_dv[1],
            axis_data["Min"][2] * 100 + translate_dv[2]])
        pm.PyNode(joint_name).rotate.set([
            axis_data["Min"][3] + rotate_dv[0],
            axis_data["Min"][4] + rotate_dv[1],
            axis_data["Min"][5] + rotate_dv[2]])
        pm.PyNode(joint_name).scale.set([
            axis_data["Min"][6],
            axis_data["Min"][7],
            axis_data["Min"][8]])
        pm.setDrivenKeyframe(
            "{}.{}".format(joint_name, dv_attr),
            cd="{}.{}".format(driver, driver_attr),
            dv=-1)
    # Leave the driver in its neutral position.
    pm.setAttr("%s.%s" % (driver, driver_attr), 0)
    return
class FaceEditor(common.Singleton):
"""
Json数据文件管理工具
"""
    def __init__(self):
        """
        Initialise state, build the window and populate it.

        Order matters: pre_build_window() loads the saved config path and
        data, show() builds the widgets, init_system() fills them.
        """
        super(FaceEditor, self).__init__()
        self.json_folder = ''   # path of the JSON config file
        self.face_data = {}     # full config: {module: [controller, ...]}
        # self.face_node = None
        self.current_module = None            # controller list of active module
        self.current_controller_index = None  # reserved; not updated yet
        self.pre_build_window()
        self.show()
        self.init_system()
    def show(self):
        """
        Build and display the tool window.

        Deletes and rebuilds the window when it already exists.
        :return: window
        """
        if pm.window("faceEditorWnd", ex=True):
            pm.deleteUI("faceEditorWnd")
        pm.window(
            "faceEditorWnd",
            t=u"Face Editor %s" % manager_version,
            mb=True,
            cc=lambda *args: self._closed_window_cmd())
        self.menu_list()
        pm.formLayout("editor_main_layout")
        pm.textFieldButtonGrp(
            "config_file_widget",
            label="Config",
            bl="Setting",
            cw3=[50, 100, 50],
            adj=2,
            p="editor_main_layout",
            bc=lambda *args: self.action_json_folder())
        self.widget_module_selector()
        pm.frameLayout(
            "controller_list_grp", label="Controller List:", w=180, h=180)
        pm.textScrollList(
            "controller_list_widget",
            sc=lambda *args: self.action_selected_controller())
        pm.popupMenu()
        pm.menuItem(
            label=u"创建新的控制器",
            c=lambda *args: self.new_controller_wnd())
        # NOTE(review): this menu item has no command attached — deleting a
        # controller is not implemented yet.
        pm.menuItem(
            label=u"删除选择控制器")
        # pm.menuItem(divider=True)
        # pm.menuItem(
        #     label=u"恢复初始状态")
        pm.setParent("..")
        # Controller basic info frame
        pm.frameLayout(
            "controller_info_grp",
            label="Controller Basic info:", bgs=True, mh=6)
        pm.textFieldGrp(
            "controller_name_widget",
            label=u"控制器名",
            cw2=[60, 200])
        pm.textFieldGrp(
            "controller_group_widget",
            label=u"控制器组",
            cw2=[60, 200],
            # tcc=lambda *args: self.action_controller_group_widget()
        )
        pm.textFieldButtonGrp(
            "controller_bone_widget",
            label=u"挂点位置",
            cw3=[60, 200, 140],
            bl=u"更新",
            # bc=lambda *args: self.action_controller_bone_widget(
            #     method="button"),
            # tcc=lambda *args: self.action_controller_bone_widget(
            #     method="text")
        )
        pm.floatFieldGrp(
            "controller_offset_widget",
            numberOfFields=3,
            pre=3,
            label=u'挂点偏移',
            cw4=[60, 50, 50, 50],
            # cc=lambda *args: self.action_controller_off_widget()
        )
        pm.checkBoxGrp(
            "axis_group_widget",
            label=u"Axis:",
            labelArray3=['X', 'Y', 'Z'],
            cw4=[60, 50, 50, 50],
            # cc=lambda *args: self.action_change_axis_state(),
            numberOfCheckBoxes=3)
        pm.setParent("..")
        separator1 = pm.separator(style="in", h=10)
        # One tab per driven axis (X / Y / Z).
        pm.tabLayout("axis_setting_grp")
        axis_x_tab = self.axis_attr_tab(attr="x")
        axis_y_tab = self.axis_attr_tab(attr="y")
        axis_z_tab = self.axis_attr_tab(attr="z")
        pm.tabLayout(
            "axis_setting_grp", e=True,
            tl=[(axis_x_tab, 'XAxis'),
                (axis_y_tab, 'YAxis'),
                (axis_z_tab, 'ZAxis')])
        pm.setParent("..")
        pm.formLayout(
            "editor_main_layout", edit=True,
            attachForm=[
                ("config_file_widget", 'left', 2),
                ("config_file_widget", 'right', 2),
                ("config_file_widget", 'top', 5),
                ("module_selector_widget", 'left', 2),
                ("module_selector_widget", 'right', 2),
                ("controller_list_grp", 'left', 2),
                ("controller_info_grp", 'right', 2),
                (separator1, 'left', 2),
                (separator1, 'right', 2),
                ("axis_setting_grp", 'left', 2),
                ("axis_setting_grp", 'right', 2),
                ("axis_setting_grp", 'bottom', 5),
            ],
            attachControl=[
                ("module_selector_widget", 'top', 5,
                 "config_file_widget"),
                ("controller_list_grp", 'top', 5,
                 "module_selector_widget"),
                ("controller_info_grp", 'top', 5,
                 "module_selector_widget"),
                ("controller_info_grp", 'left', 2,
                 "controller_list_grp"),
                (separator1, 'top', 5, "controller_list_grp"),
                ("axis_setting_grp", 'top', 5, separator1),
            ])
        pm.showWindow("faceEditorWnd")
def widget_module_selector(self):
pm.optionMenuGrp(
"module_selector_widget",
label="Module",
adj=2,
cw2=[50, 100],
p="editor_main_layout",
cc=lambda *args: self.action_change_module())
if len(self.face_data.keys()) > 0:
for menu_item in self.face_data.keys():
pm.menuItem(label=menu_item)
def action_change_module(self):
"""
切换模块时调用的方法
:return:
"""
selected_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
# print(selected_module)
self.current_module = self.face_data[selected_module]
# print(self.select_module)
pm.textScrollList("controller_list_widget", e=True, ra=True)
self.clean_controller_widget_data()
self.clean_axis_widget("x")
self.clean_axis_widget("y")
self.clean_axis_widget("z")
self.init_system()
return
    def axis_attr_tab(self, attr="x"):
        """
        Build one axis tab: joint list, test slider and Max/Min range fields.

        Every widget name embeds *attr* so the three tabs can share the
        same callbacks.

        :param attr: axis letter used in the widget names ("x", "y" or "z")
        :return: the frameLayout that is the tab's root control
        """
        layout = pm.frameLayout(
            "axis_{}_tab".format(attr),
            p="axis_setting_grp",
            label="{}Axis".format(attr.capitalize()),
            lv=False)
        pm.formLayout(
            "axis_{}_form".format(attr),
            p="axis_{}_tab".format(attr))
        pm.textScrollList(
            "axis_{}_joint_list".format(attr),
            p="axis_{}_form".format(attr),
            ams=True,
            w=174,
            sc=lambda *args: self.action_axis_list(
                widget="axis_{}_joint_list".format(attr)))
        pm.popupMenu()
        pm.menuItem(
            label=u"添加影响骨骼",
            c=lambda *args: self.axis_list_signal(attr=attr, method="post"))
        pm.menuItem(
            label=u"移除选择骨骼",
            c=lambda *args: self.axis_list_signal(
                attr=attr, method="delete", target="select"))
        pm.menuItem(
            label=u"移除所有骨骼",
            c=lambda *args: self.axis_list_signal(
                attr=attr, method="delete", target="all"))
        pm.menuItem(divider=True)
        pm.menuItem(
            label=u"更新初始范围数值", c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Default",
                source="scene",
            ))
        pm.menuItem(
            label=u"更新Max范围数值",
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Max",
                source="scene",
            ))
        pm.menuItem(
            label=u"更新Min范围数值",
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Min",
                source="scene",
            ))
        # Slider stays disabled until a test controller exists to drive it.
        pm.floatSliderGrp(
            "axis_{}_test_widget".format(attr),
            enable=False,
            label=u"滑竿测试",
            field=True,
            minValue=-1.0,
            maxValue=1.0,
            fieldMinValue=-1.0,
            fieldMaxValue=1.0,
            pre=3,
            adj=3,
            value=0,
            cw3=[60, 60, 100])
        # Two collapsible frames (Max / Min), each with translate, rotate
        # and scale triple-float fields.
        range_fields = ["max", "min"]
        channel_fields = ["translate", "rotate", "scale"]
        for range_field in range_fields:
            pm.frameLayout(
                "axis_{}_{}_range_widget".format(attr, range_field),
                label=range_field.capitalize(),
                p="axis_{}_form".format(attr),
                cll=True, cl=False, mw=5, mh=5,
                bgs=True)
            for channel_field in channel_fields:
                name = "axis_{}_{}_{}_field".format(attr, range_field,
                                                    channel_field)
                pm.floatFieldGrp(
                    name,
                    # label=name,
                    label=channel_field.capitalize(),
                    nf=3, cw4=[50, 80, 80, 80],
                    numberOfFields=3,
                    pre=3,
                    p="axis_{}_{}_range_widget".format(attr, range_field))
        pm.button(
            "axis_{}_max_update_scene_btn".format(attr),
            label="Update(Apply Scene)",
            # label="axis_{}_max_update_scene_btn".format(attr),
            w=140,
            p="axis_{}_max_range_widget".format(attr),
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Max",
                source="scene",
            ))
        pm.button(
            "axis_{}_max_update_panel_btn".format(attr),
            label="Update(Apply Panel)",
            # label="axis_{}_max_update_panel_btn".format(attr),
            w=140,
            p="axis_{}_max_range_widget".format(attr),
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Max",
                source="panel",
            ))
        pm.button(
            "axis_{}_min_update_scene_btn".format(attr),
            label="Update(Apply Scene)",
            # label="axis_{}_min_update_scene_btn".format(attr),
            w=140,
            p="axis_{}_min_range_widget".format(attr),
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Min",
                source="scene",
            ))
        pm.button(
            "axis_{}_min_update_panel_btn".format(attr),
            label="Update(Apply Panel)",
            # label="axis_{}_min_update_panel_btn".format(attr),
            w=140,
            p="axis_{}_min_range_widget".format(attr),
            c=lambda *args: self.axis_list_signal(
                attr=attr,
                method="update",
                update="Min",
                source="panel",
            ))
        pm.formLayout(
            "axis_{}_form".format(attr),
            e=True,
            attachForm=[
                ("axis_{}_joint_list".format(attr), 'left', 2),
                ("axis_{}_joint_list".format(attr), 'top', 5),
                ("axis_{}_joint_list".format(attr), 'bottom', 5),
                ("axis_{}_test_widget".format(attr), 'top', 5),
                ("axis_{}_test_widget".format(attr), 'right', 7),
                ("axis_{}_max_range_widget".format(attr), 'right', 2),
                ("axis_{}_min_range_widget".format(attr), 'right', 2),
            ],
            attachControl=[
                ("axis_{}_test_widget".format(attr), 'left', 5,
                 "axis_{}_joint_list".format(attr)),
                ("axis_{}_max_range_widget".format(attr), 'top', 5,
                 "axis_{}_test_widget".format(attr)),
                ("axis_{}_max_range_widget".format(attr), 'left', 5,
                 "axis_{}_joint_list".format(attr)),
                ("axis_{}_min_range_widget".format(attr), 'left', 5,
                 "axis_{}_joint_list".format(attr)),
                ("axis_{}_min_range_widget".format(attr), 'top', 5,
                 "axis_{}_max_range_widget".format(attr)),
            ])
        return layout
    def menu_list(self):
        """
        Build the window's menu bar (file / test menus).
        :return:
        """
        pm.menu(label=u"文件", tearOff=False)
        pm.menuItem(
            label=u"保存数据",
            c=lambda *args: self.save_face_data())
        pm.menuItem(divider=True)
        pm.menuItem(
            label=u"创建新模块",
            c=lambda *args: self.new_module())
        pm.menuItem(
            label=u"创建控制器",
            c=lambda *args: self.new_controller_wnd())
        # pm.menu(label=u"设置", tearOff=False)
        # pm.menuItem(
        #     label=u"设置Json存放目录",
        #     c=lambda *args: self.setting_json_folder())
        # pm.menuItem(
        #     label=u"调试模式", cb=False)
        pm.menu(label=u"测试", tearOff=False)
        pm.menuItem(
            label=u"创建测试用控制器",
            c=lambda *args: self.new_test_controller())
        # NOTE(review): "保存数据" calls self.save_face_data which is not
        # defined in the visible part of this class — confirm it exists.
        return
    def init_system(self):
        """
        Initialise the panels: fill the widgets from the loaded config.
        :return:
        """
        pm.textFieldButtonGrp(
            "config_file_widget", e=True, text=self.json_folder)
        # Populate the controller list for the active module.
        current_module = pm.optionMenuGrp(
            "module_selector_widget", q=True, value=True)
        self.current_module = self.face_data[current_module]
        if len(self.current_module) > 0:
            for controller in self.current_module:
                pm.textScrollList(
                    "controller_list_widget",
                    e=True,
                    a=controller["ControllerName"])
            # textScrollList indices start at 1, unlike Python lists (0-based)
            pm.textScrollList("controller_list_widget", e=True, sii=1)
            # Show the first controller's data by default.
            self.update_controller_widget_data(
                controller_data=self.face_data[current_module][0])
def pre_build_window(self):
if pm.optionVar(q='faceEditorConfig'):
self.json_folder = pm.optionVar(q='faceEditorConfig')
self.face_data = common.read_json(self.json_folder)
# self.face_node = FaceData(common.read_json(self.json_folder))
    def _closed_window_cmd(self):
        """Persist the config path in an optionVar when the window closes."""
        pm.optionVar(sv=('faceEditorConfig', self.json_folder))
        # pm.optionVar(sv=('arFileLocation', self.ar_file_location))
        # self.current_tab_index = pm.tabLayout(
        #     self.main_tab, q=True, sti=True)
        # print(self.current_tab_index)
        # pm.optionVar(
        #     sv=('faceEditorMainTabIndex', self.current_tab_index))
        # pm.optionVar(
        #     sv=('jsonManagerDiscardBrowFilePath', self.brow_file_folder))
        # pm.optionVar(
        #     sv=('jsonManagerDiscardEyeFilePath', self.eye_file_folder))
        # pm.optionVar(
        #     sv=('jsonManagerDiscardNoseFilePath', self.nose_file_folder))
        # pm.optionVar(
        #     sv=('jsonManagerDiscardMouthFilePath', self.mouth_file_folder))
        # pm.optionVar(
        #     sv=('jsonManagerDiscardFaceFilePath', self.face_file_folder))
def setting_json_folder(self):
json_folder = pm.fileDialog2(
dialogStyle=2,
fileFilter="JSON File (*.json);;",
fileMode=5, okc=u"应用")[0]
if json_folder:
self.json_folder = json_folder
if not (os.path.exists(self.json_folder)):
with open(self.json_folder, "w") as f:
data = {}
json.dump(data, f, indent=2)
else:
pass
return json_folder
    def action_json_folder(self):
        """Browse for the config file and mirror the path in the text field."""
        json_location = self.setting_json_folder()
        pm.textFieldButtonGrp(
            "config_file_widget", e=True, text=json_location)
        return
# def generate_custom_data(self):
# dict_data = {}
# self.dict_data = list()
#
# for index in range(0, pm.scrollLayout(
# "controllerListLayout", q=True, nch=True)):
# dict_data = {}
# axis_control = {}
#
# dict_data["controllerName"] = pm.textFieldButtonGrp(
# "controllerNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerGroupName"] = pm.textFieldButtonGrp(
# "controllerGrpNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerBoneName"] = pm.textFieldButtonGrp(
# "controllerBoneNameWidget%s" % index, q=True, text=True)
# dict_data["ControllerPositionOffset"] = pm.floatFieldGrp(
# "controllerBoneOffsetWidget%s" % index, q=True, value=True)
#
# axis_control["XAxis"] = pm.textFieldGrp(
# "controller%sAxisX" % index, q=True, text=True)
# axis_control["YAxis"] = pm.textFieldGrp(
# "controller%sAxisY" % index, q=True, text=True)
# axis_control["ZAxis"] = pm.textFieldGrp(
# "controller%sAxisZ" % index, q=True, text=True)
#
# dict_data["AxisControl"] = axis_control
#
# self.dict_data.append(dict_data)
# def save_custom_data(self):
# """
# 保存自定义捏脸数据
#
# :return: None
# """
# menu_item_selected = pm.optionMenuGrp(
# "faceModuleOptionsWidget", q=True, value=True)
#
# self.generate_custom_data()
# control_file_path = "%s/%s/%sController.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
#
# controller_data = {}
# controller_data["%sController" % menu_item_selected] = self.dict_data
# with open(control_file_path, "w") as f:
# json.dump(controller_data, f, indent=4)
#
# self.generate_custom_detail_data()
# detail_file_path = "%s/%s/%sControlGroup.json" % (
# self.json_folder, menu_item_selected, menu_item_selected)
# with open(detail_file_path, "w") as f:
# json.dump(self.detail_data, f, indent=4)
#
# print(u"保存成功")
#
# return
    def new_module(self):
        """
        Guided window for creating a new module.
        :return:
        """
        if pm.window("moduleBuilderWnd", ex=True):
            pm.deleteUI("moduleBuilderWnd")
        pm.window("moduleBuilderWnd", title="Module Builder")
        main_layout = pm.columnLayout(adj=1)
        base_frame = pm.frameLayout(
            label="Module Base", p=main_layout, mw=5, mh=5,
            cll=False, cl=True)
        module_selector = pm.optionMenuGrp(
            label="Module:", p=base_frame, cw2=[48, 150])
        # Fixed set of supported face modules.
        pm.menuItem(label="eye")
        pm.menuItem(label="brow")
        pm.menuItem(label="nose")
        pm.menuItem(label="mouth")
        pm.menuItem(label="ear")
        pm.menuItem(label="feature")
        pm.button(
            label="Build Module",
            p=base_frame,
            c=lambda *args: self.build_module(
                module=pm.optionMenuGrp(module_selector, q=True, value=True)))
        pm.showWindow("moduleBuilderWnd")
def build_module(self, module):
face_data = common.read_json(self.json_folder)
module_list = face_data.keys()
if module == "eye":
new_module = "{}Controller".format(module)
if new_module not in module_list:
self.build_eye_controller()
if module == "mouth":
new_module = "{}Controller".format(module)
if new_module not in module_list:
self.build_mouth_controller()
print(module_list)
if pm.window("moduleBuilderWnd", ex=True):
pm.deleteUI("moduleBuilderWnd")
def build_eye_controller(self, data=None):
if data is None:
data = []
face_data = common.read_json(self.json_folder)
print(face_data)
face_data["eyeController"] = data
print(face_data)
with open(self.json_folder, "w") as f:
json.dump(face_data, f, indent=4)
def build_mouth_controller(self, data=None):
if data is None:
data = []
face_data = common.read_json(self.json_folder)
print(face_data)
face_data["mouthController"] = data
print(face_data)
with open(self.json_folder, "w") as f:
json.dump(face_data, f, indent=4)
    def new_controller_wnd(self):
        """
        Pop up the "Controller Builder" window.

        The nested ``add_controller`` closure reads the form widgets,
        appends a new controller entry to the active module and saves the
        config file.
        """
        if pm.window("controllerBuilderWnd", ex=True):
            pm.deleteUI("controllerBuilderWnd")
        pm.window("controllerBuilderWnd", title="Controller Builder")
        pm.columnLayout(adj=1)
        controller_info_grp = pm.frameLayout(
            label="Controller Basic info:", bgs=True, mh=6, mw=6)
        module_parent = pm.optionMenuGrp(
            label="Module", adj=2,
            cw2=[60, 100])
        if len(self.face_data.keys()) > 0:
            for menu_item in self.face_data.keys():
                pm.menuItem(label=menu_item)
        controller_name_widget = pm.textFieldGrp(
            label=u"控制器名",
            cw2=[60, 240])
        controller_group_widget = pm.textFieldGrp(
            label=u"控制器组",
            cw2=[60, 240])
        controller_bone_widget = pm.textFieldGrp(
            label=u"挂点位置",
            cw2=[60, 240])
        controller_offset_widget = pm.floatFieldGrp(
            numberOfFields=3,
            pre=3,
            label=u'挂点偏移',
            cw4=[60, 50, 50, 50])
        axis_group_widget = pm.checkBoxGrp(
            label=u"Axis:",
            labelArray3=['X', 'Y', 'Z'],
            cw4=[60, 50, 50, 50],
            numberOfCheckBoxes=3)
        pm.button(
            label="Add New Controller",
            p=controller_info_grp,
            c=lambda *args: add_controller())
        pm.setParent("..")
        pm.showWindow("controllerBuilderWnd")
        # Preselect the module that is active in the main window.
        menu_items_selected = pm.optionMenuGrp(
            "module_selector_widget", q=True, sl=True)
        pm.optionMenuGrp(module_parent, e=True, sl=menu_items_selected)
        # current_tab = pm.tabLayout("module_tabs", q=True, st=True)
        # pm.textFieldGrp(module_parent, e=True, text=current_tab)

        def add_controller():
            # Collect the form values and build a fresh controller entry.
            module_selected = pm.optionMenuGrp(
                module_parent, q=True, value=True)
            controller_name = pm.textFieldGrp(
                controller_name_widget, q=True, text=True)
            controller_group = pm.textFieldGrp(
                controller_group_widget, q=True, text=True)
            controller_bone = pm.textFieldGrp(controller_bone_widget, q=True,
                                              text=True)
            controller_offset = pm.floatFieldGrp(controller_offset_widget,
                                                 q=True, value=True)
            axis_group = pm.checkBoxGrp(axis_group_widget, q=True, va3=True)
            # An empty GroupName means the axis is disabled.
            x_axis = ""
            if axis_group[0]:
                x_axis = "{}_X".format(controller_name)
            y_axis = ""
            if axis_group[1]:
                y_axis = "{}_Y".format(controller_name)
            z_axis = ""
            if axis_group[2]:
                z_axis = "{}_Z".format(controller_name)
            new_controller = dict()
            new_controller["ControllerName"] = controller_name
            new_controller["ControllerGroupName"] = controller_group
            new_controller["ControllerBoneName"] = controller_bone
            # Offsets are stored at 1/100 scale (Maya -> FBX/U3D units).
            new_controller["ControllerPositionOffset"] = [
                controller_offset[0] * 0.01,
                controller_offset[1] * 0.01,
                controller_offset[2] * 0.01,
            ]
            new_controller["AxisControl"] = {
                "ZAxis": {
                    "BoneRange": [],
                    "GroupName": z_axis
                },
                "XAxis": {
                    "BoneRange": [],
                    "GroupName": x_axis
                },
                "YAxis": {
                    "BoneRange": [],
                    "GroupName": y_axis
                }
            }
            self.face_data[module_selected].append(new_controller)
            common.write_json(dict_data=self.face_data,
                              file_path=self.json_folder)
            pm.deleteUI("controllerBuilderWnd")
            pm.textScrollList(
                "controller_list_widget", e=True, a=controller_name)
            return
def clean_controller_widget_data(self):
"""
清空控制器面板部件内的数据
:return:
"""
# Controller Basic info frame
pm.textFieldGrp("controller_name_widget", e=True, text="")
pm.textFieldGrp("controller_group_widget", e=True, text="")
pm.textFieldButtonGrp("controller_bone_widget", e=True, text="")
pm.floatFieldGrp("controller_offset_widget", e=True,
value=[0, 0, 0, 0])
pm.checkBoxGrp("axis_group_widget", e=True, v1=False, v2=False,
v3=False)
# 清除Axis面板部件内的数据
pm.textScrollList("axis_x_joint_list", e=True, ra=True)
pm.textScrollList("axis_y_joint_list", e=True, ra=True)
pm.textScrollList("axis_z_joint_list", e=True, ra=True)
# 清除Axis面部骨骼运动范围的数据
self.clean_axis_widget("x")
self.clean_axis_widget("y")
self.clean_axis_widget("z")
return
def clean_axis_widget(self, attr):
pm.floatSliderGrp(
"axis_{}_test_widget".format(attr),
e=True, en=False)
pm.floatFieldGrp(
"axis_{}_max_translate_field".format(attr),
e=True, v1=0, v2=0, v3=0)
pm.floatFieldGrp(
"axis_{}_max_rotate_field".format(attr),
e=True, v1=0, v2=0, v3=0)
pm.floatFieldGrp(
"axis_{}_max_scale_field".format(attr),
e=True, v1=1, v2=1, v3=1)
pm.floatFieldGrp(
"axis_{}_min_translate_field".format(attr),
e=True, v1=0, v2=0, v3=0)
pm.floatFieldGrp(
"axis_{}_min_rotate_field".format(attr),
e=True, v1=0, v2=0, v3=0)
pm.floatFieldGrp(
"axis_{}_min_scale_field".format(attr),
e=True, v1=1, v2=1, v3=1)
return
    def update_controller_widget_data(self, controller_data):
        """
        Populate the controller-panel widgets with one controller's data.
        :param controller_data: dict describing a single controller
        :return:
        """
        pm.textFieldGrp(
            "controller_name_widget",
            e=True, text=controller_data["ControllerName"])
        pm.textFieldGrp(
            "controller_group_widget", e=True,
            text=controller_data["ControllerGroupName"])
        pm.textFieldButtonGrp(
            "controller_bone_widget", e=True,
            text=controller_data["ControllerBoneName"])
        # Offsets are stored at 1/100 scale; show them scaled back up.
        pm.floatFieldGrp(
            "controller_offset_widget",
            e=True,
            v1=controller_data["ControllerPositionOffset"][0] * 100,
            v2=controller_data["ControllerPositionOffset"][1] * 100,
            v3=controller_data["ControllerPositionOffset"][2] * 100)
        # A non-empty GroupName marks the axis as enabled.
        if (controller_data["AxisControl"]["XAxis"]["GroupName"]) == "":
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v1=False)
        else:
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v1=True)
        if (controller_data["AxisControl"]["YAxis"]["GroupName"]) == "":
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v2=False)
        else:
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v2=True)
        if (controller_data["AxisControl"]["ZAxis"]["GroupName"]) == "":
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v3=False)
        else:
            pm.checkBoxGrp(
                "axis_group_widget", e=True, v3=True)
        # Populate the axis sections
        axis_x_joints_grp = controller_data["AxisControl"]["XAxis"]
        for axis_x_joint in axis_x_joints_grp["BoneRange"]:
            pm.textScrollList(
                "axis_x_joint_list", e=True, a=axis_x_joint["BoneName"])
        if len(axis_x_joints_grp["BoneRange"]) > 0:
            pm.textScrollList("axis_x_joint_list", e=True, sii=1)
        # Fill the range fields for the X-axis bones
        self.update_axis_widget("x", controller_data)
        axis_y_joints_grp = controller_data["AxisControl"]["YAxis"]
        for axis_y_joint in axis_y_joints_grp["BoneRange"]:
            pm.textScrollList(
                "axis_y_joint_list", e=True, a=axis_y_joint["BoneName"])
        if len(axis_y_joints_grp["BoneRange"]) > 0:
            pm.textScrollList("axis_y_joint_list", e=True, sii=1)
        # Fill the range fields for the Y-axis bones
        self.update_axis_widget("y", controller_data)
        axis_z_joints_grp = controller_data["AxisControl"]["ZAxis"]
        for axis_z_joint in axis_z_joints_grp["BoneRange"]:
            pm.textScrollList(
                "axis_z_joint_list", e=True, a=axis_z_joint["BoneName"])
        if len(axis_z_joints_grp["BoneRange"]) > 0:
            pm.textScrollList("axis_z_joint_list", e=True, sii=1)
        # Fill the range fields for the Z-axis bones
        self.update_axis_widget("z", controller_data)
        return
    def update_axis_widget(self, attr, data):
        """
        Fill one axis tab's Max/Min fields from the selected bone's data
        and wire the test slider when the controller exists in the scene.

        :param attr: axis letter of the tab ("x", "y" or "z")
        :param data: dict describing a single controller
        """
        if pm.textScrollList(
                "axis_{}_joint_list".format(attr), q=True, ni=True) > 0:
            # NOTE(review): assumes an item is selected (sii returns None
            # when nothing is) — the code always selects index 1 on fill.
            axis_index = int(pm.textScrollList(
                "axis_{}_joint_list".format(attr), q=True, sii=True)[0])
            axis_attr_data = data[
                "AxisControl"]["{}Axis".format(attr.capitalize())][
                "BoneRange"][axis_index - 1]
            # Stored translations are at 1/100 scale; display scaled up.
            axis_max_value = axis_attr_data["Max"]
            # print("axis_a_max: {}".format(axis_a_max))
            pm.floatFieldGrp(
                "axis_{}_max_translate_field".format(attr),
                e=True,
                v1=axis_max_value[0] * 100,
                v2=axis_max_value[1] * 100,
                v3=axis_max_value[2] * 100)
            pm.floatFieldGrp(
                "axis_{}_max_rotate_field".format(attr),
                e=True,
                v1=axis_max_value[3],
                v2=axis_max_value[4],
                v3=axis_max_value[5])
            pm.floatFieldGrp(
                "axis_{}_max_scale_field".format(attr),
                e=True,
                v1=axis_max_value[6],
                v2=axis_max_value[7],
                v3=axis_max_value[8])
            axis_min_value = axis_attr_data["Min"]
            pm.floatFieldGrp(
                "axis_{}_min_translate_field".format(attr),
                e=True,
                v1=axis_min_value[0] * 100,
                v2=axis_min_value[1] * 100,
                v3=axis_min_value[2] * 100)
            pm.floatFieldGrp(
                "axis_{}_min_rotate_field".format(attr),
                e=True,
                v1=axis_min_value[3], v2=axis_min_value[4],
                v3=axis_min_value[5])
            pm.floatFieldGrp(
                "axis_{}_min_scale_field".format(attr),
                e=True,
                v1=axis_min_value[6], v2=axis_min_value[7],
                v3=axis_min_value[8])
        # Enable the slider only when a live test controller is in scene.
        if pm.objExists(data["ControllerName"]):
            pm.floatSliderGrp(
                "axis_{}_test_widget".format(attr), e=True, en=True)
            pm.connectControl(
                "axis_{}_test_widget".format(attr),
                pm.PyNode(data["ControllerName"]).attr(
                    "slider{}".format(attr.capitalize())))
        return
def action_selected_controller(self):
"""
controller list控件里面选择controller时调用的函数
:return:
"""
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
selected_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
# 清除当前面板上面的数据
self.clean_controller_widget_data()
# 填充数据
controller_data = self.face_data[current_module][selected_index - 1]
self.update_controller_widget_data(controller_data=controller_data)
    def new_test_controller(self):
        """
        Create a test controller for the selected entry: a poly sphere
        parented under the controller's bone, with sliderX/Y/Z attributes
        wired to the configured driven keys.
        """
        if not pm.objExists("TestGeoGrp"):
            pm.createNode("transform", name="TestGeoGrp")
        selected_controller = pm.textScrollList(
            "controller_list_widget", q=True, si=True)[0]
        print(selected_controller)
        if pm.objExists(selected_controller):
            # pm.error raises, aborting the build for duplicates.
            pm.error("The {} was exists in scene".format(selected_controller))
        else:
            test_controller = pm.polySphere(
                r=0.5, sx=20, sy=20, ax=[0, 1, 0], cuv=2, ch=1,
                name=selected_controller)[0]
            test_grp = pm.createNode("transform",
                                     name="{}Grp".format(selected_controller))
            pm.parent(test_controller, test_grp)
            pm.parent(test_grp, "TestGeoGrp")
            # Pin the controller group to its attachment bone.
            controller_bone_name = pm.textFieldButtonGrp(
                "controller_bone_widget", q=True, text=True)
            pm.parentConstraint(controller_bone_name, test_grp, mo=False)
            controller_offset = pm.floatFieldGrp(
                "controller_offset_widget", q=True, value=True)
            common.lock_and_hide_attr(
                test_controller, translate=False, vis=True)
            test_controller.translate.set([
                controller_offset[0],
                controller_offset[1],
                controller_offset[2]])
            current_module = pm.optionMenuGrp(
                "module_selector_widget", q=True, value=True)
            current_controller = int(pm.textScrollList(
                "controller_list_widget", q=True, sii=True)[0])
            axis_data = self.face_data[current_module][
                current_controller - 1]["AxisControl"]
            # Add the test slider attributes for each enabled axis and
            # build their driven keys.
            if pm.checkBoxGrp("axis_group_widget", q=True, v1=True):
                pm.addAttr(test_controller,
                           ln="sliderX", at="double",
                           min=-1, max=1,
                           dv=0)
                pm.setAttr("{}.sliderX".format(test_controller), e=True,
                           keyable=True)
                driver_data_list = axis_data["XAxis"]["BoneRange"]
                for driver_data in driver_data_list:
                    build_driven(
                        driver=test_controller,
                        axis_data=driver_data,
                        driver_attr="sliderX")
                pm.floatSliderGrp("axis_x_test_widget", e=True, en=True)
                pm.connectControl(
                    "axis_x_test_widget",
                    test_controller.attr("sliderX"))
            if pm.checkBoxGrp("axis_group_widget", q=True, v2=True):
                pm.addAttr(test_controller,
                           ln="sliderY", at="double",
                           min=-1, max=1,
                           dv=0)
                pm.setAttr("{}.sliderY".format(test_controller), e=True,
                           keyable=True)
                driver_data_list = axis_data["YAxis"]["BoneRange"]
                # print(driver_data_list)
                for driver_data in driver_data_list:
                    build_driven(
                        driver=test_controller,
                        axis_data=driver_data,
                        driver_attr="sliderY")
                pm.floatSliderGrp("axis_y_test_widget", e=True, en=True)
                pm.connectControl(
                    "axis_y_test_widget",
                    test_controller.attr("sliderY"))
            if pm.checkBoxGrp("axis_group_widget", q=True, v3=True):
                pm.addAttr(test_controller,
                           ln="sliderZ", at="double",
                           min=-1, max=1,
                           dv=0)
                pm.setAttr("{}.sliderZ".format(test_controller), e=True,
                           keyable=True)
                driver_data_list = axis_data["ZAxis"]["BoneRange"]
                for driver_data in driver_data_list:
                    build_driven(
                        driver=test_controller,
                        axis_data=driver_data,
                        driver_attr="sliderZ")
                pm.floatSliderGrp("axis_z_test_widget", e=True, en=True)
                pm.connectControl(
                    "axis_z_test_widget",
                    test_controller.attr("sliderZ"))
            # print(controller_offset)
            print("Done!")
            # pm.deleteUI("controllerBuilderWnd")
        return
    def update_test_controller(self):
        """Placeholder — updating an existing test controller is not implemented."""
        pass
def action_axis_list(self, widget):
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
controller_data = self.face_data[current_module][controller_index - 1]
current_axis_selected = pm.textScrollList(widget, q=True, si=True)
pm.select(current_axis_selected)
# print(current_axis_selected)
axis_tab_list = pm.tabLayout("axis_setting_grp", q=True, tl=True)
axis_tab_index = pm.tabLayout("axis_setting_grp", q=True, sti=True)
tab_label = axis_tab_list[axis_tab_index - 1]
if tab_label == "XAxis":
self.update_axis_widget(attr="x", data=controller_data)
if tab_label == "YAxis":
self.update_axis_widget(attr="y", data=controller_data)
if tab_label == "ZAxis":
self.update_axis_widget(attr="z", data=controller_data)
def axis_list_signal(self, attr="", method="",
update="", source="", target="select"):
"""
XAxis内的控件的信号
:return:
"""
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_tab_list = pm.tabLayout("axis_setting_grp", q=True, tl=True)
axis_tab_index = pm.tabLayout("axis_setting_grp", q=True, sti=True)
axis_tab_label = axis_tab_list[axis_tab_index - 1]
if method == "delete":
if target == "select":
current_selected = int(pm.textScrollList(
"axis_{}_joint_list".format(attr),
q=True, sii=True)[0])
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].pop(
current_selected - 1)
pm.textScrollList(
"axis_{}_joint_list".format(attr),
e=True, rii=current_selected)
elif target == "all":
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"] = []
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True, ra=True)
if method == "post":
for new_jnt in pm.ls(sl=True):
# new_jnt = pm.ls(sl=True)[0]
if new_jnt.controller_name() not in (pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)):
new_jnt_default_value = get_channel_values(new_jnt.controller_name())
new_jnt_data = {
"BoneName": new_jnt.controller_name(),
"Max": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"Min": [0, 0, 0, 0, 0, 0, 1, 1, 1],
"def": new_jnt_default_value,
}
self.face_data[current_module][controller_index - 1][
"AxisControl"][axis_tab_label]["BoneRange"].append(
new_jnt_data)
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
a=new_jnt.controller_name())
pm.textScrollList(
"axis_{}_joint_list".format(attr), e=True,
si=new_jnt.controller_name())
if method == "update":
update_joints_index = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, sii=True)
all_controller = pm.textScrollList(
"axis_{}_joint_list".format(attr), q=True, ai=True)
for update_joint_index in update_joints_index:
current_selected_index = int(update_joint_index)
current_selected = all_controller[current_selected_index - 1]
default_jnt_value = self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1]["def"]
offset_value = None
if update == "Max":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label]["BoneRange"][
current_selected_index - 1][
"Max"] = offset_value
elif source == "panel":
name = "axis_{}_max_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_max_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][
axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Max"] = offset_value
if update == "Min":
if source == "scene":
current_jnt_value = get_channel_values(
current_selected)
offset_value = [
current_jnt_value[0] - default_jnt_value[0],
current_jnt_value[1] - default_jnt_value[1],
current_jnt_value[2] - default_jnt_value[2],
current_jnt_value[3] - default_jnt_value[3],
current_jnt_value[4] - default_jnt_value[4],
current_jnt_value[5] - default_jnt_value[5],
current_jnt_value[6],
current_jnt_value[7],
current_jnt_value[8],
]
elif source == "panel":
name = "axis_{}_min_translate_field".format(attr)
offset_translate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_rotate_field".format(attr)
offset_rotate_value = pm.floatFieldGrp(
name, q=True, value=True)
name = "axis_{}_min_scale_field".format(attr)
offset_scale_value = pm.floatFieldGrp(
name, q=True, value=True)
offset_value = [
offset_translate_value[0] * 0.01,
offset_translate_value[1] * 0.01,
offset_translate_value[2] * 0.01,
offset_rotate_value[0],
offset_rotate_value[1],
offset_rotate_value[2],
offset_scale_value[0],
offset_scale_value[1],
offset_scale_value[2],
]
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"Min"] = offset_value
if update == "Default":
current_jnt_value = get_channel_values(
current_selected)
self.face_data[current_module][
controller_index - 1]["AxisControl"][axis_tab_label][
"BoneRange"][current_selected_index - 1][
"def"] = current_jnt_value
common.write_json(dict_data=self.face_data, file_path=self.json_folder)
axis_x_data = self.face_data[current_module][controller_index - 1]
self.clean_axis_widget(attr)
self.update_axis_widget(attr=attr, data=axis_x_data)
return
def action_change_axis_state(self):
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
axis_control = self.face_data[current_module][controller_index - 1][
"AxisControl"]
state_array = pm.checkBoxGrp("axis_group_widget", q=True, va3=True)
if state_array[0]:
axis_control["XAxis"]["GroupName"] = "{}_X".format(
self.face_data[current_module][controller_index - 1][
"ControllerName"])
else:
axis_control["XAxis"]["GroupName"] = ""
if state_array[1]:
axis_control["YAxis"]["GroupName"] = "{}_Y".format(
self.face_data[current_module][controller_index - 1][
"ControllerName"])
else:
axis_control["YAxis"]["GroupName"] = ""
if state_array[2]:
axis_control["ZAxis"]["GroupName"] = "{}_Z".format(
self.face_data[current_module][controller_index - 1][
"ControllerName"])
else:
axis_control["ZAxis"]["GroupName"] = ""
# common.write_json(dict_data=self.face_data, file_path=self.json_folder)
return
def action_controller_group_widget(self):
current_module = pm.optionMenuGrp(
"module_selector_widget", q=True, value=True)
controller_index = int(pm.textScrollList(
"controller_list_widget", q=True, sii=True)[0])
text = pm.textFieldGrp("controller_group_widget", q=True, text=True)
self.face_data[current_module][controller_index - 1][
"ControllerGroupName"] = text
# common.write_json(dict_data=self.face_data, file_path=self.json_folder)
return
    def action_controller_bone_widget(self, method):
        """Update the selected controller's bone name.

        :param method: "text" when the user edited the text field directly,
            "button" when the field should be filled from the current
            Maya selection.
        """
        current_module = pm.optionMenuGrp(
            "module_selector_widget", q=True, value=True)
        controller_index = int(pm.textScrollList(
            "controller_list_widget", q=True, sii=True)[0])
        if method == "text":
            # Take the bone name the user typed into the field.
            value = pm.textFieldButtonGrp(
                "controller_bone_widget", q=True, text=True)
            self.face_data[current_module][controller_index - 1][
                "ControllerBoneName"] = value
        if method == "button":
            # Derive the bone name from the first selected scene node and
            # echo it back into the text field.
            value = pm.ls(sl=True)[0].controller_name()
            # NOTE(review): looks like a leftover debug print -- confirm.
            print(value)
            pm.textFieldButtonGrp("controller_bone_widget", e=True, text=value)
            self.face_data[current_module][controller_index - 1][
                "ControllerBoneName"] = value
        # common.write_json(dict_data=self.face_data, file_path=self.json_folder)
        return
    def action_controller_off_widget(self):
        """Placeholder callback for the controller-offset widget; not implemented yet."""
        pass
    def save_face_data(self):
        """Dump the in-memory face data.

        The JSON write is currently commented out, so this only prints the
        data structure for inspection.
        """
        # common.write_json(
        #     dict_data=self.face_data,
        #     file_path=self.json_folder)
        print(self.face_data)
        return
class FaceData:
    """Typed view over the raw face-rig dictionary.

    Each "<part>Controller" entry present in the input dict is wrapped in a
    Controller instance; parts missing from the data remain None.
    """

    def __init__(self, data):
        self.eye = None
        self.brow = None
        self.nose = None
        self.mouth = None
        self.face = None
        self.init_from_data(data)

    def init_from_data(self, data):
        """Populate the per-part Controller attributes from *data*."""
        if "eyeController" in data.keys():
            self.eye = Controller(data=data["eyeController"])
        if "mouthController" in data.keys():
            self.mouth = Controller(data=data["mouthController"])
        if "browController" in data.keys():
            self.brow = Controller(data=data["browController"])
        if "noseController" in data.keys():
            # Bug fix: this branch previously overwrote self.mouth instead
            # of assigning self.nose.
            self.nose = Controller(data=data["noseController"])
        if "faceController" in data.keys():
            self.face = Controller(data=data["faceController"])
class Controller:
    """Plain data holder for one controller entry of the face-rig data."""

    def __init__(self, data):
        # Defaults; immediately overwritten by init_from_dict.
        self.controllerName = ""
        self.controllerGroupName = ""
        self.controllerBoneName = ""
        self.controllerPositionOffset = [0, 0, 0]
        self.axisControl = {}
        self.init_from_dict(dict_data=data)

    def init_from_dict(self, dict_data=None):
        """Copy the known fields out of *dict_data* onto this instance.

        Raises KeyError when a required field is missing (including when
        dict_data is omitted, since an empty dict is substituted).
        """
        source = {} if dict_data is None else dict_data
        field_map = (
            ("controllerName", "ControllerName"),
            ("controllerGroupName", "ControllerGroupName"),
            ("controllerBoneName", "ControllerBoneName"),
            ("controllerPositionOffset", "ControllerPositionOffset"),
            ("axisControl", "AxisControl"),
        )
        for attribute, key in field_map:
            setattr(self, attribute, source[key])
        return
| 24,588 | -10 | 715 |
718fa4fe50d6852876511690c70b17f636bd0209 | 10,738 | py | Python | gibson2/utils/assets_utils.py | dnandha/iGibson | bbd8c294aad1ddffce868244a474dd40c2976590 | [
"MIT"
] | 3 | 2021-02-07T14:00:38.000Z | 2021-07-03T19:40:59.000Z | gibson2/utils/assets_utils.py | dnandha/iGibson | bbd8c294aad1ddffce868244a474dd40c2976590 | [
"MIT"
] | null | null | null | gibson2/utils/assets_utils.py | dnandha/iGibson | bbd8c294aad1ddffce868244a474dd40c2976590 | [
"MIT"
] | 2 | 2021-04-01T12:14:59.000Z | 2021-06-18T13:04:39.000Z | import gibson2
import os
import argparse
import random
import subprocess
import json
from collections import defaultdict
import yaml
def get_ig_category_ids():
    """
    Get iGibson object categories

    :return: mapping from category name to integer id; unknown names map to 255
    """
    categories_file = os.path.join(
        gibson2.ig_dataset_path, 'metadata', 'categories.txt')
    with open(categories_file, 'r') as fp:
        # One category per line; the id is the zero-based line number.
        name_to_id = {
            line.rstrip(): index
            for index, line in enumerate(fp.readlines())
        }
    return defaultdict(lambda: 255, name_to_id)
def get_ig_scene_path(scene_name):
    """
    Get iGibson scene path

    :param scene_name: scene name
    :return: file path to the scene name
    """
    scenes_root = gibson2.ig_dataset_path + "/scenes"
    available = os.listdir(scenes_root)
    assert scene_name in available, "Scene {} does not exist".format(scene_name)
    return os.path.join(scenes_root, scene_name)
def get_3dfront_scene_path(scene_name):
    """
    Get 3D-FRONT scene path

    Raises AssertionError when the scene folder is not present under the
    dataset's "scenes" directory.

    :param scene_name: scene name
    :return: file path to the scene name
    """
    threedfront_dataset_path = gibson2.threedfront_dataset_path
    threedfront_dataset_path = os.path.join( threedfront_dataset_path, "scenes")
    assert scene_name in os.listdir(
        threedfront_dataset_path), "Scene {} does not exist".format(scene_name)
    return os.path.join(threedfront_dataset_path, scene_name)
def get_cubicasa_scene_path(scene_name):
    """
    Get cubicasa scene path

    Raises AssertionError when the scene folder is not present under the
    dataset's "scenes" directory.

    :param scene_name: scene name
    :return: file path to the scene name
    """
    cubicasa_dataset_path = gibson2.cubicasa_dataset_path
    cubicasa_dataset_path= os.path.join( cubicasa_dataset_path, "scenes")
    assert scene_name in os.listdir(
        cubicasa_dataset_path), "Scene {} does not exist".format(scene_name)
    return os.path.join(cubicasa_dataset_path, scene_name)
def get_ig_category_path(category_name):
    """
    Get iGibson object category path

    Raises AssertionError when the category folder does not exist under
    "<ig_dataset_path>/objects".

    :param category_name: object category
    :return: file path to the object category
    """
    ig_dataset_path = gibson2.ig_dataset_path
    ig_categories_path = ig_dataset_path + "/objects"
    assert category_name in os.listdir(
        ig_categories_path), "Category {} does not exist".format(category_name)
    return os.path.join(ig_categories_path, category_name)
def get_ig_model_path(category_name, model_name):
    """
    Get iGibson object model path

    Raises AssertionError when either the category or the model folder does
    not exist.

    :param category_name: object category
    :param model_name: object model
    :return: file path to the object model
    """
    ig_category_path = get_ig_category_path(category_name)
    assert model_name in os.listdir(
        ig_category_path), "Model {} from category {} does not exist".format(model_name, category_name)
    return os.path.join(ig_category_path, model_name)
def get_all_object_models():
    """
    Get iGibson all object models

    :return: a list of all object model paths
    """
    objects_root = gibson2.ig_dataset_path + "/objects"
    # Categories are the directories directly under objects_root.
    category_dirs = [
        entry for entry in os.listdir(objects_root)
        if os.path.isdir(os.path.join(objects_root, entry))
    ]
    model_paths = []
    for category_dir in category_dirs:
        # Each model is a directory one level below its category.
        for model_dir in os.listdir(os.path.join(objects_root, category_dir)):
            full_path = os.path.join(objects_root, category_dir, model_dir)
            if os.path.isdir(full_path):
                model_paths.append(full_path)
    return model_paths
def get_ig_assets_version():
    """
    Get iGibson asset version

    :return: git HEAD commit hash of the ig_dataset checkout
    """
    process = subprocess.Popen(['git', '-C', gibson2.ig_dataset_path, 'rev-parse', 'HEAD'],
                               shell=False, stdout=subprocess.PIPE)
    # Bug fix: communicate() returns bytes; str() on bytes produced the repr
    # "b'<hash>'" instead of the bare hash, so decode explicitly.
    git_head_hash = process.communicate()[0].strip().decode('utf-8')
    return "{}".format(git_head_hash)
def get_scene_path(scene_id):
    """
    Gibson scene path

    "stadium" is accepted even when it has no folder in the dataset; any
    other id must exist under the Gibson dataset path.

    :param scene_id: scene id
    :return: scene path for this scene_id
    """
    data_path = gibson2.g_dataset_path
    assert scene_id in os.listdir(
        data_path) or scene_id == 'stadium', "Scene {} does not exist".format(scene_id)
    return os.path.join(data_path, scene_id)
def get_texture_file(mesh_file):
    """
    Get texture file

    :param mesh_file: mesh obj file
    :return: texture file path, or None when the mesh declares no
        material library / no diffuse texture
    """
    base_dir = os.path.dirname(mesh_file)

    def _first_reference(path, keyword):
        # Second token of the first line mentioning *keyword*, or None.
        with open(path, 'r') as handle:
            for raw_line in handle.readlines():
                if keyword in raw_line:
                    return raw_line.strip().split()[1]
        return None

    mtl_name = _first_reference(mesh_file, 'mtllib')
    if mtl_name is None:
        return
    texture_name = _first_reference(
        os.path.join(base_dir, mtl_name), 'map_Kd')
    if texture_name is None:
        return
    return os.path.join(base_dir, texture_name)
def download_assets():
    """
    Download iGibson assets

    Fetches the asset tarball with wget into ~/tmp and extracts it next to
    gibson2.assets_path. NOTE(review): assumes ~/tmp exists and wget/tar
    are on PATH -- confirm.
    """
    if not os.path.exists(os.path.dirname(gibson2.assets_path)):
        os.makedirs(os.path.dirname(gibson2.assets_path))
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 '
        'https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz -O ~/tmp/assets_igibson.tar.gz')
    os.system('tar -zxf ~/tmp/assets_igibson.tar.gz --directory {}'.format(
        os.path.dirname(gibson2.assets_path)))
def download_demo_data():
    """
    Download iGibson demo dataset

    Skips the download entirely when the 'Rs' scene folder already exists.
    """
    if not os.path.exists(gibson2.g_dataset_path):
        os.makedirs(gibson2.g_dataset_path)
    if not os.path.exists(os.path.join(gibson2.g_dataset_path, 'Rs')):
        os.system(
            'wget -c --retry-connrefused --tries=5 --timeout=5 '
            'https://storage.googleapis.com/gibson_scenes/Rs.tar.gz -O ~/tmp/Rs.tar.gz')
        os.system(
            'tar -zxf ~/tmp/Rs.tar.gz --directory {}'.format(gibson2.g_dataset_path))
def download_dataset(url):
    """
    Download Gibson dataset

    :param url: URL of a dataset tarball; the archive's top-level folder is
        stripped so the scenes land directly in g_dataset_path.
    """
    if not os.path.exists(gibson2.g_dataset_path):
        os.makedirs(gibson2.g_dataset_path)
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system(
        'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.g_dataset_path))
    # These datasets come as folders; in these folder there are scenes, so --strip-components are needed.
def download_ig_dataset():
    """
    Download iGibson dataset

    The interactive license-agreement prompt is currently disabled (left
    commented out below).
    """
    #while input("Do you agree to the terms for using iGibson dataset (http://svl.stanford.edu/gibson2/assets/GDS_agreement.pdf)? [y/n]") != "y":
    #    print("You need to agree to the terms for using iGibson dataset.")
    if not os.path.exists(gibson2.ig_dataset_path):
        os.makedirs(gibson2.ig_dataset_path)
    url = "https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz"
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system(
        'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.ig_dataset_path))
    # These datasets come as folders; in these folder there are scenes, so --strip-components are needed.
if __name__ == "__main__":
    # Command-line entry point: exactly one download/maintenance action is
    # executed per run (first matching flag wins).
    parser = argparse.ArgumentParser()
    parser.add_argument('--download_assets',
                        action='store_true', help='download assets file')
    parser.add_argument('--download_demo_data', action='store_true',
                        help='download demo data Rs')
    parser.add_argument('--download_dataset', type=str,
                        help='download dataset file given an URL')
    parser.add_argument('--download_ig_dataset', action='store_true',
                        help='download iG Dataset')
    parser.add_argument('--download_ext_scene_assets', action='store_true',
                        help='download external scene dataset assets')
    parser.add_argument('--change_data_path', action='store_true',
                        help='change the path to store assets and datasert')
    args = parser.parse_args()
    if args.download_assets:
        download_assets()
    elif args.download_demo_data:
        download_demo_data()
    elif args.download_dataset is not None:
        download_dataset(args.download_dataset)
    elif args.download_ig_dataset:
        download_ig_dataset()
    elif args.change_data_path:
        # NOTE(review): change_data_path and download_ext_scene_assets are
        # not defined in this copy of the module -- confirm they exist at
        # import time or these branches will raise NameError.
        change_data_path()
    elif args.download_ext_scene_assets:
        download_ext_scene_assets()
| 36.033557 | 145 | 0.655336 | import gibson2
import os
import argparse
import random
import subprocess
import json
from collections import defaultdict
import yaml
def get_ig_category_ids():
"""
Get iGibson object categories
:return: file path to the scene name
"""
ig_dataset_path = gibson2.ig_dataset_path
ig_categories_files = os.path.join(
ig_dataset_path, 'metadata', 'categories.txt')
name_to_id = {}
with open(ig_categories_files, 'r') as fp:
for i, l in enumerate(fp.readlines()):
name_to_id[l.rstrip()] = i
return defaultdict(lambda: 255, name_to_id)
def get_ig_scene_path(scene_name):
"""
Get iGibson scene path
:param scene_name: scene name
:return: file path to the scene name
"""
ig_dataset_path = gibson2.ig_dataset_path
ig_scenes_path = ig_dataset_path + "/scenes"
assert scene_name in os.listdir(
ig_scenes_path), "Scene {} does not exist".format(scene_name)
return os.path.join(ig_scenes_path, scene_name)
def get_3dfront_scene_path(scene_name):
"""
Get 3D-FRONT scene path
:param scene_name: scene name
:return: file path to the scene name
"""
threedfront_dataset_path = gibson2.threedfront_dataset_path
threedfront_dataset_path = os.path.join( threedfront_dataset_path, "scenes")
assert scene_name in os.listdir(
threedfront_dataset_path), "Scene {} does not exist".format(scene_name)
return os.path.join(threedfront_dataset_path, scene_name)
def get_cubicasa_scene_path(scene_name):
"""
Get cubicasa scene path
:param scene_name: scene name
:return: file path to the scene name
"""
cubicasa_dataset_path = gibson2.cubicasa_dataset_path
cubicasa_dataset_path= os.path.join( cubicasa_dataset_path, "scenes")
assert scene_name in os.listdir(
cubicasa_dataset_path), "Scene {} does not exist".format(scene_name)
return os.path.join(cubicasa_dataset_path, scene_name)
def get_ig_category_path(category_name):
"""
Get iGibson object category path
:param category_name: object category
:return: file path to the object category
"""
ig_dataset_path = gibson2.ig_dataset_path
ig_categories_path = ig_dataset_path + "/objects"
assert category_name in os.listdir(
ig_categories_path), "Category {} does not exist".format(category_name)
return os.path.join(ig_categories_path, category_name)
def get_ig_model_path(category_name, model_name):
"""
Get iGibson object model path
:param category_name: object category
:param model_name: object model
:return: file path to the object model
"""
ig_category_path = get_ig_category_path(category_name)
assert model_name in os.listdir(
ig_category_path), "Model {} from category {} does not exist".format(model_name, category_name)
return os.path.join(ig_category_path, model_name)
def get_all_object_models():
"""
Get iGibson all object models
:return: a list of all object model paths
"""
ig_dataset_path = gibson2.ig_dataset_path
ig_categories_path = ig_dataset_path + "/objects"
categories = os.listdir(ig_categories_path)
categories = [item for item in categories if os.path.isdir(
os.path.join(ig_categories_path, item))]
models = []
for category in categories:
category_models = os.listdir(
os.path.join(ig_categories_path, category))
category_models = [item for item in category_models
if os.path.isdir(os.path.join(ig_categories_path,
category,
item))]
models.extend([os.path.join(ig_categories_path, category, item)
for item in category_models])
return models
def get_ig_assets_version():
"""
Get iGibson asset version
:return: iGibson asset version
"""
process = subprocess.Popen(['git', '-C', gibson2.ig_dataset_path, 'rev-parse', 'HEAD'],
shell=False, stdout=subprocess.PIPE)
git_head_hash = str(process.communicate()[0].strip())
return "{}".format(git_head_hash)
def get_scene_path(scene_id):
"""
Gibson scene path
:param scene_id: scene id
:return: scene path for this scene_id
"""
data_path = gibson2.g_dataset_path
assert scene_id in os.listdir(
data_path) or scene_id == 'stadium', "Scene {} does not exist".format(scene_id)
return os.path.join(data_path, scene_id)
def get_texture_file(mesh_file):
"""
Get texture file
:param mesh_file: mesh obj file
:return: texture file path
"""
model_dir = os.path.dirname(mesh_file)
with open(mesh_file, 'r') as f:
lines = [line.strip() for line in f.readlines() if 'mtllib' in line]
if len(lines) == 0:
return
mtl_file = lines[0].split()[1]
mtl_file = os.path.join(model_dir, mtl_file)
with open(mtl_file, 'r') as f:
lines = [line.strip() for line in f.readlines() if 'map_Kd' in line]
if len(lines) == 0:
return
texture_file = lines[0].split()[1]
texture_file = os.path.join(model_dir, texture_file)
return texture_file
def download_assets():
"""
Download iGibson assets
"""
if not os.path.exists(os.path.dirname(gibson2.assets_path)):
os.makedirs(os.path.dirname(gibson2.assets_path))
os.system(
'wget -c --retry-connrefused --tries=5 --timeout=5 '
'https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz -O ~/tmp/assets_igibson.tar.gz')
os.system('tar -zxf ~/tmp/assets_igibson.tar.gz --directory {}'.format(
os.path.dirname(gibson2.assets_path)))
def download_demo_data():
"""
Download iGibson demo dataset
"""
if not os.path.exists(gibson2.g_dataset_path):
os.makedirs(gibson2.g_dataset_path)
if not os.path.exists(os.path.join(gibson2.g_dataset_path, 'Rs')):
os.system(
'wget -c --retry-connrefused --tries=5 --timeout=5 '
'https://storage.googleapis.com/gibson_scenes/Rs.tar.gz -O ~/tmp/Rs.tar.gz')
os.system(
'tar -zxf ~/tmp/Rs.tar.gz --directory {}'.format(gibson2.g_dataset_path))
def download_dataset(url):
"""
Download Gibson dataset
"""
if not os.path.exists(gibson2.g_dataset_path):
os.makedirs(gibson2.g_dataset_path)
file_name = url.split('/')[-1]
os.system(
'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
os.system(
'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.g_dataset_path))
# These datasets come as folders; in these folder there are scenes, so --strip-components are needed.
def download_ext_scene_assets():
    """
    Download assets for the external scene datasets (CubiCasa and 3D-FRONT).

    Fetches the shared default-materials tarball into both dataset folders,
    then the pre-built URDF tarball into the 3D-FRONT folder.
    NOTE(review): assumes ~/tmp exists and wget/tar are on PATH -- confirm.
    """
    os.makedirs(gibson2.threedfront_dataset_path, exist_ok=True)
    os.makedirs(gibson2.cubicasa_dataset_path, exist_ok=True)
    url = "https://storage.googleapis.com/gibson_scenes/default_materials.tar.gz"
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system( 'tar -zxf ~/tmp/{} --directory {}'.format(
        file_name, gibson2.cubicasa_dataset_path))
    os.system( 'tar -zxf ~/tmp/{} --directory {}'.format(
        file_name, gibson2.threedfront_dataset_path))
    url = "https://storage.googleapis.com/gibson_scenes/threedfront_urdfs.tar.gz"
    file_name = url.split('/')[-1]
    os.system(
        'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
    os.system( 'tar -zxf ~/tmp/{} --directory {}'.format(
        file_name, gibson2.threedfront_dataset_path))
def download_ig_dataset():
"""
Download iGibson dataset
"""
#while input("Do you agree to the terms for using iGibson dataset (http://svl.stanford.edu/gibson2/assets/GDS_agreement.pdf)? [y/n]") != "y":
# print("You need to agree to the terms for using iGibson dataset.")
if not os.path.exists(gibson2.ig_dataset_path):
os.makedirs(gibson2.ig_dataset_path)
url = "https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz"
file_name = url.split('/')[-1]
os.system(
'wget -c --retry-connrefused --tries=5 --timeout=5 {} -O ~/tmp/{}'.format(url, file_name))
os.system(
'tar -zxf ~/tmp/{} --strip-components=1 --directory {}'.format(file_name, gibson2.ig_dataset_path))
# These datasets come as folders; in these folder there are scenes, so --strip-components are needed.
def change_data_path():
    """
    Interactively edit the dataset paths stored in global_config.yaml.

    Prints the current paths, prompts for a replacement for each entry, and
    writes the file back only if the user confirms with "y".
    """
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'global_config.yaml')) as f:
        global_config = yaml.load(f, Loader=yaml.FullLoader)
    print("Current dataset path:")
    for k, v in global_config.items():
        print("{}: {}".format(k,v))
    for k,v in global_config.items():
        new_path = input("Change {} from {} to: ".format(k, v))
        global_config[k] = new_path
    print("New dataset path:")
    for k, v in global_config.items():
        print("{}: {}".format(k,v))
    response = input("Save? [y/n]")
    if response == "y":
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'global_config.yaml'), 'w') as f:
            yaml.dump(global_config, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--download_assets',
action='store_true', help='download assets file')
parser.add_argument('--download_demo_data', action='store_true',
help='download demo data Rs')
parser.add_argument('--download_dataset', type=str,
help='download dataset file given an URL')
parser.add_argument('--download_ig_dataset', action='store_true',
help='download iG Dataset')
parser.add_argument('--download_ext_scene_assets', action='store_true',
help='download external scene dataset assets')
parser.add_argument('--change_data_path', action='store_true',
help='change the path to store assets and datasert')
args = parser.parse_args()
if args.download_assets:
download_assets()
elif args.download_demo_data:
download_demo_data()
elif args.download_dataset is not None:
download_dataset(args.download_dataset)
elif args.download_ig_dataset:
download_ig_dataset()
elif args.change_data_path:
change_data_path()
elif args.download_ext_scene_assets:
download_ext_scene_assets()
| 1,696 | 0 | 46 |
98b43896db4a9743c1b1d21ae5bcb6569ad055c4 | 2,797 | py | Python | satori/tests/test_common_templating.py | mgeisler/satori | dea382bae1cd043189589c0f7d4c20b4b6725ab5 | [
"Apache-2.0"
] | 1 | 2015-01-18T19:56:28.000Z | 2015-01-18T19:56:28.000Z | satori/tests/test_common_templating.py | samstav/satori | 239fa1e3c7aac78599145c670576f0ac76a41a89 | [
"Apache-2.0"
] | null | null | null | satori/tests/test_common_templating.py | samstav/satori | 239fa1e3c7aac78599145c670576f0ac76a41a89 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=C0103,R0904
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for Templating module."""
import unittest
from satori.common import templating
def fail_fixture():
    """Used to simulate a template error; always raises AttributeError."""
    raise AttributeError("Boom!")
class TestTemplating(unittest.TestCase):
    """Test Templating Module."""
    def test_prepend_function(self):
        """prepend filter prefixes a non-empty value with the given string."""
        result = templating.parse("{{ root|prepend('/')}}/path", root="etc")
        self.assertEqual(result, '/etc/path')
    def test_prepend_function_blank(self):
        """prepend filter yields nothing when the value is undefined."""
        result = templating.parse("{{ root|prepend('/')}}/path")
        self.assertEqual(result, '/path')
    def test_preserve_linefeed_escaping(self):
        """preserve returns escaped linefeeds."""
        result = templating.parse('{{ "A\nB" | preserve }}')
        self.assertEqual(result, 'A\\nB')
    def test_template_extra_globals(self):
        """Globals are available in template."""
        result = templating.parse("{{ foo }}", foo="bar")
        self.assertEqual(result, 'bar')
    def test_template_syntax_error(self):
        """jinja.TemplateSyntaxError is caught."""
        self.assertRaises(templating.TemplateException, templating.parse,
                          "{{ not closed")
    def test_template_undefined_error(self):
        """jinja.UndefinedError is caught."""
        self.assertRaises(templating.TemplateException, templating.parse,
                          "{{ unknown() }}")
    def test_template_exception(self):
        """Exception in global is caught."""
        self.assertRaises(templating.TemplateException, templating.parse,
                          "{{ boom() }}", boom=fail_fixture)
    def test_extra_globals(self):
        """Validates globals are set."""
        env = templating.get_jinja_environment("", {'foo': 1})
        self.assertTrue('foo' in env.globals)
        self.assertEqual(env.globals['foo'], 1)
    def test_json_included(self):
        """json library available to template."""
        result = templating.parse("{{ json.dumps({'data': 1}) }}")
        self.assertEqual(result, '{"data": 1}')
if __name__ == '__main__':
unittest.main()
| 34.9625 | 77 | 0.648552 | # pylint: disable=C0103,R0904
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for Templating module."""
import unittest
from satori.common import templating
def fail_fixture():
"""Used to simulate a template error."""
raise AttributeError("Boom!")
class TestTemplating(unittest.TestCase):
"""Test Templating Module."""
def test_prepend_function(self):
"""preserve returns escaped linefeeds."""
result = templating.parse("{{ root|prepend('/')}}/path", root="etc")
self.assertEqual(result, '/etc/path')
def test_prepend_function_blank(self):
"""preserve returns escaped linefeeds."""
result = templating.parse("{{ root|prepend('/')}}/path")
self.assertEqual(result, '/path')
def test_preserve_linefeed_escaping(self):
"""preserve returns escaped linefeeds."""
result = templating.parse('{{ "A\nB" | preserve }}')
self.assertEqual(result, 'A\\nB')
def test_template_extra_globals(self):
"""Globals are available in template."""
result = templating.parse("{{ foo }}", foo="bar")
self.assertEqual(result, 'bar')
def test_template_syntax_error(self):
"""jinja.TemplateSyntaxError is caught."""
self.assertRaises(templating.TemplateException, templating.parse,
"{{ not closed")
def test_template_undefined_error(self):
"""jinja.UndefinedError is caught."""
self.assertRaises(templating.TemplateException, templating.parse,
"{{ unknown() }}")
def test_template_exception(self):
"""Exception in global is caught."""
self.assertRaises(templating.TemplateException, templating.parse,
"{{ boom() }}", boom=fail_fixture)
def test_extra_globals(self):
"""Validates globals are set."""
env = templating.get_jinja_environment("", {'foo': 1})
self.assertTrue('foo' in env.globals)
self.assertEqual(env.globals['foo'], 1)
def test_json_included(self):
"""json library available to template."""
result = templating.parse("{{ json.dumps({'data': 1}) }}")
self.assertEqual(result, '{"data": 1}')
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
03e934b55fee6d774e434334e745d004e74e3130 | 4,638 | py | Python | finetune/lm_entailment.py | johndpope/finetune | 8cdc2a29104f3f2f6e032a9496b3c4e251ac028c | [
"MIT"
] | null | null | null | finetune/lm_entailment.py | johndpope/finetune | 8cdc2a29104f3f2f6e032a9496b3c4e251ac028c | [
"MIT"
] | null | null | null | finetune/lm_entailment.py | johndpope/finetune | 8cdc2a29104f3f2f6e032a9496b3c4e251ac028c | [
"MIT"
] | null | null | null | import json
from sklearn.model_selection import train_test_split
from finetune.config import BATCH_SIZE
from finetune.lm_base import LanguageModelBase
from finetune.target_encoders import OrdinalClassificationEncoder
if __name__ == "__main__":
    # Ad-hoc evaluation script: loads (question, answer, score) triples,
    # splits them, and reports Spearman correlation of model predictions
    # against the human scores on both splits.
    with open("data/questions.json", "rt") as fp:
        data = json.load(fp)
    scores = []
    questions = []
    answers = []
    save_path = 'saved-models/cola'
    # NOTE(review): LanguageModelEntailment is not defined/imported in this
    # copy of the module -- confirm the import exists before running.
    model = LanguageModelEntailment(save_path)
    for item in data:
        row = data[item]
        scores.append(row["score"])
        questions.append(row["question"])
        answers.append(row["answers"][0]["answer"])
    scores_train, scores_test, ques_train, ques_test, ans_train, ans_test = train_test_split(
        scores, questions, answers, test_size=0.33, random_state=5)
    # Training is disabled; a previously saved model is loaded instead.
    #model.finetune(ques_train, ans_train, scores_train)
    model = LanguageModelEntailment.load(save_path)
    print("TRAIN EVAL")
    predictions = model.predict(ques_train, ans_train)
    print(predictions)
    from scipy.stats import spearmanr
    print(spearmanr(predictions, scores_train))
    print("TEST EVAL")
    predictions = model.predict(ques_test, ans_test)
    print(predictions)
    print(spearmanr(predictions, scores_test))
| 42.163636 | 117 | 0.689521 | import json
from sklearn.model_selection import train_test_split
from finetune.config import BATCH_SIZE
from finetune.lm_base import LanguageModelBase
from finetune.target_encoders import OrdinalClassificationEncoder
class LanguageModelEntailment(LanguageModelBase):
    """Entailment-style fine-tuning model that scores (query, answer) text pairs."""
    def get_target_encoder(self):
        # Targets are treated as ordinal (sortable) class labels.
        return OrdinalClassificationEncoder()
    def _text_to_ids(self, *Xs, max_length=None):
        # Encode exactly one (question, answer) pair of inputs into token
        # ids plus an attention mask.
        max_length = max_length or self.max_length
        assert len(Xs) == 2, "This implementation assumes 2 Xs"
        question_answer_pairs = self.encoder.encode_for_entailment(*Xs, max_length=max_length)
        tokens, mask = self._array_format(question_answer_pairs)
        return tokens, mask
    def finetune(self, X_1, X_2, Y, batch_size=BATCH_SIZE, val_size=0.05, val_interval=150):
        """
        :param X_1: list or array of text to embed as the queries.
        :param X_2: list or array of text to embed as the answers.
        :param Y: integer or string-valued class labels. It is necessary for the items of Y to be sortable.
        :param batch_size: integer number of examples per batch. When N_GPUS > 1, this number
                           corresponds to the number of training examples provided to each GPU.
        :param val_size: Float fraction or int number that represents the size of the validation set.
        :param val_interval: The interval for which validation is performed, measured in number of steps.
        """
        self.is_classification = True
        return self._finetune(X_1, X_2, Y=Y, batch_size=batch_size, val_size=val_size, val_interval=val_interval)
    def predict(self, X_1, X_2, max_length=None):
        """
        Produces a list of most likely class labels as determined by the fine-tuned model.

        :param X_1: list or array of text to embed as the queries.
        :param X_2: list or array of text to embed as the answers.
        :param max_length: the number of tokens to be included in the document representation.
                           Providing more than `max_length` tokens as input will result in truncation.
        :returns: list of class labels.
        """
        return self.label_encoder.inverse_transform(self._predict_proba(X_1, X_2, max_length=max_length))
    def predict_proba(self, X_1, X_2, max_length=None):
        """
        Produces a probability distribution over classes for each example in X.

        :param X_1: list or array of text to embed as the queries.
        :param X_2: list or array of text to embed as the answers.
        :param max_length: the number of tokens to be included in the document representation.
                           Providing more than `max_length` tokens as input will result in truncation.
        :returns: list of dictionaries. Each dictionary maps from a class label to its assigned class probability.
        """
        return self._predict_proba(X_1, X_2, max_length=max_length)
    def featurize(self, X_1, X_2, max_length=None):
        """
        Embeds inputs in learned feature space. Can be called before or after calling :meth:`finetune`.

        :param X_1: list or array of text to embed as the queries.
        :param X_2: list or array of text to embed as the answers.
        :param max_length: the number of tokens to be included in the document representation.
                           Providing more than `max_length` tokens as input will result in truncation.
        :returns: np.array of features of shape (n_examples, embedding_size).
        """
        return self._featurize(X_1, X_2, max_length=max_length)
if __name__ == "__main__":
    # Load the QA dataset: a dict keyed by item id, where each row holds a
    # "score", a "question", and a list of "answers".
    with open("data/questions.json", "rt") as fp:
        data = json.load(fp)
    scores = []
    questions = []
    answers = []
    save_path = 'saved-models/cola'
    model = LanguageModelEntailment(save_path)
    # Flatten the dataset into parallel lists; only the FIRST answer of each
    # item is used.
    for item in data:
        row = data[item]
        scores.append(row["score"])
        questions.append(row["question"])
        answers.append(row["answers"][0]["answer"])
    scores_train, scores_test, ques_train, ques_test, ans_train, ans_test = train_test_split(
        scores, questions, answers, test_size=0.33, random_state=5)
    #model.finetune(ques_train, ans_train, scores_train)
    # NOTE(review): the freshly constructed `model` above is discarded here —
    # a previously fine-tuned model is reloaded from disk instead (the
    # finetune call is commented out).
    model = LanguageModelEntailment.load(save_path)
    print("TRAIN EVAL")
    predictions = model.predict(ques_train, ans_train)
    print(predictions)
    from scipy.stats import spearmanr
    # Spearman rank correlation between model predictions and human scores.
    print(spearmanr(predictions, scores_train))
    print("TEST EVAL")
    predictions = model.predict(ques_test, ans_test)
    print(predictions)
    print(spearmanr(predictions, scores_test))
| 383 | 2,977 | 23 |
ec8065188243aed314b8d7196f0b5047cc870aa9 | 1,543 | py | Python | bayesrl/environments/chainworld.py | OlehLuk/bayesrl | ceaee729e93254f8209738274e724afd463c994e | [
"MIT"
] | null | null | null | bayesrl/environments/chainworld.py | OlehLuk/bayesrl | ceaee729e93254f8209738274e724afd463c994e | [
"MIT"
] | null | null | null | bayesrl/environments/chainworld.py | OlehLuk/bayesrl | ceaee729e93254f8209738274e724afd463c994e | [
"MIT"
] | null | null | null | from ..utils import check_random_state
| 29.673077 | 88 | 0.593649 | from ..utils import check_random_state
class ChainWorld(object):
    """A one-dimensional chain MDP with a terminal reward at each end.

    The agent starts ``left_length`` positions from the left end.  Action 0
    moves one step left, action 1 one step right; with probability
    ``p_return_to_start`` the move is replaced by a teleport back to the
    start state.  The two end states are terminal and pay ``left_reward`` /
    ``right_reward``; every other state pays ``on_chain_reward``.
    """

    def __init__(self, left_length, left_reward, right_length, right_reward,
                 on_chain_reward, p_return_to_start, random_state=None):
        self.left_length = left_length
        self.left_reward = left_reward
        self.right_length = right_length
        self.right_reward = right_reward
        self.on_chain_reward = on_chain_reward
        self.p_return_to_start = p_return_to_start
        # One state per chain position plus the start state itself.
        self.num_states = self.left_length + self.right_length + 1
        self.num_actions = 2
        self.random_state = check_random_state(random_state)
        self.reset()

    def reset(self):
        """Move the agent back to the start state."""
        self.state = self.left_length

    def observe(self):
        """Return the current (fully observable) state index."""
        return self.state

    def is_terminal(self, state):
        """A state is terminal iff it is either end of the chain."""
        return state in (0, self.num_states - 1)

    def perform_action(self, action):
        """Apply ``action`` (0 = left, 1 = right); return (state, reward)."""
        if self.p_return_to_start and self.random_state.rand() < self.p_return_to_start:
            # Stochastic teleport: the chosen move is discarded entirely.
            self.reset()
        else:
            self.state += 1 if action else -1
        if self.state == 0:
            reward = self.left_reward
        elif self.state == self.num_states - 1:
            reward = self.right_reward
        else:
            reward = self.on_chain_reward
        return self.observe(), reward

    def get_max_reward(self):
        """Return the larger of the two terminal rewards."""
        return max(self.left_reward, self.right_reward)
| 1,315 | 4 | 184 |
e543001c6dbbf7c0f14676334e540a1dbbbed706 | 4,780 | py | Python | backend.py | MahatKC/expertsystemassignment | 4db94390c9b7651d46b6a6e6a69c35e93aafcf31 | [
"MIT"
] | null | null | null | backend.py | MahatKC/expertsystemassignment | 4db94390c9b7651d46b6a6e6a69c35e93aafcf31 | [
"MIT"
] | null | null | null | backend.py | MahatKC/expertsystemassignment | 4db94390c9b7651d46b6a6e6a69c35e93aafcf31 | [
"MIT"
] | null | null | null | #trabalho por Leonardo Vanzin, Mateus Karvat e Roberta Aparecida
#inicialmente, são importadas as bibliotecas necessárias
from experta import *
import interface
valores_convertidos = {
'proximidadeMar': float(interface.valores['proximidadeMar']),
'desnivel': float(interface.valores['desnivel']),
'velocidadeVento': float(interface.valores['velocidadeVento']),
'latitude': float(interface.valores['latitude']),
'area': float(interface.valores['area']),
'temperaturaInterna': float(interface.valores['temperaturaInterna'])
}
viabilidade = {
'maremotriz': False,
'eolica': False,
'solar': False,
'geotermica': False,
'hidrica': False
}
#então, criaremos os fatos utilizado para as regras do SE
#no Experta, cada fato é um classe individual com parâmetros próprios
#criaremos um fato distinto para cada possível fonte energética de nosso problema
# o motor de inferência deve ser declarado como uma classe própria, dentro da qual serão definidas
# as regras
# após declarar o motor, as regras e os fatos, é preciso instanciá-los
engine = AnaliseViabilidade()
# o motor é reinicializado para aceitar novos fatos (limpando quaisquer valores existentes na cache após uma execução anterior)
engine.reset()
# cada um dos fatos é declarado individualmente, podendo-se passar múltiplos parâmetros para um mesmo fato de uma vez só
engine.declare(Maremotriz(diferenca_mare=valores_convertidos['desnivel'],
proximidade_mar=valores_convertidos['proximidadeMar']))
engine.declare(Eolica(velocidade_vento=valores_convertidos['velocidadeVento']))
engine.declare(Solar(latitude=valores_convertidos['latitude']))
engine.declare(Hidrica(area_reservatorio=valores_convertidos['area']))
engine.declare(Geotermica(temperatura_subterranea=valores_convertidos['temperaturaInterna']))
# por fim, o motor é executado
engine.run()
print(viabilidade) | 45.961538 | 127 | 0.722803 | #trabalho por Leonardo Vanzin, Mateus Karvat e Roberta Aparecida
#inicialmente, são importadas as bibliotecas necessárias
from experta import *
import interface
# User-supplied form values from the GUI, converted from strings to floats.
valores_convertidos = {
    'proximidadeMar': float(interface.valores['proximidadeMar']),
    'desnivel': float(interface.valores['desnivel']),
    'velocidadeVento': float(interface.valores['velocidadeVento']),
    'latitude': float(interface.valores['latitude']),
    'area': float(interface.valores['area']),
    'temperaturaInterna': float(interface.valores['temperaturaInterna'])
}
# Result flags: one per energy source, flipped to True by the matching rule.
viabilidade = {
    'maremotriz': False,
    'eolica': False,
    'solar': False,
    'geotermica': False,
    'hidrica': False
}
# Next we create the facts used by the expert-system rules.
# In Experta each fact is an individual class with its own fields;
# we create a distinct fact for each candidate energy source of our problem.
class Solar(Fact):
    """Fact holding the site latitude, used to assess solar viability."""
    # When declaring a field you can constrain its type and value and mark it
    # as mandatory or optional.
    # Here: a mandatory float whose absolute value must be at most 90.
    # NOTE(review): the lambda parameter is named `longitude` although the
    # field is `latitude` — a misnomer only; the validation itself is correct.
    latitude = Field(lambda longitude:
                        isinstance(longitude, float) and abs(longitude)<=90, mandatory=True)
class Geotermica(Fact):
    """Fact holding the underground temperature (geothermal source)."""
    # This field has no value restriction, so its declaration is simpler and
    # needs no lambda validator.
    temperatura_subterranea = Field(float, mandatory=True) # in degrees Celsius
class Maremotriz(Fact):
    """Fact describing tidal-power conditions (tide range, sea proximity)."""
    # The tide range is only relevant when the user is close to the sea, so
    # this field is NOT mandatory: a fact without it is still accepted into
    # the knowledge base.
    diferenca_mare = Field(lambda diferenca_mare:
                        isinstance(diferenca_mare, float) and diferenca_mare>=0) # in meters
    proximidade_mar = Field(float, mandatory=True) # in km
class Eolica(Fact):
    """Fact holding the local wind speed (wind power)."""
    velocidade_vento = Field(lambda velocidade_vento: isinstance(velocidade_vento, float)
                                and velocidade_vento>=0, mandatory=True) # in km/h
class Hidrica(Fact):
    """Fact holding the reservoir area (hydro power)."""
    area_reservatorio = Field(lambda area_reservatorio:
                                isinstance(area_reservatorio, float) and area_reservatorio>=0,
                                mandatory=True) # in km²
# o motor de inferência deve ser declarado como uma classe própria, dentro da qual serão definidas
# as regras
class AnaliseViabilidade(KnowledgeEngine):
    """Inference engine with one viability rule per energy source.

    Each rule fires when the declared fact values make that source viable,
    and records the result in the module-level ``viabilidade`` dict.
    """
    # Each rule uses a @Rule annotation in which the admissible fact values
    # are specified: we access the desired fact and give a lambda predicate
    # over each value.
    # When a rule constrains more than one field, it only fires if ALL the
    # constraints are satisfied.  E.g. regra_maremotriz runs only when
    # diferenca_mare >= 7 AND proximidade_mar <= 2; otherwise nothing happens.
    @Rule(Maremotriz(diferenca_mare=P(lambda d: d>=7),proximidade_mar=P(lambda p: p<=2)))
    def regra_maremotriz(self):
        print("Maremotriz é top")
        viabilidade['maremotriz'] = True
    @Rule(Eolica(velocidade_vento=P(lambda v: v>25)))
    def regra_eolica(self):
        print("Eolica é top")
        viabilidade['eolica'] = True
    @Rule(Solar(latitude=P(lambda y: abs(y)<50)))
    def regra_solar(self):
        print("Solar é top")
        viabilidade['solar'] = True
    @Rule(Geotermica(temperatura_subterranea=P(lambda t: t>150)))
    def regra_geotermica(self):
        print("Geotermica é top")
        viabilidade['geotermica'] = True
    @Rule(Hidrica(area_reservatorio=P(lambda a: a>3.0)))
    def regra_hidrica(self):
        print("Hidrica é top")
        viabilidade['hidrica'] = True
# After declaring the engine, the rules and the facts, they must be instantiated.
engine = AnaliseViabilidade()
# The engine is reset to accept new facts (clearing any values left in the
# cache by a previous run).
engine.reset()
# Each fact is declared individually; several fields of the same fact can be
# passed in a single declaration.
engine.declare(Maremotriz(diferenca_mare=valores_convertidos['desnivel'],
                          proximidade_mar=valores_convertidos['proximidadeMar']))
engine.declare(Eolica(velocidade_vento=valores_convertidos['velocidadeVento']))
engine.declare(Solar(latitude=valores_convertidos['latitude']))
engine.declare(Hidrica(area_reservatorio=valores_convertidos['area']))
engine.declare(Geotermica(temperatura_subterranea=valores_convertidos['temperaturaInterna']))
# Finally, the engine is run.
engine.run()
print(viabilidade)
880bad578d9944f1ec06e580824fc923f1978b8e | 2,886 | py | Python | main.py | warifp/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | 4 | 2019-06-03T04:00:51.000Z | 2021-11-09T21:34:38.000Z | main.py | nittaya1990/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | null | null | null | main.py | nittaya1990/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | 4 | 2019-10-30T19:44:08.000Z | 2021-09-07T16:30:09.000Z | #! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 Januari 2019
#! @@Modify : 19 Januari 2019
#! Gambar dari reddit.
#! Gunakan VPN karena DNS situs reddit sudah di blokir dari negara Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
while True:
get_image()
print("Gambar sukses di upload.")
sleep(5)
os.system('pause')
| 28.574257 | 125 | 0.615731 | #! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 Januari 2019
#! @@Modify : 19 Januari 2019
#! Gambar dari reddit.
#! Gunakan VPN karena DNS situs reddit sudah di blokir dari negara Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
def get_image():
    """Fetch the newest r/me_irl posts and queue unseen i.redd.it images.

    Each image URL not yet in the module-level ``useable`` list is appended
    to it and immediately handed to :func:`download` (which downloads the
    most recently queued URL).
    """
    print("Memulai mendapatkan gambar ..")
    json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json()
    # Walk the listing's children directly (idiomatic: no index arithmetic)
    # and keep only direct i.redd.it image links.
    for child in json_raw['data']['children']:
        url = child['data']['url']
        if "https://i.redd.it/" not in url:
            continue
        if url not in useable:
            useable.append(url)
            download()
def download():
    """Download the most recently queued image and hand it to :func:`upload`.

    Non-JPEG images are converted to an RGB JPEG saved as ``edit<name>jpg``
    (matching the original naming scheme) before uploading, since the
    Instagram upload expects JPEG input.
    """
    print("Memulai mendapatkan gambar ..".replace("mendapatkan gambar", "download")) if False else print("Memulai download ..")
    global filename
    filename = useable[-1].replace("https://i.redd.it/", "")
    print(filename)
    # Use a context manager instead of manual open/close.
    with open(filename, 'wb') as f:
        f.write(requests.get(useable[-1]).content)
    # filename[-3:] replaces the original character-by-character comparison.
    if filename[-3:] != 'jpg':
        im = Image.open(filename)
        # Slicing replaces the original O(n**2) character-append loop:
        # strip the 3-char extension, add the "edit" prefix and "jpg".
        new_filename = "edit" + filename[:-3] + "jpg"
        im = im.convert("RGB")
        im.save(new_filename)
        print(new_filename)
    else:
        new_filename = filename
    upload(new_filename)
def delete_image(bad_file):
    """Remove the uploaded image (and its pre-conversion leftover, if any)
    from disk, then pace the next cycle via :func:`wait`.

    :param bad_file: local filename that was just uploaded; converted files
        carry an "edit" prefix and shadow an original non-JPEG download.
    """
    print("Memulai menghapus gambar ..")
    # startswith replaces the original bad_file[0]+...+bad_file[3] check and
    # no longer raises IndexError on names shorter than 4 characters.
    if bad_file.startswith("edit"):
        # Slicing replaces the original O(n**2) character-append loop.
        # NOTE(review): this keeps the "edit" prefix, so it usually misses
        # the real downloaded .png; delete_png() below sweeps any leftovers.
        # Preserved as-is to keep behavior identical.
        png_bad_file = bad_file[:-3] + "png"
        try:
            os.remove(png_bad_file)
        except Exception:
            # Best-effort: the source file may already be gone.
            pass
    os.remove(bad_file)
    delete_png()
    print("Selesai.")
    wait()
def upload(file):
    """Post *file* to Instagram with an empty caption, then delete it locally.

    `InstagramAPI` here is the module-level, already-logged-in instance.
    """
    print("Memulai upload ..")
    caption = ""
    InstagramAPI.uploadPhoto(file, caption=caption)
    delete_image(file)
def wait():
    """Block for 30 minutes (1800 one-second sleeps) behind a progress bar."""
    for i in progressbar.progressbar(range(1800)):
        sleep(1)
# Main loop: fetch and upload one image per cycle; the 30-minute pacing
# happens inside the call chain (delete_image -> wait).
while True:
    get_image()
    print("Gambar sukses di upload.")
    sleep(5)
    os.system('pause')  # NOTE(review): Windows-only; blocks until a key is pressed
| 1,986 | 0 | 115 |
66a485d2b69e5ed6b33aeebb70db814ec8baae6b | 3,101 | py | Python | examples/simple/simple_ep.py | thomashopkins32/LEAP | aafada03a261543f35ebfbb9b1ef7140cb079f6c | [
"AFL-3.0"
] | 53 | 2018-10-04T18:01:22.000Z | 2022-03-24T05:24:37.000Z | examples/simple/simple_ep.py | thomashopkins32/LEAP | aafada03a261543f35ebfbb9b1ef7140cb079f6c | [
"AFL-3.0"
] | 180 | 2018-10-11T21:36:57.000Z | 2022-03-25T17:38:33.000Z | examples/simple/simple_ep.py | thomashopkins32/LEAP | aafada03a261543f35ebfbb9b1ef7140cb079f6c | [
"AFL-3.0"
] | 8 | 2019-09-27T23:11:26.000Z | 2021-09-29T21:15:35.000Z | #!/usr/bin/env python3
"""
This implements a simple Evolutionary Programming (EP) system, but it
does not evolve state machines as done with the original EP approach.
TODO convert to a state machines problem
"""
import os
from toolz import pipe
from leap_ec import Individual, context, test_env_var
from leap_ec import ops, util
from leap_ec.decoder import IdentityDecoder
from leap_ec.real_rep.problems import SpheroidProblem
from leap_ec.real_rep.initializers import create_real_vector
from leap_ec.real_rep.ops import mutate_gaussian
def print_population(population, generation):
    """Pretty-print a population: one line per individual.

    Each line shows the generation number, the individual's genome, and its
    fitness.

    :param population: iterable of individuals exposing ``genome`` and
        ``fitness`` attributes.
    :param generation: generation number to prefix every line with.
    :return: None
    """
    for member in population:
        print(generation, member.genome, member.fitness)
BROOD_SIZE = 3 # how many offspring each parent will reproduce
if __name__ == '__main__':
    # Define the real value bounds for initializing the population. In this case,
    # we define a genome of four bounds.
    # the (-5.12,5.12) was what was originally used for this problem in
    # Ken De Jong's 1975 dissertation, so was used for historical reasons.
    bounds = [(-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12)]
    # Random initial population of five real-vector individuals, minimizing
    # the spheroid (sum-of-squares) objective.
    parents = Individual.create_population(5,
                                           initialize=create_real_vector(
                                               bounds),
                                           decoder=IdentityDecoder(),
                                           problem=SpheroidProblem(
                                               maximize=False))
    # Evaluate initial population
    parents = Individual.evaluate_population(parents)
    # print initial, random population
    print_population(parents, generation=0)
    # When running the test harness, just run for two generations
    # (we use this to quickly ensure our examples don't get bitrot)
    if os.environ.get(test_env_var, False) == 'True':
        max_generation = 2
    else:
        max_generation = 100
    # Set up a generation counter using the default global context variable
    generation_counter = util.inc_generation()
    while generation_counter.generation() < max_generation:
        # (mu + lambda)-style step: every parent is cloned and mutated to
        # produce BROOD_SIZE offspring each; truncation selection then keeps
        # the best len(parents) out of parents + offspring.
        offspring = pipe(parents,
                         ops.cyclic_selection,
                         ops.clone,
                         mutate_gaussian(std=.1, expected_num_mutations='isotropic'),
                         ops.evaluate,
                         # create the brood
                         ops.pool(size=len(parents) * BROOD_SIZE),
                         # mu + lambda
                         ops.truncation_selection(size=len(parents),
                                                  parents=parents))
        parents = offspring
        generation_counter() # increment to the next generation
    # Just to demonstrate that we can also get the current generation from
    # the context
    print_population(parents, context['leap']['generation'])
| 37.361446 | 85 | 0.6198 | #!/usr/bin/env python3
"""
This implements a simple Evolutionary Programming (EP) system, but it
does not evolve state machines as done with the original EP approach.
TODO convert to a state machines problem
"""
import os
from toolz import pipe
from leap_ec import Individual, context, test_env_var
from leap_ec import ops, util
from leap_ec.decoder import IdentityDecoder
from leap_ec.real_rep.problems import SpheroidProblem
from leap_ec.real_rep.initializers import create_real_vector
from leap_ec.real_rep.ops import mutate_gaussian
def print_population(population, generation):
""" Convenience function for pretty printing a population that's
associated with a given generation
:param population:
:param generation:
:return: None
"""
for individual in population:
print(generation, individual.genome, individual.fitness)
BROOD_SIZE = 3 # how many offspring each parent will reproduce
if __name__ == '__main__':
# Define the real value bounds for initializing the population. In this case,
# we define a genome of four bounds.
# the (-5.12,5.12) was what was originally used for this problem in
# Ken De Jong's 1975 dissertation, so was used for historical reasons.
bounds = [(-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12), (-5.12, 5.12)]
parents = Individual.create_population(5,
initialize=create_real_vector(
bounds),
decoder=IdentityDecoder(),
problem=SpheroidProblem(
maximize=False))
# Evaluate initial population
parents = Individual.evaluate_population(parents)
# print initial, random population
print_population(parents, generation=0)
# When running the test harness, just run for two generations
# (we use this to quickly ensure our examples don't get bitrot)
if os.environ.get(test_env_var, False) == 'True':
max_generation = 2
else:
max_generation = 100
# Set up a generation counter using the default global context variable
generation_counter = util.inc_generation()
while generation_counter.generation() < max_generation:
offspring = pipe(parents,
ops.cyclic_selection,
ops.clone,
mutate_gaussian(std=.1, expected_num_mutations='isotropic'),
ops.evaluate,
# create the brood
ops.pool(size=len(parents) * BROOD_SIZE),
# mu + lambda
ops.truncation_selection(size=len(parents),
parents=parents))
parents = offspring
generation_counter() # increment to the next generation
# Just to demonstrate that we can also get the current generation from
# the context
print_population(parents, context['leap']['generation'])
| 0 | 0 | 0 |
3274152719814b53020602fee540527b2afc4a45 | 481 | py | Python | intervention_herd/intervention_herd/make_rand_pop.py | molkjar/bachelor | a0591691b820c6c8a45d16f8d55f3a7e80ea384b | [
"MIT"
] | null | null | null | intervention_herd/intervention_herd/make_rand_pop.py | molkjar/bachelor | a0591691b820c6c8a45d16f8d55f3a7e80ea384b | [
"MIT"
] | null | null | null | intervention_herd/intervention_herd/make_rand_pop.py | molkjar/bachelor | a0591691b820c6c8a45d16f8d55f3a7e80ea384b | [
"MIT"
] | null | null | null | import covasim as cv
import pandas as pd
import sciris as sc
import numpy as np
import population_random as pr
if __name__ == '__main__':
    #Without dispersion
    # Fixed seed so the generated population is reproducible.
    cv.set_seed(1)
    people = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=None)
    sc.saveobj('randppl.pop', people)
    # With dispersion
    # Same seed; dispersion=1.5 changes the contact-count distribution
    # (see population_random.generate_people — TODO confirm exact semantics).
    cv.set_seed(1)
    peopleDisp = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=1.5)
    sc.saveobj('randppl_disp.pop', peopleDisp)
| 21.863636 | 82 | 0.721414 | import covasim as cv
import pandas as pd
import sciris as sc
import numpy as np
import population_random as pr
if __name__ == '__main__':
#Without dispersion
cv.set_seed(1)
people = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=None)
sc.saveobj('randppl.pop', people)
# With dispersion
cv.set_seed(1)
peopleDisp = pr.generate_people(n_people=200e3, n_contacts=20, dispersion=1.5)
sc.saveobj('randppl_disp.pop', peopleDisp)
| 0 | 0 | 0 |
0321cbc0cd8aae6294dc88fca152c51fa61272fc | 978 | py | Python | LeetCode/069 Sqrt(x).py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/069 Sqrt(x).py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/069 Sqrt(x).py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | # Math; Binary Search
# Implement int sqrt(int x).
#
# Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
#
# Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
#
# Example 1:
#
# Input: 4
# Output: 2
# Example 2:
#
# Input: 8
# Output: 2
# Explanation: The square root of 8 is 2.82842..., and since
# the decimal part is truncated, 2 is returned.
| 25.736842 | 124 | 0.52863 | # Math; Binary Search
# Implement int sqrt(int x).
#
# Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
#
# Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
#
# Example 1:
#
# Input: 4
# Output: 2
# Example 2:
#
# Input: 8
# Output: 2
# Explanation: The square root of 8 is 2.82842..., and since
# the decimal part is truncated, 2 is returned.
class Solution:
    def mySqrt(self, x):
        """
        Return the integer square root of x (decimal digits truncated).

        Binary search over [1, x], narrowing with mid+1 / mid-1 so the loop
        always makes progress; O(log x).

        Fixes in this revision: the original fell through to `return False`
        (wrong type for the declared int return), and updated its bounds to
        `m` instead of `m±1`, which is non-standard and slower to converge.

        :type x: int
        :rtype: int
        """
        if x <= 1:
            return x
        left, right = 1, x
        while left <= right:
            mid = (left + right) // 2
            if mid * mid <= x:
                # mid is a valid candidate; search for a larger one.
                left = mid + 1
            else:
                right = mid - 1
        # right is the largest value whose square does not exceed x.
        return right
| 0 | 480 | 22 |
c78ae29735b85b179be5cc23fb9f757749681fab | 1,645 | py | Python | compilador.py | RevertonLuis/NavierStokesSpaceTimeParallelProjectionMethod | 23fbf3b01a34c244d8928fae4ad9d12bffef8d07 | [
"MIT"
] | 1 | 2021-03-02T16:28:12.000Z | 2021-03-02T16:28:12.000Z | compilador.py | RevertonLuis/NavierStokesSpaceTimeParallelProjectionMethod | 23fbf3b01a34c244d8928fae4ad9d12bffef8d07 | [
"MIT"
] | null | null | null | compilador.py | RevertonLuis/NavierStokesSpaceTimeParallelProjectionMethod | 23fbf3b01a34c244d8928fae4ad9d12bffef8d07 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
fontes = ["funcoes_abstratas.f90",
"funcoes_alias.f90",
'matriz_A.f90',
'gs.f90',
'extrapolacoes_de_u_e_v.f90',
'fontes_subrotinas.f90',
'class_array_subrotinas.f90',
'residuo.f90',
'variaveis_solvers_p.f90',
'variaveis_solvers_u.f90',
'variaveis_solvers_v.f90',
'mg_gs_u.f90',
'mg_gs_v.f90',
'subrotinas_mg_gs.f90',
"class_u.f90",
"class_v.f90",
"class_p.f90",
"variaveis_gerais.f90",
"navier_stokes_inout.f90",
"experimentos_numericos.f90",
"inicializacoes.f90",
"NavierStokes.f90"]
executavel = "main"
if "linux" in sys.platform:
compilador = "/usr/bin/gfortran"
os.system("clear")
else:
compilador = "gfortran"
os.system("cls")
compilacao = [compilador]
for f in fontes:
compilacao.append(f)
compilacao.append("-o")
compilacao.append(executavel)
# Openmp flag
compilacao.append("-fopenmp")
# 132 caracteres flag
compilacao.append("-ffree-line-length-none")
if "win" in sys.platform:
executavel = executavel + ".exe"
if os.path.exists(executavel):
os.remove(executavel)
p = subprocess.Popen(compilacao)
p.wait()
# Removendo os temporarios
for f in os.listdir("./"):
if f.split(".")[-1] == "mod":
os.remove(f)
# Executando e enviando a saida para o log
if os.path.exists(executavel):
flag = ""
if "linux" in sys.platform:
flag = "./"
os.system(flag + "%s" % (executavel))
else:
print("\nExecutavel nao compilado\n")
| 22.22973 | 44 | 0.606687 | import os
import subprocess
import sys
fontes = ["funcoes_abstratas.f90",
"funcoes_alias.f90",
'matriz_A.f90',
'gs.f90',
'extrapolacoes_de_u_e_v.f90',
'fontes_subrotinas.f90',
'class_array_subrotinas.f90',
'residuo.f90',
'variaveis_solvers_p.f90',
'variaveis_solvers_u.f90',
'variaveis_solvers_v.f90',
'mg_gs_u.f90',
'mg_gs_v.f90',
'subrotinas_mg_gs.f90',
"class_u.f90",
"class_v.f90",
"class_p.f90",
"variaveis_gerais.f90",
"navier_stokes_inout.f90",
"experimentos_numericos.f90",
"inicializacoes.f90",
"NavierStokes.f90"]
executavel = "main"
if "linux" in sys.platform:
compilador = "/usr/bin/gfortran"
os.system("clear")
else:
compilador = "gfortran"
os.system("cls")
compilacao = [compilador]
for f in fontes:
compilacao.append(f)
compilacao.append("-o")
compilacao.append(executavel)
# Openmp flag
compilacao.append("-fopenmp")
# 132 caracteres flag
compilacao.append("-ffree-line-length-none")
if "win" in sys.platform:
executavel = executavel + ".exe"
if os.path.exists(executavel):
os.remove(executavel)
p = subprocess.Popen(compilacao)
p.wait()
# Removendo os temporarios
for f in os.listdir("./"):
if f.split(".")[-1] == "mod":
os.remove(f)
# Executando e enviando a saida para o log
if os.path.exists(executavel):
flag = ""
if "linux" in sys.platform:
flag = "./"
os.system(flag + "%s" % (executavel))
else:
print("\nExecutavel nao compilado\n")
| 0 | 0 | 0 |
486c6736e43246d360dbfa3421cf92c6a8d90770 | 8,057 | py | Python | s3splitmerge/tests/run.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | s3splitmerge/tests/run.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | s3splitmerge/tests/run.py | MacHu-GWU/s3splitmerge-project | 873892158f4a2d0ee20f291e5d3b2a80f0bae1ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import typing
import awswrangler as wr
from .data import (
create_s3_csv_file,
create_s3_json_file,
create_many_parquet_file,
create_many_json_file,
)
from ..merge import (
merge_parquet_by_prefix,
merge_json_by_prefix,
)
from ..helpers import (
is_s3_object_exists,
)
def run_test_split_csv(
    boto_ses,
    n_k_rows: int,
    header: bool,
    source_bucket: str,
    source_key: str,
    target_bucket: str,
    target_key: str,
    target_size_or_rows: int,
    split_csv_func: typing.Callable,
    force_redo: bool,
):
    """
    A parameterized split_csv_... function unit test executor.

    Creates (if needed) a source CSV of ``n_k_rows * 1000`` rows, runs
    ``split_csv_func`` to split it into ``target_key.format(i=...)`` parts,
    then checks that the parts carry contiguous ``id`` ranges and that no
    rows were lost.  ``target_key`` must contain an ``{i}`` placeholder and
    end with ``{i}.csv``.  ``target_size_or_rows`` is forwarded verbatim to
    ``split_csv_func`` (bytes-per-part or rows-per-part, depending on the
    function under test).  ``force_redo`` recreates source and targets even
    if they already exist.
    """
    s3_client = boto_ses.client("s3")
    # Create single source csv file if not exists
    if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
        create_s3_csv_file(
            boto_ses=boto_ses,
            n_k_rows=n_k_rows,
            header=header,
            bucket=source_bucket,
            key=source_key,
        )
    # If first target file doesn't exist, execute split csv
    first_target_key = target_key.format(i=1)
    if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
        split_csv_func(
            s3_client,
            source_bucket,
            source_key,
            target_bucket,
            target_key,
            target_size_or_rows,
            header,
        )
    # Verify small target csv files
    common_target_key_prefix = target_key.replace("{i}.csv", "")
    response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
    n_rows_total = 0
    previous_last_id = None
    # Headerless files are read positionally; the id is then column 0.
    if header:
        read_csv_additional_kwargs = {}
    else:
        read_csv_additional_kwargs = {"header": None}
    for nth_file, obj_meta in enumerate(response["Contents"]):
        nth_file += 1
        key = obj_meta["Key"]
        df = wr.s3.read_csv(
            path=f"s3://{target_bucket}/{key}",
            boto3_session=boto_ses,
            **read_csv_additional_kwargs
        )
        n_rows = df.shape[0]
        if header:
            first_id = df["id"].head(1).tolist()[0]
            last_id = df["id"].tail(1).tolist()[0]
        else:
            first_id = df[df.columns[0]].head(1).tolist()[0]
            last_id = df[df.columns[0]].tail(1).tolist()[0]
        n_rows_total += n_rows
        # Contiguity: each part must start exactly where the previous ended.
        # NOTE(review): assumes list_objects returns parts in split order —
        # true for lexicographic {i} up to 9 parts; verify for wider splits.
        if nth_file != 1:
            assert previous_last_id + 1 == first_id
        previous_last_id = last_id
    assert n_rows_total == n_k_rows * 1000
def run_test_split_json(
    boto_ses,
    n_k_rows: int,
    source_bucket: str,
    source_key: str,
    target_bucket: str,
    target_key: str,
    target_size_or_rows: int,
    split_json_func: typing.Callable,
    force_redo: bool,
):
    """
    A parameterized split_json_... function unit test executor.

    Creates (if needed) a JSON-lines source object of ``n_k_rows * 1000``
    rows, runs ``split_json_func`` to split it into
    ``target_key.format(i=...)`` parts, then checks that the parts carry
    contiguous ``id`` ranges and that no rows were lost.  ``target_key``
    must contain an ``{i}`` placeholder and end with ``{i}.json``.
    ``target_size_or_rows`` is forwarded verbatim to ``split_json_func``.
    ``force_redo`` recreates source and targets even if they already exist.
    """
    s3_client = boto_ses.client("s3")
    # Create the single source JSON file if it does not exist yet.
    if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
        create_s3_json_file(
            boto_ses=boto_ses,
            n_k_rows=n_k_rows,
            bucket=source_bucket,
            key=source_key,
        )
    # If the first target part doesn't exist, execute the split.
    first_target_key = target_key.format(i=1)
    if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
        split_json_func(
            s3_client,
            source_bucket,
            source_key,
            target_bucket,
            target_key,
            target_size_or_rows,
        )
    # Verify the small target JSON files.
    common_target_key_prefix = target_key.replace("{i}.json", "")
    response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
    n_rows_total = 0
    previous_last_id = None
    for nth_file, obj_meta in enumerate(response["Contents"]):
        nth_file += 1
        key = obj_meta["Key"]
        df = wr.s3.read_json(
            path=f"s3://{target_bucket}/{key}",
            orient="records",
            lines=True,
            # Consistency fix: use the caller-supplied boto3 session, as
            # run_test_split_csv already does, instead of the default session.
            boto3_session=boto_ses,
        )
        n_rows = df.shape[0]
        first_id = df["id"].head(1).tolist()[0]
        last_id = df["id"].tail(1).tolist()[0]
        n_rows_total += n_rows
        # Contiguity: each part must start exactly where the previous ended.
        if nth_file != 1:
            assert previous_last_id + 1 == first_id
        previous_last_id = last_id
    assert n_rows_total == n_k_rows * 1000
| 31.84585 | 93 | 0.601092 | # -*- coding: utf-8 -*-
import typing
import awswrangler as wr
from .data import (
create_s3_csv_file,
create_s3_json_file,
create_many_parquet_file,
create_many_json_file,
)
from ..merge import (
merge_parquet_by_prefix,
merge_json_by_prefix,
)
from ..helpers import (
is_s3_object_exists,
)
def run_test_split_csv(
boto_ses,
n_k_rows: int,
header: bool,
source_bucket: str,
source_key: str,
target_bucket: str,
target_key: str,
target_size_or_rows: int,
split_csv_func: typing.Callable,
force_redo: bool,
):
"""
A parameterized split_csv_... function unit test executor.
"""
s3_client = boto_ses.client("s3")
# Create single source csv file if not exists
if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
create_s3_csv_file(
boto_ses=boto_ses,
n_k_rows=n_k_rows,
header=header,
bucket=source_bucket,
key=source_key,
)
# If first target file dosn't exist, execute split csv
first_target_key = target_key.format(i=1)
if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
split_csv_func(
s3_client,
source_bucket,
source_key,
target_bucket,
target_key,
target_size_or_rows,
header,
)
# Verify small target csv files
common_target_key_prefix = target_key.replace("{i}.csv", "")
response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
n_rows_total = 0
previous_last_id = None
if header:
read_csv_additional_kwargs = {}
else:
read_csv_additional_kwargs = {"header": None}
for nth_file, obj_meta in enumerate(response["Contents"]):
nth_file += 1
key = obj_meta["Key"]
df = wr.s3.read_csv(
path=f"s3://{target_bucket}/{key}",
boto3_session=boto_ses,
**read_csv_additional_kwargs
)
n_rows = df.shape[0]
if header:
first_id = df["id"].head(1).tolist()[0]
last_id = df["id"].tail(1).tolist()[0]
else:
first_id = df[df.columns[0]].head(1).tolist()[0]
last_id = df[df.columns[0]].tail(1).tolist()[0]
n_rows_total += n_rows
if nth_file != 1:
assert previous_last_id + 1 == first_id
previous_last_id = last_id
assert n_rows_total == n_k_rows * 1000
def run_test_split_json(
boto_ses,
n_k_rows: int,
source_bucket: str,
source_key: str,
target_bucket: str,
target_key: str,
target_size_or_rows: int,
split_json_func: typing.Callable,
force_redo: bool,
):
"""
A parameterized split_json_... function unit test executor.
"""
s3_client = boto_ses.client("s3")
# Create single source csv file if not exists
if (force_redo) or (not is_s3_object_exists(s3_client, source_bucket, source_key)):
create_s3_json_file(
boto_ses=boto_ses,
n_k_rows=n_k_rows,
bucket=source_bucket,
key=source_key,
)
# If first target not exists, execute split csv
first_target_key = target_key.format(i=1)
if (force_redo) or (not is_s3_object_exists(s3_client, target_bucket, first_target_key)):
split_json_func(
s3_client,
source_bucket,
source_key,
target_bucket,
target_key,
target_size_or_rows,
)
# Verify small target json files
common_target_key_prefix = target_key.replace("{i}.json", "")
response = s3_client.list_objects(Bucket=target_bucket, Prefix=common_target_key_prefix)
n_rows_total = 0
previous_last_id = None
for nth_file, obj_meta in enumerate(response["Contents"]):
nth_file += 1
key = obj_meta["Key"]
df = wr.s3.read_json(
path=f"s3://{target_bucket}/{key}",
orient="records",
lines=True,
)
n_rows = df.shape[0]
first_id = df["id"].head(1).tolist()[0]
last_id = df["id"].tail(1).tolist()[0]
n_rows_total += n_rows
if nth_file != 1:
assert previous_last_id + 1 == first_id
previous_last_id = last_id
assert n_rows_total == n_k_rows * 1000
def run_test_merge_parquet(boto_ses,
                           n_files: int,
                           n_rows_per_file: int,
                           source_bucket: str,
                           source_key: str,
                           target_bucket: str,
                           target_key: str,
                           target_size: int,
                           force_redo: bool):
    """
    A parameterized merge-parquet unit test executor.

    Generates ``n_files`` small parquet files on S3, merges them with
    ``merge_parquet_by_prefix``, then verifies the merged files jointly
    contain every row exactly once with contiguous ``id`` values.

    NOTE(review): ``force_redo`` is accepted but never used here — the
    source files are always rewritten (``overwrite=True``); confirm intent.
    """
    s3_client = boto_ses.client("s3")

    # Generate the many small source parquet files (always overwritten).
    create_many_parquet_file(
        boto_ses,
        n_files=n_files,
        n_rows_per_file=n_rows_per_file,
        bucket=source_bucket,
        key=source_key,
        overwrite=True,
    )

    # Merge every file under the common source prefix.
    source_prefix = source_key.replace("{i}.parquet", "")
    merge_parquet_by_prefix(
        boto3_session=boto_ses,
        source_bucket=source_bucket,
        source_key_prefix=source_prefix,
        target_bucket=target_bucket,
        target_key=target_key,
        target_size=target_size,
    )

    # Verify the merged parquet files.
    target_prefix = target_key.replace("{i}.parquet", "")
    listing = s3_client.list_objects(Bucket=target_bucket, Prefix=target_prefix)
    total_rows = 0
    prev_last_id = None
    for nth_file, obj_meta in enumerate(listing["Contents"], start=1):
        df = wr.s3.read_parquet(
            path=f"s3://{target_bucket}/{obj_meta['Key']}",
            boto3_session=boto_ses,
        )
        total_rows += df.shape[0]
        first_id = df["id"].iloc[0]
        last_id = df["id"].iloc[-1]
        # Ids must continue exactly where the previous file stopped.
        if nth_file > 1:
            assert prev_last_id + 1 == first_id
        prev_last_id = last_id
    assert total_rows == n_files * n_rows_per_file
def run_test_merge_json(boto_ses,
                        n_files: int,
                        n_rows_per_file: int,
                        source_bucket: str,
                        source_key: str,
                        target_bucket: str,
                        target_key: str,
                        target_size: int,
                        force_redo: bool):
    """
    A parameterized merge-json unit test executor.

    Generates ``n_files`` small JSON-lines files on S3, merges them with
    ``merge_json_by_prefix``, then verifies the merged files jointly contain
    every row exactly once with contiguous ``id`` values.

    NOTE(review): ``force_redo`` is accepted but never used here — the
    source files are always rewritten (``overwrite=True``); confirm intent.
    """
    s3_client = boto_ses.client("s3")

    # Generate the many small source json files (always overwritten).
    create_many_json_file(
        boto_ses,
        n_files=n_files,
        n_rows_per_file=n_rows_per_file,
        bucket=source_bucket,
        key=source_key,
        overwrite=True,
    )

    # Merge every file under the common source prefix.
    source_prefix = source_key.replace("{i}.json", "")
    merge_json_by_prefix(
        s3_client=s3_client,
        source_bucket=source_bucket,
        source_key_prefix=source_prefix,
        target_bucket=target_bucket,
        target_key=target_key,
        target_size=target_size,
    )

    # Verify the merged json files.
    target_prefix = target_key.replace("{i}.json", "")
    listing = s3_client.list_objects(Bucket=target_bucket, Prefix=target_prefix)
    total_rows = 0
    prev_last_id = None
    for nth_file, obj_meta in enumerate(listing["Contents"], start=1):
        df = wr.s3.read_json(
            path=f"s3://{target_bucket}/{obj_meta['Key']}",
            orient="records",
            lines=True,
            boto3_session=boto_ses,
        )
        total_rows += df.shape[0]
        first_id = df["id"].iloc[0]
        last_id = df["id"].iloc[-1]
        # Ids must continue exactly where the previous file stopped.
        if nth_file > 1:
            assert prev_last_id + 1 == first_id
        prev_last_id = last_id
    assert total_rows == n_files * n_rows_per_file
| 3,594 | 0 | 46 |
1a0d56e351e57ca087fab587b737387fabc26c12 | 12,731 | py | Python | pymilvus_orm/utility.py | scipe/pymilvus-orm | cdcf5b7c99430d9c7e6da16556964323cdb58e80 | [
"Apache-2.0"
] | null | null | null | pymilvus_orm/utility.py | scipe/pymilvus-orm | cdcf5b7c99430d9c7e6da16556964323cdb58e80 | [
"Apache-2.0"
] | null | null | null | pymilvus_orm/utility.py | scipe/pymilvus-orm | cdcf5b7c99430d9c7e6da16556964323cdb58e80 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from .connections import get_connection
from .exceptions import ConnectionNotExistException, ExceptionsMessage
def loading_progress(collection_name, partition_names=None, using="default"):
    """
    Show #loaded entities vs #total entities.
    :param collection_name: The name of the collection being loaded
    :type collection_name: str
    :param partition_names: The names of the partitions being loaded
    :type partition_names: str list
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return dict:
        Loading progress is a dict contains num of loaded entities and num of total entities.
        {'num_loaded_entities':loaded_segments_nums, 'num_total_entities': total_segments_nums}
    :raises PartitionNotExistException: If partition doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load() # load collection to memory
        >>> utility.loading_progress("test_collection")
    """
    # No partitions given -> report progress for the whole collection;
    # otherwise report progress for the named partitions only.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).load_collection_progress(collection_name)
    return _get_connection(using).load_partitions_progress(collection_name, partition_names)
def wait_for_loading_complete(collection_name, partition_names=None, timeout=None, using="default"):
    """
    Block until loading is done or Raise Exception after timeout.
    :param collection_name: The name of the collection to wait for loading complete
    :type collection_name: str
    :param partition_names: The names of the partitions to wait for loading complete
    :type partition_names: str list
    :param timeout: The timeout for this method, unit: second
    :type timeout: int
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises PartitionNotExistException: If partition doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load() # load collection to memory
        >>> utility.wait_for_loading_complete("test_collection")
    """
    # Wait on the whole collection when no partitions are named;
    # otherwise wait only on the listed partitions.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).wait_for_loading_collection_complete(collection_name, timeout)
    return _get_connection(using).wait_for_loading_partitions_complete(collection_name,
                                                                       partition_names,
                                                                       timeout)
def index_building_progress(collection_name, index_name="", using="default"):
    """
    Show # indexed entities vs. # total entities.
    :param collection_name: The name of the collection the index is being built on
    :type collection_name: str
    :param index_name: The name of the index being built.
                       The default index is used if index_name is not specified.
    :type index_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return dict:
        Index building progress is a dict contains num of indexed entities and num of total
        entities.
        {'total_rows':total_rows,'indexed_rows':indexed_rows}
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises IndexNotExistException: If index doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
        >>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
        >>> collection.insert(data)
        >>> index_param = {
        >>>     "metric_type": "L2",
        >>>     "index_type": "IVF_FLAT",
        >>>     "params": {"nlist": 1024}
        >>> }
        >>> collection.create_index("float_vector", index_param)
        >>> utility.index_building_progress("test_collection", "")
    """
    return _get_connection(using).get_index_build_progress(collection_name, index_name)
def wait_for_index_building_complete(collection_name, index_name="", timeout=None, using="default"):
    """
    Block until building is done or Raise Exception after timeout.
    :param collection_name: The name of the collection to wait on
    :type collection_name: str
    :param index_name: The name of the index to wait on
    :type index_name: str
    :param timeout: The timeout for this method, unit: second
    :type timeout: int
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises IndexNotExistException: If index doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
        >>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
        >>> collection.insert(data)
        >>> index_param = {
        >>>     "metric_type": "L2",
        >>>     "index_type": "IVF_FLAT",
        >>>     "params": {"nlist": 1024}
        >>> }
        >>> collection.create_index("float_vector", index_param)
        >>> utility.wait_for_index_building_complete("test_collection")
    """
    return _get_connection(using).wait_for_creating_index(collection_name, index_name, timeout)
def has_collection(collection_name, using="default"):
    """
    Checks whether a specified collection exists.
    :param collection_name: The name of the collection to check.
    :type collection_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return bool:
        Whether the collection exists.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> utility.has_collection("test_collection")
    """
    return _get_connection(using).has_collection(collection_name)
def has_partition(collection_name, partition_name, using="default"):
    """
    Checks if a specified partition exists in a collection.
    :param collection_name: The collection name of the partition to check
    :type collection_name: str
    :param partition_name: The name of the partition to check.
    :type partition_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return bool:
        Whether the partition exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> utility.has_partition("test_collection", "_default")
    """
    return _get_connection(using).has_partition(collection_name, partition_name)
def list_collections(timeout=None, using="default") -> list:
    """
    Returns a list of all collection names.
    :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                    is set to None, client waits until server response or error occur.
    :type timeout: float
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return list[str]:
        List of collection names, return when operation is successful
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> utility.list_collections()
    """
    # Bug fix: ``timeout`` was documented but never forwarded to the RPC.
    return _get_connection(using).list_collections(timeout)
| 48.406844 | 139 | 0.684392 | # Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from .connections import get_connection
from .exceptions import ConnectionNotExistException, ExceptionsMessage
def _get_connection(alias):
    """Return the connection registered under *alias*.

    Raises ConnectionNotExistException when no connection exists for the alias.
    """
    connection = get_connection(alias)
    if connection is None:
        raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst)
    return connection
def loading_progress(collection_name, partition_names=None, using="default"):
    """
    Show #loaded entities vs #total entities.
    :param collection_name: The name of the collection being loaded
    :type collection_name: str
    :param partition_names: The names of the partitions being loaded
    :type partition_names: str list
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return dict:
        Loading progress is a dict contains num of loaded entities and num of total entities.
        {'num_loaded_entities':loaded_segments_nums, 'num_total_entities': total_segments_nums}
    :raises PartitionNotExistException: If partition doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load() # load collection to memory
        >>> utility.loading_progress("test_collection")
    """
    # No partitions given -> report progress for the whole collection;
    # otherwise report progress for the named partitions only.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).load_collection_progress(collection_name)
    return _get_connection(using).load_partitions_progress(collection_name, partition_names)
def wait_for_loading_complete(collection_name, partition_names=None, timeout=None, using="default"):
    """
    Block until loading is done or Raise Exception after timeout.
    :param collection_name: The name of the collection to wait for loading complete
    :type collection_name: str
    :param partition_names: The names of the partitions to wait for loading complete
    :type partition_names: str list
    :param timeout: The timeout for this method, unit: second
    :type timeout: int
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises PartitionNotExistException: If partition doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
        >>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
        >>> collection.insert(data)
        >>> collection.load() # load collection to memory
        >>> utility.wait_for_loading_complete("test_collection")
    """
    # Wait on the whole collection when no partitions are named;
    # otherwise wait only on the listed partitions.
    if not partition_names or len(partition_names) == 0:
        return _get_connection(using).wait_for_loading_collection_complete(collection_name, timeout)
    return _get_connection(using).wait_for_loading_partitions_complete(collection_name,
                                                                       partition_names,
                                                                       timeout)
def index_building_progress(collection_name, index_name="", using="default"):
    """
    Show # indexed entities vs. # total entities.
    :param collection_name: The name of the collection the index is being built on
    :type collection_name: str
    :param index_name: The name of the index being built.
                       The default index is used if index_name is not specified.
    :type index_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return dict:
        Index building progress is a dict contains num of indexed entities and num of total
        entities.
        {'total_rows':total_rows,'indexed_rows':indexed_rows}
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises IndexNotExistException: If index doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
        >>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
        >>> collection.insert(data)
        >>> index_param = {
        >>>     "metric_type": "L2",
        >>>     "index_type": "IVF_FLAT",
        >>>     "params": {"nlist": 1024}
        >>> }
        >>> collection.create_index("float_vector", index_param)
        >>> utility.index_building_progress("test_collection", "")
    """
    return _get_connection(using).get_index_build_progress(collection_name, index_name)
def wait_for_index_building_complete(collection_name, index_name="", timeout=None, using="default"):
    """
    Block until building is done or Raise Exception after timeout.
    :param collection_name: The name of the collection to wait on
    :type collection_name: str
    :param index_name: The name of the index to wait on
    :type index_name: str
    :param timeout: The timeout for this method, unit: second
    :type timeout: int
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :raises CollectionNotExistException: If collection doesn't exist.
    :raises IndexNotExistException: If index doesn't exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> import random
        >>> import pandas as pd
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> int64_series = pd.Series(data=list(range(5000, 10000)), index=list(range(5000)))
        >>> vectors = [[random.random() for _ in range(_DIM)] for _ in range(5000)]
        >>> data = pd.DataFrame({"int64" : int64_series, "float_vector": vectors})
        >>> collection.insert(data)
        >>> index_param = {
        >>>     "metric_type": "L2",
        >>>     "index_type": "IVF_FLAT",
        >>>     "params": {"nlist": 1024}
        >>> }
        >>> collection.create_index("float_vector", index_param)
        >>> utility.wait_for_index_building_complete("test_collection")
    """
    return _get_connection(using).wait_for_creating_index(collection_name, index_name, timeout)
def has_collection(collection_name, using="default"):
    """
    Checks whether a specified collection exists.
    :param collection_name: The name of the collection to check.
    :type collection_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return bool:
        Whether the collection exists.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> utility.has_collection("test_collection")
    """
    return _get_connection(using).has_collection(collection_name)
def has_partition(collection_name, partition_name, using="default"):
    """
    Checks if a specified partition exists in a collection.
    :param collection_name: The collection name of the partition to check
    :type collection_name: str
    :param partition_name: The name of the partition to check.
    :type partition_name: str
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return bool:
        Whether the partition exist.
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> _DIM = 128
        >>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
        >>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
        >>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="test")
        >>> collection = Collection(name="test_collection", schema=schema)
        >>> utility.has_partition("test_collection", "_default")
    """
    return _get_connection(using).has_partition(collection_name, partition_name)
def list_collections(timeout=None, using="default") -> list:
    """
    Returns a list of all collection names.
    :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                    is set to None, client waits until server response or error occur.
    :type timeout: float
    :param using: Alias of the connection to use (default "default")
    :type using: str
    :return list[str]:
        List of collection names, return when operation is successful
    :example:
        >>> from pymilvus_orm import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
        >>> connections.connect(alias="default")
        >>> utility.list_collections()
    """
    # Bug fix: ``timeout`` was documented but never forwarded to the RPC.
    return _get_connection(using).list_collections(timeout)
| 153 | 0 | 23 |
b656d4ba8b1282042b0f0afbfafd94533471b4d7 | 293 | py | Python | 04_devops/02_unittest/mock/file_util.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | 04_devops/02_unittest/mock/file_util.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | 04_devops/02_unittest/mock/file_util.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | import json
from collections import OrderedDict
from pandas.io.json import json_normalize
| 26.636364 | 73 | 0.795222 | import json
from collections import OrderedDict
from pandas.io.json import json_normalize
def get_data_frame_from_json_file(json_file_path):
    """Read a JSON file and flatten it into a pandas DataFrame.

    Key order from the file is preserved by parsing with OrderedDict, and
    nested objects are flattened into dotted column names by json_normalize.

    :param json_file_path: path of the JSON document to load
    :return: a flat ``pandas.DataFrame``
    """
    # json.load streams the file directly; explicit utf-8 avoids
    # platform-dependent default encodings.
    with open(json_file_path, encoding="utf-8") as f:
        json_object = json.load(f, object_pairs_hook=OrderedDict)
    return json_normalize(json_object)
| 178 | 0 | 23 |
a3d9228044efb6c05033803e028e1c86272deb18 | 3,595 | py | Python | pyocd/rtos/provider.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | 1 | 2022-02-13T13:47:49.000Z | 2022-02-13T13:47:49.000Z | pyocd/rtos/provider.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | null | null | null | pyocd/rtos/provider.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | null | null | null | # pyOCD debugger
# Copyright (c) 2016 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger(__name__)
class TargetThread(object):
    """@brief Base class representing a thread on the target."""
    # NOTE(review): this copy of the class appears truncated — the accessor
    # bodies under these decorators were stripped, leaving bare @property
    # lines that are not valid Python on their own. See the complete
    # definition of TargetThread later in this file for the full accessors.
    @property
    @property
    @property
    @property
    @property
class ThreadProvider(object):
    """@brief Base class for RTOS support plugins."""
    # NOTE(review): this copy of the class appears truncated — several method
    # bodies were stripped, leaving bare decorator lines below. See the
    # complete definition of ThreadProvider later in this file.
    def init(self, symbolProvider):
        """@retval True The provider was successfully initialized.
        @retval False The provider could not be initialized successfully.
        """
        raise NotImplementedError()
    @property
    @read_from_target.setter
    @property
    @property
    def get_current_thread_id(self):
        """From GDB's point of view, where Handler Mode is a thread"""
        raise NotImplementedError()
    def get_actual_current_thread_id(self):
        """From OS's point of view, so the current OS thread even in Handler Mode"""
        raise NotImplementedError()
| 28.991935 | 100 | 0.667038 | # pyOCD debugger
# Copyright (c) 2016 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger(__name__)
class TargetThread(object):
    """@brief Base class representing a thread on the target.

    Concrete RTOS plugins subclass this; every accessor below is abstract
    and raises NotImplementedError in the base implementation.
    """
    def __init__(self):
        pass
    @property
    def unique_id(self):
        """@brief Unique identifier of the thread (abstract)."""
        raise NotImplementedError()
    @property
    def name(self):
        """@brief Short name of the thread (abstract)."""
        raise NotImplementedError()
    @property
    def description(self):
        """@brief Human-readable description of the thread (abstract)."""
        raise NotImplementedError()
    @property
    def is_current(self):
        """@brief Whether this thread is the current one (abstract)."""
        raise NotImplementedError()
    @property
    def context(self):
        """@brief Context object for accessing this thread's state (abstract)."""
        raise NotImplementedError()
class ThreadProvider(object):
    """@brief Base class for RTOS support plugins."""
    def __init__(self, target):
        """@brief Constructor.
        @param target The target object; its default context is cached.
        """
        self._target = target
        self._target_context = self._target.get_target_context()
        self._last_run_token = -1  # run token seen at the last thread-list refresh
        self._read_from_target = False  # gate: only read target memory when True
    def _lookup_symbols(self, symbolList, symbolProvider):
        """@brief Resolve each symbol name to its address.
        @return Dict mapping name to address, or None if any symbol is missing.
        """
        syms = {}
        for name in symbolList:
            addr = symbolProvider.get_symbol_value(name)
            LOG.debug("Value for symbol %s = %s", name, hex(addr) if addr is not None else "<none>")
            if addr is None:
                # All symbols are required; bail out on the first miss.
                return None
            syms[name] = addr
        return syms
    def init(self, symbolProvider):
        """@retval True The provider was successfully initialized.
        @retval False The provider could not be initialized successfully.
        """
        raise NotImplementedError()
    def _build_thread_list(self):
        """@brief Rebuild the cached thread list (abstract)."""
        raise NotImplementedError()
    def _is_thread_list_dirty(self):
        """@brief Return True if the target has run since the last refresh."""
        token = self._target.run_token
        if token == self._last_run_token:
            # Target hasn't run since we last updated threads, so there is nothing to do.
            return False
        self._last_run_token = token
        return True
    def update_threads(self):
        """@brief Refresh the thread list if it is stale and reads are enabled."""
        if self._is_thread_list_dirty() and self._read_from_target:
            self._build_thread_list()
    def get_threads(self):
        """@brief Return the known threads (abstract)."""
        raise NotImplementedError()
    def get_thread(self, threadId):
        """@brief Return the thread with the given ID (abstract)."""
        raise NotImplementedError()
    def invalidate(self):
        """@brief Discard any cached thread state (abstract)."""
        raise NotImplementedError()
    @property
    def read_from_target(self):
        """@brief Whether reading thread data from the target is enabled."""
        return self._read_from_target
    @read_from_target.setter
    def read_from_target(self, value):
        if value != self._read_from_target:
            # Any enable/disable transition makes cached data stale.
            self.invalidate()
        self._read_from_target = value
    @property
    def is_enabled(self):
        """@brief Whether this provider is active (abstract)."""
        raise NotImplementedError()
    @property
    def current_thread(self):
        """@brief The currently executing thread object (abstract)."""
        raise NotImplementedError()
    def is_valid_thread_id(self, threadId):
        """@brief Return whether threadId refers to a known thread (abstract)."""
        raise NotImplementedError()
    def get_current_thread_id(self):
        """From GDB's point of view, where Handler Mode is a thread"""
        raise NotImplementedError()
    def get_actual_current_thread_id(self):
        """From OS's point of view, so the current OS thread even in Handler Mode"""
        raise NotImplementedError()
| 1,541 | 0 | 504 |
4aef6a340594c5e0b0940fd61bf81cef75f15b6f | 2,822 | py | Python | workflow/jena_functions.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
] | null | null | null | workflow/jena_functions.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
] | null | null | null | workflow/jena_functions.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
] | null | null | null | import requests
import urllib3
from sciflow import localsettings
path = "http://jena1.unfcsd.unf.edu:3030/"
dset = "SciData"
hdrs = {'Content-Type': 'application/json'}
hdrsld = {'Content-Type': 'application/ld+json'}
fpath = localsettings.ppath + "/static/files/"
def server():
    """Get the server info from the Fuseki ``$/server`` admin endpoint.

    :return: the endpoint's response parsed as JSON.
    """
    endpoint = path + "$/server"
    response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
    return response
def stats():
    """Get the statistics of the SciData dataset from the Fuseki
    ``$/stats`` admin endpoint.

    :return: the statistics document parsed as JSON.
    """
    endpoint = path + "$/stats/" + dset
    response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
    return response
def status():
    """Get the state of the SciData dataset from the Fuseki ``$/datasets``
    admin endpoint.

    :return: the ``ds.state`` field of the dataset description.
    """
    endpoint = path + "$/datasets/" + dset
    response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
    return response['ds.state']
def addgraph(file):
    """Add a JSON-LD file to the Jena dataset via the Graph Store Protocol.

    :param file: an HTTP(S) URL, an absolute local path (leading ``/``),
        or a filename relative to ``<prjroot>/static/files/``.
    :return: ``"success"`` when the server accepts the data, otherwise the
        raw response text for debugging.
    """
    if "http" in file:
        # Remote file: fetch the raw bytes over HTTP.
        http = urllib3.PoolManager()
        r = http.request('GET', file)
        data = r.data
    elif file[0] == "/":
        # Assumes file is a full local path.
        with open(file) as fp:
            data = fp.read()
    else:
        # Assumes file is in <prjroot>/static/files/.
        with open(fpath + file) as fp:
            data = fp.read()
    # create endpoint URL
    endpoint = path + dset + "/data"
    response = requests.post(endpoint, data=data, headers=hdrsld, auth=(localsettings.fuser, localsettings.fpass))
    # The Graph Store Protocol answers 200 (graph updated) or 201 (graph
    # created); the original code treated a 201 Created as a failure.
    if response.status_code in (200, 201):
        return "success"
    else:
        return response.text
def query(sparql):
    """Execute a SPARQL query against the dataset's ``/sparql`` endpoint.

    :param sparql: the SPARQL query string.
    :return: the endpoint's response parsed as JSON.
    """
    endpoint = path + dset + "/sparql"
    response = requests.post(endpoint, data={'query': sparql}, auth=(localsettings.fuser, localsettings.fpass))
    return response.json()
def update(sparql):
    """Execute a SPARQL update against the dataset's ``/update`` endpoint.

    :param sparql: the SPARQL Update string (INSERT/DELETE/...).
    :return: ``"success"`` on HTTP 200, otherwise the raw response text.
    """
    endpoint = path + dset + "/update"
    response = requests.post(endpoint, data={'update': sparql}, auth=(localsettings.fuser, localsettings.fpass))
    if response.status_code == 200:
        return "success"
    else:
        return response.text
# special functions
def tcount():
    """Count every triple in the dataset: named graphs plus default graph."""
    named_q = "SELECT (COUNT(?s) AS ?triples) WHERE { GRAPH ?g { ?s ?p ?o . }}"
    default_q = "SELECT (COUNT(?s) AS ?triples) WHERE { ?s ?p ?o . }"

    def _count_of(result):
        # Pull the single COUNT binding out of a SPARQL JSON result.
        return int(result['results']['bindings'][0]['triples']['value'])

    named_total = _count_of(query(named_q))      # across all named graphs
    default_total = _count_of(query(default_q))  # in the default graph
    return default_total + named_total
| 32.068182 | 114 | 0.634656 | import requests
import urllib3
from sciflow import localsettings
path = "http://jena1.unfcsd.unf.edu:3030/"
dset = "SciData"
hdrs = {'Content-Type': 'application/json'}
hdrsld = {'Content-Type': 'application/ld+json'}
fpath = localsettings.ppath + "/static/files/"
def server():
"""get the server info from the fuseki endpoint"""
endpoint = path + "$/server"
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response
def stats():
"""get the status of the SciData dataset from the fuseki endpoint"""
endpoint = path + "$/stats/" + dset
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response
def status():
"""get the stats of the SciData dataset from the fuseki endpoint"""
endpoint = path + "$/datasets/" + dset
response = requests.get(endpoint, headers=hdrs, auth=(localsettings.fuser, localsettings.fpass)).json()
return response['ds.state']
def addgraph(file):
""" add a file to Jena """
if "http" in file:
http = urllib3.PoolManager()
r = http.request('GET', file)
data = r.data
elif file[0] == "/":
""" assumes file is full local path """
with open(file) as fp:
data = fp.read()
else:
""" assumes file is in <prjroot>/static/files/ """
with open(fpath + file) as fp:
data = fp.read()
# create endpoint URL
endpoint = path + dset + "/data"
response = requests.post(endpoint, data=data, headers=hdrsld, auth=(localsettings.fuser, localsettings.fpass))
if response.status_code == 200:
return "success"
else:
return response.text
def query(sparql):
""" executes a SPARQL query """
endpoint = path + dset + "/sparql"
response = requests.post(endpoint, data={'query': sparql}, auth=(localsettings.fuser, localsettings.fpass))
return response.json()
def update(sparql):
""" executes a SPARQL query """
endpoint = path + dset + "/update"
response = requests.post(endpoint, data={'update': sparql}, auth=(localsettings.fuser, localsettings.fpass))
if response.status_code == 200:
return "success"
else:
return response.text
# special functions
def tcount():
""" count all triples in the dataset """
# across all named graphs
sparql = "SELECT (COUNT(?s) AS ?triples) WHERE { GRAPH ?g { ?s ?p ?o . }}"
out = query(sparql)
ncount = int(out['results']['bindings'][0]['triples']['value'])
# in default graph
sparql = "SELECT (COUNT(?s) AS ?triples) WHERE { ?s ?p ?o . }"
out = query(sparql)
dcount = int(out['results']['bindings'][0]['triples']['value'])
# all triples
acount = dcount + ncount
return acount
| 0 | 0 | 0 |
d16129aa31fa37f2f468978bdec0a57ead1ab1be | 678 | py | Python | add_categories.py | 1dot75cm/taobaobao | bce05319b57f6737a249945a75a99e09125734c3 | [
"MIT"
] | 14 | 2017-03-29T11:03:39.000Z | 2020-06-29T14:06:13.000Z | add_categories.py | 1dot75cm/taobaobao | bce05319b57f6737a249945a75a99e09125734c3 | [
"MIT"
] | 1 | 2017-12-19T09:50:30.000Z | 2017-12-19T09:50:30.000Z | add_categories.py | 1dot75cm/taobaobao | bce05319b57f6737a249945a75a99e09125734c3 | [
"MIT"
] | 4 | 2017-05-24T08:03:00.000Z | 2019-04-19T13:33:02.000Z | # -*- coding: utf-8 -*-
import pymongo
import json
if __name__ in '__main__':
main()
| 24.214286 | 65 | 0.575221 | # -*- coding: utf-8 -*-
import pymongo
import json
def main():
    """Collect the unique category chains from the ``goods`` collection and
    upsert each one into the ``categories`` collection, matching on the
    chain's leaf ``catid``.
    """
    conn = pymongo.MongoClient('127.0.0.1', 27017)
    try:
        db = conn['taobao']
        goods_coll = db['goods']
        cate_coll = db['categories']
        print('Reading categories.')
        # Serialize each chain to JSON so it can be deduplicated in a set.
        cate = {json.dumps(i['categories']) for i in goods_coll.find()}
        for j in cate:
            obj = json.loads(j)
            catid = obj[-1]['catid']
            # replace_one is the modern form of the legacy replacement-style
            # Collection.update(..., upsert=True), which was removed in PyMongo 4.
            cate_coll.replace_one({'categories.catid': {'$regex': catid}},
                                  dict(categories=obj), upsert=True)
        print('Categories:', len(cate))
    finally:
        # Close the connection even if reading or upserting raises.
        conn.close()
# The original "__name__ in '__main__'" was a substring test (it would also
# match e.g. "main"); the module-entry check must be an equality test.
if __name__ == '__main__':
    main()
| 565 | 0 | 23 |
76d56251ad43eae7317b80963b63d36463f6e5d2 | 5,977 | py | Python | populate_10_day_data.py | willBear/Insider_Monitor | 14eac71a9769ed95f7c8500fd4c6b9dfbf7ca88e | [
"MIT"
] | null | null | null | populate_10_day_data.py | willBear/Insider_Monitor | 14eac71a9769ed95f7c8500fd4c6b9dfbf7ca88e | [
"MIT"
] | null | null | null | populate_10_day_data.py | willBear/Insider_Monitor | 14eac71a9769ed95f7c8500fd4c6b9dfbf7ca88e | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from datetime import datetime
import time
import requests
from init_database_postgre import load_db_credential_info
from real_time_web_scraper import update_insider_trades, write_to_csv
insider_trades = []
trading_activity = {'B': 'Buy', 'S': 'Sell', 'O': 'Options Excersise'}
def parse_row_info(trades, trade_type):
"""
:param trades:
Contains usually 7 indexes, which are:
Ticker, Company Information, Person Filling & Position, Buy/Sell or Options Excersize, Share and Price,
Value, Trade Date & Time
:return:
"""
# Find the time now, in UTC time
now = datetime.utcnow()
# Check to see if it contains symbol and company info, otherwise use previous
if len(trades[-1]) == 0:
return
# If it contains content, that means we have a new equity / company
if len(trades[0]) > 1:
symbol = trades[0]
company = trades[1].split(' ')
company = company[0]
# Otherwise, we use the latest entry for company and symbol
else:
last_trade = insider_trades[-1]
symbol = last_trade[0]
company = last_trade[1]
# If we detect a '(' in the name, then we can parse out the position of the insider
if '(' in trades[2]:
# insider, insider_position = trades[2].split("(")
info = trades[2].split("(")
if len(info) > 2:
insider = info[0:-2]
insider_position = info[-1]
insider = insider[0].strip()
else:
insider, insider_position = trades[2].split("(")
else:
insider = trades[2]
insider_position = ''
insider = insider.strip()
insider_position = insider_position[:-1]
# Assign values to index 3 to 5 of the trades array
trade_shares, trade_price, trade_value = trades[3:6]
# Convert all values to float
trade_value = float(trade_value.replace(",", ""))
trade_shares = float(trade_shares.replace(",", ""))
trade_price = float(trade_price.replace(",", ""))
trade_date = datetime.strptime(trades[6], '%Y-%m-%d')
insider_trades.append(
[symbol, company, insider, insider_position, trade_type, trade_shares, trade_price, trade_value, trade_date,
now])
return
def find_pages_of_trades(soup_body):
"""
This function is used to determine the number of pages given from the bs4 search, it will then store all URLs
of the subsequent links of the report.
:param soup_body: Text body from BS4 that contains linkp, it will contain hrefs to all other pages of this day
:return: A list of href urls for later concatenation and length of pages
"""
length = 0
url_dict = []
for row in soup_body:
# Find all rows
urls = row.find_all('a', href=True)
for row in urls:
next_page_url = row['href']
# Check for redundancy
if next_page_url in url_dict:
pass
else:
# If not in the dictionary, then it is a unique link
url_dict.append(next_page_url)
length += 1
return url_dict, length
if __name__ == "__main__":
main()
# 'https://www.insider-monitor.com/insiderbuy.php?days=1'
| 34.75 | 116 | 0.628074 | from bs4 import BeautifulSoup
from datetime import datetime
import time
import requests
from init_database_postgre import load_db_credential_info
from real_time_web_scraper import update_insider_trades, write_to_csv
insider_trades = []
trading_activity = {'B': 'Buy', 'S': 'Sell', 'O': 'Options Excersise'}
def parse_row_info(trades, trade_type):
    """Parse one table row of insider-trade cells and append the result to
    the module-level ``insider_trades`` list.

    :param trades: list of stripped cell strings, usually 7 entries:
        ticker, company info, filer name & position, shares, price, value,
        trade date (``YYYY-MM-DD``).  Continuation rows for the same
        company leave the ticker/company cells empty.
    :param trade_type: transaction label to record, e.g. ``'Buy'``.
    :return: None; rows whose last cell is empty are skipped.
    """
    # Find the time now, in UTC time (recorded as the scrape timestamp).
    now = datetime.utcnow()
    # An empty last cell means the row carries no trade data; skip it.
    if len(trades[-1]) == 0:
        return
    # A non-empty first cell introduces a new equity; only the first word
    # of the company cell is kept.
    if len(trades[0]) > 1:
        symbol = trades[0]
        company = trades[1].split(' ')
        company = company[0]
    # Otherwise this row continues the previous company: reuse the symbol
    # and company of the most recently recorded trade.
    else:
        last_trade = insider_trades[-1]
        symbol = last_trade[0]
        company = last_trade[1]
    # A '(' in the filer cell separates the name from the position,
    # e.g. "John Doe (CFO)".
    if '(' in trades[2]:
        info = trades[2].split("(")
        if len(info) > 2:
            # Multiple '(': keep the first fragment as the name and the
            # last as the position.  NOTE(review): middle fragments are
            # dropped -- confirm this matches the site's formatting.
            insider = info[0:-2]
            insider_position = info[-1]
            insider = insider[0].strip()
        else:
            insider, insider_position = trades[2].split("(")
    else:
        insider = trades[2]
        insider_position = ''
    insider = insider.strip()
    # Drop the trailing ')' left over from the split (no-op when empty).
    insider_position = insider_position[:-1]
    # Cells 3..5 hold shares, price and total value.
    trade_shares, trade_price, trade_value = trades[3:6]
    # Strip thousands separators and convert all values to float.
    trade_value = float(trade_value.replace(",", ""))
    trade_shares = float(trade_shares.replace(",", ""))
    trade_price = float(trade_price.replace(",", ""))
    trade_date = datetime.strptime(trades[6], '%Y-%m-%d')
    insider_trades.append(
        [symbol, company, insider, insider_position, trade_type, trade_shares, trade_price, trade_value, trade_date,
         now])
    return
def find_pages_of_trades(soup_body):
    """Collect the unique pagination links for one day's report.

    :param soup_body: iterable of bs4 elements whose ``<a href>`` anchors
        point at the other pages of the report for that day.
    :return: tuple ``(urls, count)`` where ``urls`` is the list of unique
        href strings in first-seen order and ``count`` is ``len(urls)``.
    """
    url_dict = []
    for row in soup_body:
        # Find all anchors that actually carry an href attribute.
        # (The inner loop no longer shadows the outer ``row`` variable.)
        for anchor in row.find_all('a', href=True):
            next_page_url = anchor['href']
            # Skip links already collected.
            if next_page_url not in url_dict:
                url_dict.append(next_page_url)
    # len(url_dict) replaces the manually maintained counter.
    return url_dict, len(url_dict)
def main():
    """Scrape the past 10 days of insider buys from insider-monitor.com into
    the module-level ``insider_trades`` list, then push the rows to the
    database and write them to a CSV file.
    """
    base_buy_url = 'https://www.insider-monitor.com/insiderbuy.php?days='
    base_report_url = 'https://www.insider-monitor.com/reports/'
    index = 1
    while index <= 10:
        # We navigate to the insider-buys page for day ``index``
        url = base_buy_url + str(index)
        # Request to retrieve the first page
        response = requests.get(url)
        # Parse the text using bs4
        soup = BeautifulSoup(response.text, features='html.parser')
        # Retrieve the next page urls and length of pages in a particular day
        page_urls, total_pages = find_pages_of_trades(soup.find_all("p", {"class": "linkp"}))
        # Now we parse the current page of the report
        current_page = 1
        # Instantiate table body of the first page (skip the header row)
        table_body = soup.find_all('tr')[1:]
        # While loop to traverse through number of pages
        while current_page <= total_pages:
            # Parse each row in table body
            for row in table_body:
                # Find all table entries
                trade = row.find_all('td')
                # Go through each row in table and strip the text
                row_info = [x.text.strip() for x in trade]
                # Parse the info from another python file
                parse_row_info(row_info, 'Buy')
            current_page += 1
            # If we do not see any additional URLs we are at the end of the pages
            if len(page_urls) == 0:
                break
            else:
                # Concatenate for our next url redirect
                next_page_url = base_report_url + page_urls[0]
                # Get rid of the next url in the list
                page_urls.pop(0)
                # Request for another page on the same day
                response = requests.get(next_page_url)
                soup = BeautifulSoup(response.text, features='html.parser')
                table_body = soup.find_all('tr')[1:]
        index += 1
    '''
    Now that we have processed the past 10 days worth of trade, we will insert it
    into the dictionary
    '''
    # name of our database credential files (.txt)
    db_credential_info = "database_info.txt"
    # create a path version of our text file
    db_credential_info_p = '/' + db_credential_info
    # create our instance variables for host, username, password and database name
    db_host, db_user, db_password, db_name = load_db_credential_info(db_credential_info_p)
    # Call update insider trades to have it inserted into the dictionary
    update_insider_trades(db_host, db_user, db_password, db_name, insider_trades)
    # Write to CSV file for all the entries
    write_to_csv(insider_trades)
if __name__ == "__main__":
main()
# 'https://www.insider-monitor.com/insiderbuy.php?days=1'
| 2,710 | 0 | 23 |
b0e8a8b9b6e76ec472f1b2d6bee4fe9b13ed9f73 | 1,989 | py | Python | pmaf/__init__.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/__init__.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/__init__.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | null | null | null | r"""
PhyloMAF (:mod:`pmaf`)
=======================
.. rubric:: Phylogenetic Microbiome Analysis Framework
.. currentmodule:: pmaf
PhyloMAF is a novel comprehensive microbiome data analysis tool based
on Python programming language. With memory efficient and extensible design, PhyloMAF
have wide range of applications including but not limited to: post OTU picking
microbiome data analysis, microbiome data meta-analysis, taxonomy based reference
phylogenetic tree pruning and reconstruction, cross database data validation,
primer design by taxonomic ranks, heterogeneous data retrieval from different
databases including remote mega-databases like NCBI or Ensembl.
.. rubric:: Currently available packages and modules
.. toctree::
:maxdepth: 1
pmaf.alignment
pmaf.biome
pmaf.database
pmaf.internal
pmaf.phylo
pmaf.pipe
pmaf.remote
pmaf.sequence
"""
import warnings as __warnings_
import tables as __tables_
import sys as __sys_
import os as __os_
if __sys_.platform == "win32":
__sep_ = ";"
else:
__sep_ = ":"
__os_.environ["PATH"] += __sep_ + __sys_.prefix
__os_.environ["PATH"] += __sep_ + __sys_.prefix + "/bin"
__warnings_.simplefilter("ignore", category=DeprecationWarning)
__warnings_.simplefilter("ignore", category=SyntaxWarning)
__warnings_.simplefilter("ignore", category=PendingDeprecationWarning)
__warnings_.filterwarnings(
action="ignore", category=__tables_.NaturalNameWarning, module="tables"
)
__warnings_.filterwarnings(
action="ignore", category=__tables_.PerformanceWarning, module="tables"
)
# Optionally attach to a PyCharm remote debugger. The import sits inside the
# try so the package stays importable when the pydevd-pycharm debug egg is
# not installed; previously the unconditional top-level import would break
# ``import pmaf`` entirely, and the trailing ``except: raise`` was a no-op.
try:
    import pydevd_pycharm
    pydevd_pycharm.settrace('localhost', port=5555, stdoutToServer=True, stderrToServer=True)
except (ImportError, ConnectionRefusedError):
    # No debug egg installed / no debugger listening -- run normally.
    pass
from . import database
from . import biome
from . import alignment
from . import phylo
from . import pipe
from . import remote
from . import sequence
__all__ = ["database", "biome", "alignment", "phylo", "pipe", "remote", "sequence"]
| 26.52 | 93 | 0.759175 | r"""
PhyloMAF (:mod:`pmaf`)
=======================
.. rubric:: Phylogenetic Microbiome Analysis Framework
.. currentmodule:: pmaf
PhyloMAF is a novel comprehensive microbiome data analysis tool based
on Python programming language. With memory efficient and extensible design, PhyloMAF
have wide range of applications including but not limited to: post OTU picking
microbiome data analysis, microbiome data meta-analysis, taxonomy based reference
phylogenetic tree pruning and reconstruction, cross database data validation,
primer design by taxonomic ranks, heterogeneous data retrieval from different
databases including remote mega-databases like NCBI or Ensembl.
.. rubric:: Currently available packages and modules
.. toctree::
:maxdepth: 1
pmaf.alignment
pmaf.biome
pmaf.database
pmaf.internal
pmaf.phylo
pmaf.pipe
pmaf.remote
pmaf.sequence
"""
import warnings as __warnings_
import tables as __tables_
import sys as __sys_
import os as __os_
if __sys_.platform == "win32":
__sep_ = ";"
else:
__sep_ = ":"
__os_.environ["PATH"] += __sep_ + __sys_.prefix
__os_.environ["PATH"] += __sep_ + __sys_.prefix + "/bin"
__warnings_.simplefilter("ignore", category=DeprecationWarning)
__warnings_.simplefilter("ignore", category=SyntaxWarning)
__warnings_.simplefilter("ignore", category=PendingDeprecationWarning)
__warnings_.filterwarnings(
action="ignore", category=__tables_.NaturalNameWarning, module="tables"
)
__warnings_.filterwarnings(
action="ignore", category=__tables_.PerformanceWarning, module="tables"
)
import pydevd_pycharm
try:
pydevd_pycharm.settrace('localhost', port=5555, stdoutToServer=True, stderrToServer=True)
except ConnectionRefusedError:
pass
except:
raise
from . import database
from . import biome
from . import alignment
from . import phylo
from . import pipe
from . import remote
from . import sequence
__all__ = ["database", "biome", "alignment", "phylo", "pipe", "remote", "sequence"]
| 0 | 0 | 0 |
c64732ce1acdb5bd8b363d200430396748a5ed2c | 988 | py | Python | app/megatron/tests/slack_connection_test.py | team-labs/megatron | 24376a0173d654ef5bddb5c6ea3e763697a929bf | [
"MIT"
] | 4 | 2019-03-21T12:22:07.000Z | 2019-07-08T09:36:41.000Z | app/megatron/tests/slack_connection_test.py | team-labs/megatron | 24376a0173d654ef5bddb5c6ea3e763697a929bf | [
"MIT"
] | 17 | 2019-03-29T14:40:40.000Z | 2022-01-05T12:46:05.000Z | app/megatron/tests/slack_connection_test.py | team-labs/megatron | 24376a0173d654ef5bddb5c6ea3e763697a929bf | [
"MIT"
] | 3 | 2019-04-09T16:45:57.000Z | 2020-01-26T18:19:27.000Z | import pytest
from unittest.mock import MagicMock
from megatron.connections import slack
from megatron.tests.factories import factories
pytestmark = pytest.mark.django_db
@pytest.mark.django_db
| 32.933333 | 66 | 0.803644 | import pytest
from unittest.mock import MagicMock
from megatron.connections import slack
from megatron.tests.factories import factories
pytestmark = pytest.mark.django_db
def test_get_user_info():
    """_get_user_info on an unseen user should invoke the token refresh
    exactly once.

    The refresh is stubbed with a MagicMock so no Slack/OAuth call is made.
    """
    workspace = factories.CustomerWorkspaceFactory()
    connection = slack.SlackConnection(workspace.connection_token)
    irrelevant_user_id = "U12345"
    connection._refresh_access_token = MagicMock()
    connection._get_user_info(irrelevant_user_id)
    connection._refresh_access_token.assert_called_once()
@pytest.mark.django_db
def test_customer_workspace_refresh(fake_app_response):
    """Refreshing the access token should update the workspace DB record.

    NOTE(review): the expected BORK* values presumably come from the
    ``fake_app_response`` fixture's canned Slack response -- verify against
    the conftest definition.
    """
    workspace = factories.CustomerWorkspaceFactory()
    connection = slack.SlackConnection(workspace.connection_token)
    irrelevant_user_id = "U12345"
    connection._refresh_access_token(irrelevant_user_id)
    workspace.refresh_from_db()
    assert workspace.name == "BORKBORK"
    assert workspace.domain == "BORKBORKBORK"
    assert workspace.connection_token == "BORK_BORK_BORK_BORK"
| 744 | 0 | 45 |
d07e9124fa7e0398e4d8d828555166b929492a62 | 3,878 | py | Python | src/demos/python/irrlicht/demo_IRR_visualize_collision.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | src/demos/python/irrlicht/demo_IRR_visualize_collision.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | src/demos/python/irrlicht/demo_IRR_visualize_collision.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2022 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Test the collision drawer callback. This only tests that the callback can be
# used; there is no actual visualization of the collision shapes but simply
# printing out of the end points of the lines that would be used to render them.
# To visualize the collision shapes, one can use the same feature implemented in
# the underlying Irrlicht visualization (hit the 'i' key and select the check
# box 'Draw Collsion Shapes').
# The global reference frame has Y up.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
# -----------------------------------------------------------------------------
# Callback class for collision shape visualization
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('relative/path/to/data/directory/')
print( "Copyright (c) 2022 projectchrono.org")
# Create sys, contact material, and bodies
sys = chrono.ChSystemNSC()
mat = chrono.ChMaterialSurfaceNSC()
ground = chrono.ChBodyEasyBox(10, 3, 10, 100, True, True, mat)
ground.SetBodyFixed(True);
ground.SetPos(chrono.ChVectorD(0, 0, 0))
sys.AddBody(ground)
cyl = chrono.ChBodyEasyCylinder(0.5, 1.0, 100, True, True, mat)
cyl.SetPos(chrono.ChVectorD(0, 3, 0))
sys.AddBody(cyl)
box = chrono.ChBodyEasyBox(0.5, 0.5, 0.5, 100, True, True, mat)
box.SetPos(chrono.ChVectorD(0.2, 2, 0))
sys.AddBody(box)
sphere = chrono.ChBodyEasySphere(0.25, 100.0, True, True, mat)
sphere.SetPos(chrono.ChVectorD(-0.2, 2, 0.75))
sys.AddBody(sphere)
ellipse = chrono.ChBodyEasyEllipsoid(chrono.ChVectorD(0.2, 0.4, 0.6), 100, True, True, mat)
ellipse.SetPos(chrono.ChVectorD(0.2, 2, -1.0))
sys.AddBody(ellipse)
mesh = chrono.ChBodyEasyMesh(chrono.GetChronoDataFile("models/cube.obj"), 100, True, True, True, mat, 0.05)
mesh.SetPos(chrono.ChVectorD(2.0, 3.5, -2.0))
sys.AddBody(mesh)
# Create the Irrlicht visualization
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Collision visualization demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(0, 8 , 6))
vis.AddTypicalLights()
# Create collision shape drawer
drawer = DebugDrawer()
sys.GetCollisionSystem().RegisterVisualizationCallback(drawer)
# Specify what information is visualized
mode = chrono.ChCollisionSystem.VIS_Shapes
use_zbuffer = True
# Run the simulation
while vis.Run():
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
sys.DoStepDynamics(1e-3)
print(sys.GetChTime(), " ", sys.GetNcontacts())
sys.GetCollisionSystem().Visualize(chrono.ChCollisionSystem.VIS_Shapes)
| 36.242991 | 107 | 0.636668 | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2022 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Test the collision drawer callback. This only tests that the callback can be
# used; there is no actual visualization of the collision shapes but simply
# printing out of the end points of the lines that would be used to render them.
# To visualize the collision shapes, one can use the same feature implemented in
# the underlying Irrlicht visualization (hit the 'i' key and select the check
# box 'Draw Collsion Shapes').
# The global reference frame has Y up.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
# -----------------------------------------------------------------------------
# Callback class for collision shape visualization
# -----------------------------------------------------------------------------
class DebugDrawer(chrono.VisualizationCallback):
    """Collision visualization callback that, instead of rendering, prints
    the end points of every line the collision system asks to draw."""
    def __init__(self):
        super().__init__()
    def DrawLine(self, pA, pB, color):
        # pA/pB are the segment end points; ``color`` is ignored here.
        print(" pA = ", pA.x, pA.y, pA.z)
        print(" pB = ", pB.x, pB.y, pB.z)
# -----------------------------------------------------------------------------
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('relative/path/to/data/directory/')
print( "Copyright (c) 2022 projectchrono.org")
# Create sys, contact material, and bodies
sys = chrono.ChSystemNSC()
mat = chrono.ChMaterialSurfaceNSC()
ground = chrono.ChBodyEasyBox(10, 3, 10, 100, True, True, mat)
ground.SetBodyFixed(True);
ground.SetPos(chrono.ChVectorD(0, 0, 0))
sys.AddBody(ground)
cyl = chrono.ChBodyEasyCylinder(0.5, 1.0, 100, True, True, mat)
cyl.SetPos(chrono.ChVectorD(0, 3, 0))
sys.AddBody(cyl)
box = chrono.ChBodyEasyBox(0.5, 0.5, 0.5, 100, True, True, mat)
box.SetPos(chrono.ChVectorD(0.2, 2, 0))
sys.AddBody(box)
sphere = chrono.ChBodyEasySphere(0.25, 100.0, True, True, mat)
sphere.SetPos(chrono.ChVectorD(-0.2, 2, 0.75))
sys.AddBody(sphere)
ellipse = chrono.ChBodyEasyEllipsoid(chrono.ChVectorD(0.2, 0.4, 0.6), 100, True, True, mat)
ellipse.SetPos(chrono.ChVectorD(0.2, 2, -1.0))
sys.AddBody(ellipse)
mesh = chrono.ChBodyEasyMesh(chrono.GetChronoDataFile("models/cube.obj"), 100, True, True, True, mat, 0.05)
mesh.SetPos(chrono.ChVectorD(2.0, 3.5, -2.0))
sys.AddBody(mesh)
# Create the Irrlicht visualization
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Collision visualization demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(0, 8 , 6))
vis.AddTypicalLights()
# Create collision shape drawer
drawer = DebugDrawer()
sys.GetCollisionSystem().RegisterVisualizationCallback(drawer)
# Specify what information is visualized
mode = chrono.ChCollisionSystem.VIS_Shapes
use_zbuffer = True
# Run the simulation
while vis.Run():
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
sys.DoStepDynamics(1e-3)
print(sys.GetChTime(), " ", sys.GetNcontacts())
sys.GetCollisionSystem().Visualize(chrono.ChCollisionSystem.VIS_Shapes)
| 128 | 27 | 75 |
3c25c23c04fb70e4cb3707a663dadaad60b47e80 | 1,600 | py | Python | src/bot.py | alexthvest/fresco-bot | b22d37bb0ad5975b3fe8268cfe5f8d9d2825c2e9 | [
"MIT"
] | null | null | null | src/bot.py | alexthvest/fresco-bot | b22d37bb0ad5975b3fe8268cfe5f8d9d2825c2e9 | [
"MIT"
] | null | null | null | src/bot.py | alexthvest/fresco-bot | b22d37bb0ad5975b3fe8268cfe5f8d9d2825c2e9 | [
"MIT"
] | null | null | null | import vk_api
import random
from fresko import create_quote_image
from vk_api.utils import get_random_id
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
| 29.090909 | 73 | 0.62625 | import vk_api
import random
from fresko import create_quote_image
from vk_api.utils import get_random_id
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
class FrescoBot:
    """VK community bot that buffers incoming chat messages and, every 20
    messages, posts one of them back as a generated quote image."""

    def __init__(self, token, confirmation_code):
        """:param token: VK group access token.
        :param confirmation_code: string returned for Callback API
            confirmation requests.
        """
        self.__vk_session = vk_api.VkApi(token=token)
        self.__vk_api = self.__vk_session.get_api()
        self.__confirmation_code = confirmation_code
        # Buffer of message texts seen since the last quote was posted.
        self.__messages = []

    def send_message(self, peer_id, text, **kwargs):
        """Send ``text`` to ``peer_id``; extra kwargs (e.g. ``attachment``)
        are forwarded to ``messages.send``."""
        return self.__vk_api.messages.send(
            peer_id=peer_id,
            message=text,
            random_id=get_random_id(),
            **kwargs
        )

    def upload_photo(self, file, peer_id):
        """Upload a photo for the given peer and return its VK attachment
        id in the ``photo<owner>_<id>`` format."""
        upload = vk_api.VkUpload(self.__vk_session)
        photo = upload.photo_messages(photos=file, peer_id=peer_id)[0]
        return f"photo{photo['owner_id']}_{photo['id']}"

    def handle_callback(self, data):
        """Dispatch one Callback API event: return the confirmation code
        for ``confirmation`` events, handle new messages, and answer
        ``"ok"`` for everything else."""
        if data["type"] == "confirmation":
            return self.__confirmation_code
        if data["type"] == "message_new":
            self.handle_message(data)
            return "ok"
        return "ok"

    def handle_message(self, data):
        """Buffer the message text; once exactly 20 texts have accumulated,
        post a random one as a quote image and reset the buffer."""
        message = data["object"]["message"]
        if "text" in message:
            self.__messages.append(message["text"])
        if len(self.__messages) == 20:
            quote = random.choice(self.__messages)
            image = create_quote_image(quote)
            image = self.upload_photo(image, message["peer_id"])
            self.send_message(message["peer_id"], "", attachment=[image])
            self.__messages = []
| 1,279 | -5 | 158 |
cd06d5bedfc6f1dcb68298520f00db8f4bd05de9 | 3,260 | py | Python | AboutWindow.py | tirtharajghosh/Subtitle-Downloader | 50f2eac6d960d2f5cd2769e3a327579abb62ecb5 | [
"MIT"
] | null | null | null | AboutWindow.py | tirtharajghosh/Subtitle-Downloader | 50f2eac6d960d2f5cd2769e3a327579abb62ecb5 | [
"MIT"
] | null | null | null | AboutWindow.py | tirtharajghosh/Subtitle-Downloader | 50f2eac6d960d2f5cd2769e3a327579abb62ecb5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 67.916667 | 330 | 0.692025 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class About(object):
    """Qt Designer-generated UI definition for the "About" dialog.

    Produced from ``about.ui`` by the PyQt5 UI code generator; edits made
    here are normally lost when the .ui file is recompiled.
    """
    def setupUi(self, Dialog):
        """Create and lay out the dialog's widgets and wire its signals."""
        Dialog.setObjectName("About")
        Dialog.resize(320, 240)
        # OK-only button box along the bottom edge of the dialog.
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(10, 200, 301, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        # Title label above the rich-text body.
        self.lbl_title = QtWidgets.QLabel(Dialog)
        self.lbl_title.setGeometry(QtCore.QRect(70, 20, 161, 20))
        self.lbl_title.setObjectName("lbl_title")
        # Read-only rich-text area holding the version/licence information.
        self.txt_aboutHtml = QtWidgets.QTextBrowser(Dialog)
        self.txt_aboutHtml.setGeometry(QtCore.QRect(30, 50, 256, 141))
        self.txt_aboutHtml.setObjectName("txt_aboutHtml")
        self.retranslateUi(Dialog)
        # Both accept and reject simply close the dialog.
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Install all user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("About", "About"))
        self.lbl_title.setText(_translate("About", "<html><head/><body><p align=\"center\"><span style=\" font-size:12pt; font-weight:600;\">Subtitle Downloader</span></p></body></html>"))
        # Rich-text body: version, copyright, licence and repository link.
        self.txt_aboutHtml.setHtml(_translate("About", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Version: 1.1.0</p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Copyright © 2020 <a target='_blank' href=\"mailto:tirtharajghosh.ju@gmail.com\"><span style=\" text-decoration: underline; color:#0000ff;\">Tirtharaj Ghosh</span></a> </p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">MIT Licence</p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">NO WARRANTY</p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a target='_blank' href=\"https://github.com/tirtharajghosh/Subtitle-Downloader\"><span style=\" text-decoration: underline; color:#0000ff;\">Github Repository</span></a></p></body></html>"))
| 2,947 | -1 | 76 |
82fa9e18e1b0b25fb23249fe9014e2bb41d7811c | 5,903 | py | Python | ncsnv2/models/__init__.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 12 | 2020-05-23T10:02:12.000Z | 2021-03-25T19:54:00.000Z | ncsnv2/models/__init__.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 6 | 2021-03-19T15:30:28.000Z | 2022-03-12T00:51:16.000Z | ncsnv2/models/__init__.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 4 | 2020-11-04T03:52:45.000Z | 2021-12-28T16:07:08.000Z | import torch
import numpy as np
@torch.no_grad()
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
                                        n_steps_each=100, step_lr=0.000008):
    """
    Annealed Langevin dynamics that inpaints the missing right half of square
    3-channel images (currently only good for 32x32 images).  At every step
    the left half is re-clamped to a noise-perturbed copy of ``refer_image``
    while ``scorenet`` denoises the whole frame.  Returns the list of
    per-step snapshots, recorded *before* each update, moved to the CPU.
    """
    images = []
    # One copy of the reference per sample drawn from it; the batch/sample
    # axes are then flattened into a single batch dimension.
    refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
    refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
    x_mod = x_mod.view(-1, 3, image_size, image_size)
    cols = image_size // 2
    half_refer_image = refer_image[..., :cols]
    with torch.no_grad():  # redundant with the decorator; kept as-is
        for c, sigma in enumerate(sigmas):
            # Conditioning labels select noise level ``c`` for every sample.
            labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
            labels = labels.long()
            # Quadratic annealing of the step size w.r.t. the final sigma.
            step_size = step_lr * (sigma / sigmas[-1]) ** 2
            for s in range(n_steps_each):
                images.append(x_mod.to('cpu'))
                # Clamp the observed half to the reference, perturbed at the
                # current noise level, before taking the Langevin step.
                corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
                x_mod[:, :, :, :cols] = corrupted_half_image
                noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
                grad = scorenet(x_mod, labels)
                x_mod = x_mod + step_size * grad + noise
                print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
                                                                         grad.abs().max()))
    return images
@torch.no_grad() | 43.404412 | 121 | 0.552262 | import torch
import numpy as np
def get_sigmas(config):
    """Build the noise-level schedule described by the model config.

    Interpolates from ``config.model.sigma_begin`` to
    ``config.model.sigma_end`` over ``config.model.num_classes`` levels,
    either geometrically (linspace in log-space) or uniformly, depending on
    ``config.model.sigma_dist``.

    Returns a float32 tensor on ``config.device``; raises
    ``NotImplementedError`` for any other distribution name.
    """
    model_cfg = config.model
    if model_cfg.sigma_dist == 'geometric':
        # Linear in log-space == geometric progression of sigmas.
        schedule = np.exp(
            np.linspace(np.log(model_cfg.sigma_begin), np.log(model_cfg.sigma_end),
                        model_cfg.num_classes))
    elif model_cfg.sigma_dist == 'uniform':
        schedule = np.linspace(model_cfg.sigma_begin, model_cfg.sigma_end,
                               model_cfg.num_classes)
    else:
        raise NotImplementedError('sigma distribution not supported')
    return torch.tensor(schedule).float().to(config.device)
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
                             final_only=False, verbose=False):
    """Annealed Langevin sampling over a decreasing noise schedule.

    For each noise level in ``sigmas`` the sampler takes ``n_steps_each``
    Langevin steps, asking ``scorenet(x, labels)`` for the score at that
    level.  Returns the per-step snapshots on the CPU, or only the final
    state as a one-element list when ``final_only`` is set.
    """
    snapshots = []
    with torch.no_grad():
        for level, sigma in enumerate(sigmas):
            # Conditioning labels: every sample is denoised at ``level``.
            level_labels = (torch.ones(x_mod.shape[0], device=x_mod.device) * level).long()
            # Step size annealed quadratically relative to the final sigma.
            step_size = step_lr * (sigma / sigmas[-1]) ** 2
            for _ in range(n_steps_each):
                score = scorenet(x_mod, level_labels)
                perturbation = torch.randn_like(x_mod)
                score_norm = torch.norm(score.view(score.shape[0], -1), dim=-1).mean()
                pert_norm = torch.norm(perturbation.view(perturbation.shape[0], -1), dim=-1).mean()
                x_mod = x_mod + step_size * score + perturbation * np.sqrt(step_size * 2)
                image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
                snr = np.sqrt(step_size / 2.) * score_norm / pert_norm
                score_mean_norm = torch.norm(score.mean(dim=0).view(-1)) ** 2 * sigma ** 2
                if not final_only:
                    snapshots.append(x_mod.to('cpu'))
                if verbose:
                    print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
                        level, step_size, score_norm.item(), image_norm.item(), snr.item(), score_mean_norm.item()))
    return [x_mod.to('cpu')] if final_only else snapshots
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
                                        n_steps_each=100, step_lr=0.000008):
    """Annealed Langevin sampling that inpaints the right half of an image.

    The left half of every sample is repeatedly re-clamped to a noisy copy
    of ``refer_image`` while the score network denoises the whole frame, so
    only the missing right half is actually synthesised.  Returns the list
    of per-step snapshots (taken *before* each update), on the CPU.

    NOTE(review): assumes square 3-channel images with exactly the right
    half missing (historically "only good for 32x32 images").
    """
    snapshots = []
    # Replicate the reference once per sample drawn from it, then flatten
    # the batch/sample axes into a single batch dimension.
    refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
    refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
    x_mod = x_mod.view(-1, 3, image_size, image_size)
    cols = image_size // 2
    known_half = refer_image[..., :cols]
    with torch.no_grad():
        for level, sigma in enumerate(sigmas):
            labels = (torch.ones(x_mod.shape[0], device=x_mod.device) * level).long()
            step_size = step_lr * (sigma / sigmas[-1]) ** 2
            for _ in range(n_steps_each):
                snapshots.append(x_mod.to('cpu'))
                # Clamp the observed half to the reference, perturbed at the
                # current noise level, before every Langevin step.
                corrupted_half = known_half + torch.randn_like(known_half) * sigma
                x_mod[:, :, :, :cols] = corrupted_half
                noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
                grad = scorenet(x_mod, labels)
                x_mod = x_mod + step_size * grad + noise
                print("class: {}, step_size: {}, mean {}, max {}".format(level, step_size, grad.abs().mean(),
                                                                         grad.abs().max()))
    return snapshots
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
                                           final_only=False, verbose=False):
    """Annealed Langevin sampling driven by spherically interpolated noise.

    Each of the ``x_mod.shape[0]`` rows is replicated ``n_interpolations``
    times, and every replica is pushed by a different cos/sin blend of the
    same two Gaussian draws, producing a smooth interpolation between two
    sampling trajectories per row.  Returns per-step snapshots on the CPU,
    or only the final state when ``final_only`` is set.
    """
    images = []
    n_rows = x_mod.shape[0]
    # Replicate each row n_interpolations times, then flatten the
    # (row, interpolation) axes into one batch dimension.
    x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
    x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
    for c, sigma in enumerate(sigmas):
        # Conditioning labels select noise level ``c`` for every sample.
        labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
        labels = labels.long()
        # Quadratic annealing of the step size w.r.t. the final sigma.
        step_size = step_lr * (sigma / sigmas[-1]) ** 2
        for s in range(n_steps_each):
            grad = scorenet(x_mod, labels)
            # Two independent per-row noise draws...
            noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
                                  device=x_mod.device)
            noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
                                  device=x_mod.device)
            # ...blended per replica with cos/sin weights over [0, pi/2], so
            # every interpolated noise vector keeps the same variance.
            angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
            noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
                    noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
            noise = noise.reshape(-1, *noise.shape[2:])
            # Diagnostics used only for the verbose printout below.
            grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
            noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
            image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
            x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
            snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
            if not final_only:
                images.append(x_mod.to('cpu'))
            if verbose:
                print(
                    "level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
                        c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
    if final_only:
        return [x_mod.to('cpu')]
    else:
        return images
5afb623dd4f4042147e31b2180ef2c02ab6f2fc7 | 885 | py | Python | main.py | pjkostrzewski/kanonierzy | 9b5cc95d5a6ddcb7fc647b16373f54f9c0bca21d | [
"MIT"
] | null | null | null | main.py | pjkostrzewski/kanonierzy | 9b5cc95d5a6ddcb7fc647b16373f54f9c0bca21d | [
"MIT"
] | null | null | null | main.py | pjkostrzewski/kanonierzy | 9b5cc95d5a6ddcb7fc647b16373f54f9c0bca21d | [
"MIT"
] | null | null | null | from scrap.kanonierzy_scraper import KanonierzyScraper
from adapters.article_adapter import ArticleAdapter
from page.article import Article
from engine.chrome_browser import BrowserChrome
from adapters.file_adapter import FileAdapter
# Output file for the scraped article dump.
FILENAME = "results.json"
# Script entry point: scrape all articles from the site, then persist them.
if __name__ == '__main__':
    articles = get_articles_from_website()
    save_articles(articles)
| 28.548387 | 101 | 0.764972 | from scrap.kanonierzy_scraper import KanonierzyScraper
from adapters.article_adapter import ArticleAdapter
from page.article import Article
from engine.chrome_browser import BrowserChrome
from adapters.file_adapter import FileAdapter
# Output file for the scraped article dump.
FILENAME = "results.json"
def get_articles_from_website():
    """Scrape the site with a headless Chrome session and build Article objects."""
    chrome = BrowserChrome()
    chrome.add_argument('headless').run()
    scraper = KanonierzyScraper(chrome)
    scraper.scroll_down_page(chrome)
    articles = []
    for raw in scraper.scrap():
        articles.append(Article.create_using_adapter(adapter=ArticleAdapter, **raw))
    return articles
def save_articles(articles):
    """Persist the scraped articles as a 1-indexed mapping via the file adapter."""
    payload = {position: vars(article)
               for position, article in enumerate(articles, start=1)}
    FileAdapter(filename=FILENAME).save(payload)
# Script entry point: scrape all articles from the site, then persist them.
if __name__ == '__main__':
    articles = get_articles_from_website()
    save_articles(articles)
| 474 | 0 | 46 |
8384dd5ded0186242d11810a5ae349eeec80f730 | 5,896 | py | Python | dataset/shapenet.py | hirotong/NeuralFusion | b83145c79df03857de7e32a2c46302c77635ff98 | [
"MIT"
] | 1 | 2021-09-25T08:19:42.000Z | 2021-09-25T08:19:42.000Z | dataset/shapenet.py | hirotong/NeuralFusion | b83145c79df03857de7e32a2c46302c77635ff98 | [
"MIT"
] | null | null | null | dataset/shapenet.py | hirotong/NeuralFusion | b83145c79df03857de7e32a2c46302c77635ff98 | [
"MIT"
] | null | null | null | '''
Author: Jinguang Tong
Affliction: Australia National University, DATA61 CSIRO
'''
import os
import glob
import numpy as np
from skimage import io
from copy import copy
from graphics import Voxelgrid
from scipy.ndimage.morphology import binary_dilation
from torch.utils.data import Dataset
from utils.data import add_kinect_noise, add_depth_noise, add_outliers
from dataset.binvox_utils import read_as_3d_array
# Smoke test: build the dataset from the training scene list and load one
# ground-truth grid for a sample watercraft object.
if __name__ == '__main__':
    from utils.loading import load_config_from_yaml
    config = load_config_from_yaml('configs/fusion/shapenet.noise.005.without.routing.yaml')
    config.DATA.scene_list = config.DATA.train_scene_list
    dataset = ShapeNet(config.DATA)
    # NOTE(review): ShapeNet defines get_grid_occ/get_occ but no get_grid --
    # this call looks stale; verify before relying on this smoke test.
    dataset.get_grid('04530566/10e10b663a81801148c1c53e2c827229')
| 31.195767 | 114 | 0.614315 | '''
Author: Jinguang Tong
Affliction: Australia National University, DATA61 CSIRO
'''
import os
import glob
import numpy as np
from skimage import io
from copy import copy
from graphics import Voxelgrid
from scipy.ndimage.morphology import binary_dilation
from torch.utils.data import Dataset
from utils.data import add_kinect_noise, add_depth_noise, add_outliers
from dataset.binvox_utils import read_as_3d_array
class ShapeNet(Dataset):
    """Depth-frame dataset rendered from ShapeNet objects.

    Every item is one rendered depth frame plus several noise-corrupted
    variants and validity masks, together with the rescaled camera
    intrinsics and inverted extrinsics.  Ground-truth grids are loaded
    separately through :meth:`get_grid_occ` / :meth:`get_occ`.
    """

    def __init__(self, config):
        """Store dataset options from ``config`` and index every frame.

        ``config`` must provide ``root_dir``, ``resy``/``resx``,
        ``transform``, ``scene_list``, ``noise_scale``, ``outlier_scale``,
        ``outlier_fraction`` and ``grid_resolution``.
        """
        self.root_dir = os.path.expanduser(config.root_dir)
        self.resolution = (config.resy, config.resx)
        # Scale factors relative to the native 480x640 render resolution;
        # used below to rescale the camera intrinsics.
        self.xscale = self.resolution[0] / 480
        self.yscale = self.resolution[1] / 640
        self.transform = config.transform
        self.scene_list = config.scene_list
        self.noise_scale = config.noise_scale
        self.outlier_scale = config.outlier_scale
        self.outlier_fraction = config.outlier_fraction
        self.grid_resolution = config.grid_resolution
        self._load_frames()

    def _load_frames(self):
        """Collect one entry per depth frame of each scene in the
        tab-separated scene list.  The stored path omits the ``.depth.png``
        suffix so the intrinsics/extrinsics files can be derived from it."""
        self.frames = []
        self._scenes = []
        with open(self.scene_list, 'r') as file:
            for line in file:
                scene, obj = line.rstrip().split('\t')
                self._scenes.append(os.path.join(scene, obj))
                path = os.path.join(self.root_dir, scene, obj, 'data', '*.depth.png')
                files = glob.glob(path)
                for f in files:
                    self.frames.append(f.replace('.depth.png', ''))

    @property
    def scenes(self):
        """List of ``<synset>/<object>`` ids taken from the scene list."""
        return self._scenes

    def __len__(self):
        """Number of indexed frames."""
        return len(self.frames)

    def __getitem__(self, item):
        """Load frame ``item`` and return a dict sample with the depth map,
        its noisy/outlier variants, masks, camera parameters and ids."""
        frame = self.frames[item]
        pathsplit = frame.split(os.path.sep)
        # Frame paths look like .../<synset>/<object>/data/<frame_id>.
        sc = pathsplit[-4]
        obj = pathsplit[-3]
        scene_id = '{}/{}'.format(sc, obj)
        sample = {}
        frame_id = pathsplit[-1]
        frame_id = int(frame_id)
        sample['frame_id'] = frame_id
        # Depth is stored in millimetres; convert to metres.
        depth = io.imread(f'{frame}.depth.png')
        depth = depth.astype(np.float32)
        depth = depth / 1000.
        # Nearest-neighbour downsampling to the configured resolution.
        step_x = depth.shape[0] / self.resolution[0]
        step_y = depth.shape[1] / self.resolution[1]
        index_y = [int(step_y * i) for i in range(0, int(depth.shape[1] / step_y))]
        index_x = [int(step_x * i) for i in range(0, int(depth.shape[0] / step_x))]
        depth = depth[:, index_y]
        depth = depth[index_x, :]
        # Pixels equal to the maximum depth value are treated as background.
        mask = copy(depth)
        mask[mask == np.max(depth)] = 0
        mask[mask != 0] = 1
        sample['mask'] = copy(mask)
        # Dilated masks give the routing/gradient networks some margin
        # around the object silhouette.
        gradient_mask = binary_dilation(mask, iterations=5)
        mask = binary_dilation(mask, iterations=8)
        sample['routing_mask'] = mask
        sample['gradient_mask'] = gradient_mask
        depth[mask == 0] = 0
        sample['depth'] = depth
        # Three corruption variants of the clean depth map.
        sample['noisy_depth'] = add_kinect_noise(copy(depth), sigma_fraction=self.noise_scale)
        sample['noisy_depth_octnetfusion'] = add_depth_noise(copy(depth), noise_sigma=self.noise_scale, seed=42)
        sample['outlier_depth'] = add_outliers(copy(sample['noisy_depth_octnetfusion']), scale=self.outlier_scale,
                                               fraction=self.outlier_fraction)
        intrinsics = np.loadtxt(f'{frame}.intrinsics.txt')
        # adapt intrinsics to camera resolution
        scaling = np.eye(3)
        scaling[1, 1] = self.yscale
        scaling[0, 0] = self.xscale
        sample['intrinsics'] = np.dot(scaling, intrinsics)
        # Stored extrinsics are world-to-camera; invert to camera-to-world.
        extrinsics = np.loadtxt(f'{frame}.extrinsics.txt')
        extrinsics = np.linalg.inv(extrinsics)
        sample['extrinsics'] = extrinsics
        sample['scene_id'] = scene_id
        # Make every non-array, non-string entry a numpy array so collation
        # downstream sees a uniform sample.
        for key in sample.keys():
            if type(sample[key]) is not np.ndarray and type(sample[key]) is not str:
                sample[key] = np.asarray(sample[key])
        if self.transform:
            sample = self.transform(sample)
        return sample

    def get_grid_occ(self, scene, truncation=None):
        """Load the binvox ground truth for ``scene`` (``'<synset>/<obj>'``).

        Returns ``(grid, occ)``: a (optionally truncated) TSDF-style
        Voxelgrid and the raw occupancy Voxelgrid, both in metric units
        derived from ``grid_resolution``.
        """
        sc, obj = scene.split(os.path.sep)
        if self.grid_resolution == 256:
            filepath = os.path.join(self.root_dir, sc, obj, 'voxels', '*.binvox')
        else:
            filepath = os.path.join(self.root_dir, sc, obj, 'voxels', f'*.{self.grid_resolution}.binvox')
        filepath = glob.glob(filepath)[0]
        with open(filepath, 'rb') as file:
            volume = read_as_3d_array(file)
        resolution = 1. / self.grid_resolution
        grid = Voxelgrid(resolution)
        occ = Voxelgrid(resolution)
        bbox = np.zeros((3, 2))
        bbox[:, 0] = volume.translate
        bbox[:, 1] = bbox[:, 0] + resolution * volume.dims[0]
        # ``np.int``/``np.float`` aliases were removed in NumPy 1.24; the
        # builtin types are the documented replacements.
        grid.from_array(volume.data.astype(int), bbox)
        occ.from_array(volume.data.astype(float), bbox)
        # calculate tsdf
        grid.transform()
        grid.volume *= resolution
        if truncation is not None:
            grid.volume[np.abs(grid.volume) >= truncation] = truncation
        return grid, occ

    def get_occ(self, scene):
        # TODO: incomplete -- ``occupancies`` is loaded but never written
        # into the returned (empty) Voxelgrid.
        sc, obj = scene.split(os.sep)
        if self.grid_resolution == 256:
            filepath = os.path.join(self.root_dir, sc, obj, 'occupancy', '*.npz')
        else:
            filepath = os.path.join(self.root_dir, sc, obj, 'occupancy', f'*.{self.grid_resolution}.npz')
        filepath = glob.glob(filepath)[0]
        volume = np.load(filepath)
        occupancies = volume['occupancies']
        resolution = 1. / self.grid_resolution
        occ = Voxelgrid(resolution)
        return occ
# Smoke test: build the dataset from the training scene list and load one
# ground-truth grid for a sample watercraft object.
if __name__ == '__main__':
    from utils.loading import load_config_from_yaml
    config = load_config_from_yaml('configs/fusion/shapenet.noise.005.without.routing.yaml')
    config.DATA.scene_list = config.DATA.train_scene_list
    dataset = ShapeNet(config.DATA)
    # NOTE(review): ShapeNet defines get_grid_occ/get_occ but no get_grid --
    # this call looks stale; verify before relying on this smoke test.
    dataset.get_grid('04530566/10e10b663a81801148c1c53e2c827229')
| 4,906 | 206 | 23 |
d7c34b5cce6b5d68fdee32163b306249e4c320e0 | 2,470 | py | Python | dataLoader.py | deekshacsg/self_driving_car_dc | 36fb643026a6c0e222380a89f0874ca325e55fb5 | [
"MIT"
] | null | null | null | dataLoader.py | deekshacsg/self_driving_car_dc | 36fb643026a6c0e222380a89f0874ca325e55fb5 | [
"MIT"
] | null | null | null | dataLoader.py | deekshacsg/self_driving_car_dc | 36fb643026a6c0e222380a89f0874ca325e55fb5 | [
"MIT"
] | null | null | null | import os
import json
import pickle
import numpy as np
from PIL import Image
from collections import defaultdict
import swiftclient
# Swift container names for the image features and steering angles.
feature_container_name = 'feature'
angle_container_name = 'angle'
def load_data(img_size, isSwiftEnabled):
    """
    Create pickle objects of features and labels.

    Walks ``./resources/log`` for ``*.jpg`` frames, resizes each to
    ``img_size`` in greyscale, pairs it with the steering angle/throttle
    stored in the matching ``record_<id>.json`` file under
    ``./resources/json_data`` and pickles the resulting feature matrix and
    angle vector to the files ``features`` and ``angles``.

    :param img_size: the new size of the re-sized image
    :param isSwiftEnabled: if you want to store using Swift Storage
    :return: None
    """
    images_dir = "./resources/log"
    labels_dir = "./resources/json_data"
    features, y_angle, y_throttle = [], [], []
    if isSwiftEnabled:
        conn = get_connection(name="admin", key="admin")  # support for swift storage
        put_container(conn)
    for path, dir, files in os.walk(images_dir):  # NOTE: `dir` shadows the builtin
        for file in files:
            if file.endswith('.jpg'):
                img_id = file.split('_')[0]
                json_record = "record_" + img_id + ".json"
                # resize and convert to grey scale
                # (the opened Image handle is never closed explicitly here)
                img = Image.open(os.path.join(path, file))
                img = img.resize(img_size).convert('L')
                features.append(list(img.getdata()))
                if isSwiftEnabled:
                    # NOTE(review): swiftclient's put_object expects
                    # (container, object_name, contents=...) -- verify this
                    # call before enabling Swift storage.
                    conn.put_object(feature_container_name, img)
                # get throttle and angle
                with open(os.path.join(labels_dir, json_record)) as f:
                    data = json.load(f)
                    y_angle.append(data['user/angle'])
                    if isSwiftEnabled:
                        conn.put_object(angle_container_name, data['user/angle'])
                    y_throttle.append(data['user/throttle'])
    print("%d features, %d angles, %d throttle" % (len(features), len(y_angle), len(y_throttle)))
    X = np.array(features).astype('float32')
    y_angle = np.array(y_angle).astype('float32')
    with open("features", "wb") as f:
        pickle.dump(X, f)
    with open("angles", "wb") as f:
        pickle.dump(y_angle, f)
# Script entry point: build the feature/angle pickles at 80x60 resolution
# without uploading anything to Swift storage.
if __name__ == '__main__':
    load_data((80, 60), isSwiftEnabled=False)
    # img = Image.open("./resources/log/0_cam-image_array_.jpg")
    # print("Input image", img.format, img.size, img.mode)
    # img.show()
| 30.493827 | 97 | 0.608502 | import os
import json
import pickle
import numpy as np
from PIL import Image
from collections import defaultdict
import swiftclient
# Swift container names for the image features and steering angles.
feature_container_name = 'feature'
angle_container_name = 'angle'
def get_connection(name, key):
    """Open a Swift connection to the local auth endpoint for ``name``/``key``."""
    return swiftclient.Connection(user=name, key=key,
                                  authurl='http://127.0.0.1:5000/')
def put_container(conn):
    """Ensure both Swift containers (features and angles) exist."""
    for container in (feature_container_name, angle_container_name):
        conn.put_container(container)
def load_data(img_size, isSwiftEnabled):
    """
    Create pickle objects of features and labels.

    Walks ``./resources/log`` for ``*.jpg`` frames, resizes each to
    ``img_size`` in greyscale, pairs it with the steering angle/throttle
    stored in the matching ``record_<id>.json`` file under
    ``./resources/json_data`` and pickles the resulting feature matrix and
    angle vector to the files ``features`` and ``angles``.

    :param img_size: the new size of the re-sized image
    :param isSwiftEnabled: if you want to store using Swift Storage
    :return: None
    """
    images_dir = "./resources/log"
    labels_dir = "./resources/json_data"
    features, y_angle, y_throttle = [], [], []
    if isSwiftEnabled:
        conn = get_connection(name="admin", key="admin")  # support for swift storage
        put_container(conn)
    for path, dirs, files in os.walk(images_dir):
        for fname in files:
            if not fname.endswith('.jpg'):
                continue
            img_id = fname.split('_')[0]
            json_record = "record_" + img_id + ".json"
            # Resize and convert to grey scale.  The context manager closes
            # the underlying file handle (the original code leaked it).
            with Image.open(os.path.join(path, fname)) as src:
                img = src.resize(img_size).convert('L')
            features.append(list(img.getdata()))
            if isSwiftEnabled:
                # NOTE(review): swiftclient's put_object expects
                # (container, object_name, contents=...) -- verify this call
                # before enabling Swift storage.
                conn.put_object(feature_container_name, img)
            # Get throttle and angle from the matching JSON record.
            with open(os.path.join(labels_dir, json_record)) as f:
                data = json.load(f)
            y_angle.append(data['user/angle'])
            if isSwiftEnabled:
                conn.put_object(angle_container_name, data['user/angle'])
            y_throttle.append(data['user/throttle'])
    print("%d features, %d angles, %d throttle" % (len(features), len(y_angle), len(y_throttle)))
    X = np.array(features).astype('float32')
    y_angle = np.array(y_angle).astype('float32')
    with open("features", "wb") as f:
        pickle.dump(X, f)
    with open("angles", "wb") as f:
        pickle.dump(y_angle, f)
# Script entry point: build the feature/angle pickles at 80x60 resolution
# without uploading anything to Swift storage.
if __name__ == '__main__':
    load_data((80, 60), isSwiftEnabled=False)
    # img = Image.open("./resources/log/0_cam-image_array_.jpg")
    # print("Input image", img.format, img.size, img.mode)
    # img.show()
| 238 | 0 | 46 |
17e0d16c450ac6b9ea1068936422fbddc9f95db2 | 92 | py | Python | presalytics/lib/plugins/__init__.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | 4 | 2020-02-21T16:30:46.000Z | 2021-01-12T12:22:03.000Z | presalytics/lib/plugins/__init__.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | 4 | 2019-12-28T19:30:08.000Z | 2020-03-31T19:27:45.000Z | presalytics/lib/plugins/__init__.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | null | null | null | """
Library folder for plugins configured from `presalytics.story.outline.Plugin` class
"""
| 23 | 83 | 0.771739 | """
Library folder for plugins configured from `presalytics.story.outline.Plugin` class
"""
| 0 | 0 | 0 |
4ce237325f75cd18007718e718200915fa2bda4b | 1,264 | py | Python | ikaros/0003-longest-substring-without-repeating-characters/longest_substring.py | MerleLiuKun/GoGoGo | 2387d365bf86e53c0866ff03583c375ce0b5deb7 | [
"MIT"
] | 4 | 2020-01-05T07:24:27.000Z | 2020-09-22T00:44:25.000Z | ikaros/0003-longest-substring-without-repeating-characters/longest_substring.py | MerleLiuKun/GoGoGo | 2387d365bf86e53c0866ff03583c375ce0b5deb7 | [
"MIT"
] | null | null | null | ikaros/0003-longest-substring-without-repeating-characters/longest_substring.py | MerleLiuKun/GoGoGo | 2387d365bf86e53c0866ff03583c375ce0b5deb7 | [
"MIT"
] | 2 | 2020-01-06T09:32:35.000Z | 2020-09-22T00:44:36.000Z |
# Quick manual check: the expected output for "pwwkew" is 3 ("wke").
if __name__ == "__main__":
    sr = "pwwkew"
    so = Solution()
    print(so.method2(sr))
| 22.981818 | 57 | 0.438291 | class Solution:
def method1(self, s: str) -> int:
max_length = 0
cur_length = 0
r = []
for val in s:
if val not in r:
cur_length += 1
max_length = max(max_length, cur_length)
r.append(val)
else:
pidx = r.index(val)
r = r[pidx + 1 :]
r.append(val)
cur_length = len(r)
return max_length
def method2(self, s: str) -> int:
if not s:
return 0
start, max_length = 0, 0
v_map = {}
for idx, val in enumerate(s):
if val in v_map:
start = max(v_map[val] + 1, start)
v_map[val] = idx
max_length = max(max_length, idx - start + 1)
return max_length
def method3(self, s: str) -> int:
res, start, cache = 0, 0, {}
for idx, c in enumerate(s):
if c in cache and cache[c] >= start:
start = cache[c] + 1
else:
cur = idx - start + 1
res = max(res, cur)
cache[c] = idx
return res
# Quick manual check: the expected output for "pwwkew" is 3 ("wke").
if __name__ == "__main__":
    sr = "pwwkew"
    so = Solution()
    print(so.method2(sr))
| 1,074 | -6 | 102 |
a14d84380d3b8f620573bdadf4a2f56bfbdccdb3 | 1,151 | py | Python | jobs/read_noise/v0/producer_read_noise.py | lsst-camera-dh/ts3-analysis | bf3400f286876c5ed4368e2dafe730a8598d0bf7 | [
"BSD-3-Clause-LBNL"
] | null | null | null | jobs/read_noise/v0/producer_read_noise.py | lsst-camera-dh/ts3-analysis | bf3400f286876c5ed4368e2dafe730a8598d0bf7 | [
"BSD-3-Clause-LBNL"
] | null | null | null | jobs/read_noise/v0/producer_read_noise.py | lsst-camera-dh/ts3-analysis | bf3400f286876c5ed4368e2dafe730a8598d0bf7 | [
"BSD-3-Clause-LBNL"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import shutil
import pyfits
import lsst.eotest.image_utils as imutils
import lsst.eotest.sensor as sensorTest
from lcatr.harness.helpers import dependency_glob
bias_files = dependency_glob('*_fe55_bias_*.fits', jobname='ts3_fe55_data')
system_noise_files = dependency_glob('noise_*.fits', jobname='ts3_system_noise')
mask_files = dependency_glob('*_mask.fits')
print bias_files
print system_noise_files
print mask_files
sys.stdout.flush()
# Infer the sensor_id from the first dark filename as per LCA-10140.
sensor_id = os.path.basename(bias_files[0]).split('_')[0]
gain_file = dependency_glob('%s_eotest_results.fits' % sensor_id,
jobname='bright_pixels')[0]
gains = sensorTest.EOTestResults(gain_file)['GAIN']
# Handle annoying off-by-one issue in amplifier numbering:
gains = dict([(amp, gains[amp-1]) for amp in range(1, 17)])
# Make a local copy to fill with task results.
shutil.copy(gain_file, os.path.basename(gain_file))
task = sensorTest.ReadNoiseTask()
task.run(sensor_id, bias_files, gains,
system_noise_files=system_noise_files, mask_files=mask_files)
| 33.852941 | 80 | 0.76629 | #!/usr/bin/env python
import os
import sys
import shutil
import pyfits
import lsst.eotest.image_utils as imutils
import lsst.eotest.sensor as sensorTest
from lcatr.harness.helpers import dependency_glob
bias_files = dependency_glob('*_fe55_bias_*.fits', jobname='ts3_fe55_data')
system_noise_files = dependency_glob('noise_*.fits', jobname='ts3_system_noise')
mask_files = dependency_glob('*_mask.fits')
print bias_files
print system_noise_files
print mask_files
sys.stdout.flush()
# Infer the sensor_id from the first dark filename as per LCA-10140.
sensor_id = os.path.basename(bias_files[0]).split('_')[0]
gain_file = dependency_glob('%s_eotest_results.fits' % sensor_id,
jobname='bright_pixels')[0]
gains = sensorTest.EOTestResults(gain_file)['GAIN']
# Handle annoying off-by-one issue in amplifier numbering:
gains = dict([(amp, gains[amp-1]) for amp in range(1, 17)])
# Make a local copy to fill with task results.
shutil.copy(gain_file, os.path.basename(gain_file))
task = sensorTest.ReadNoiseTask()
task.run(sensor_id, bias_files, gains,
system_noise_files=system_noise_files, mask_files=mask_files)
| 0 | 0 | 0 |
e693962e57ac3a4f3ad2e9e6be752f290d8e1b6a | 32,913 | py | Python | .venv/lib/python3.8/site-packages/pandas/tests/groupby/test_function.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 115 | 2020-06-18T15:00:58.000Z | 2022-03-02T10:13:19.000Z | .venv/lib/python3.8/site-packages/pandas/tests/groupby/test_function.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 37 | 2020-10-20T08:30:53.000Z | 2020-12-22T13:15:45.000Z | .venv/lib/python3.8/site-packages/pandas/tests/groupby/test_function.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 60 | 2020-07-22T14:53:10.000Z | 2022-03-23T10:17:59.000Z | import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
    params=[np.int32, np.int64, np.float32, np.float64],
    ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
    """
    Fixture of numpy dtypes with min and max values used for testing
    cummin and cummax
    """
    dtype = request.param
    # Integer kinds expose their extremes via iinfo; floats via finfo.
    min_val = (
        np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
    )
    max_val = (
        np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
    )
    return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
# describe
# --------------------------------
@pytest.mark.parametrize(
"values",
[
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
},
{"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
| 31.286122 | 88 | 0.568286 | import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
    params=[np.int32, np.int64, np.float32, np.float64],
    ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
    """
    Fixture of numpy dtypes with min and max values used for testing
    cummin and cummax
    """
    dtype = request.param
    # Integer kinds expose their extremes via iinfo; floats via finfo.
    min_val = (
        np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
    )
    max_val = (
        np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
    )
    return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
    "vals",
    [
        ["foo", "bar", "baz"],
        ["foo", "", ""],
        ["", "", ""],
        [1, 2, 3],
        [1, 0, 0],
        [0, 0, 0],
        [1.0, 2.0, 3.0],
        [1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [True, True, True],
        [True, False, False],
        [False, False, False],
        [np.nan, np.nan, np.nan],
    ],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
    """GroupBy.any/all must agree with the Python builtins on each group."""
    df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
    # Figure out expectation using Python builtin
    exp = getattr(builtins, agg_func)(vals)
    # edge case for missing data with skipna and 'any'
    if skipna and all(isna(vals)) and agg_func == "any":
        exp = False
    # Both groups hold the same values, so the expectation is identical.
    exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
    result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
    tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
    # GH 2700: grouped max/min must keep non-numeric columns in the result.
    frame = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
    grouped = frame.groupby("nn")
    for reducer in ("max", "min"):
        # Both the default call and the explicit numeric_only=False call
        # must retain the object-dtype "ss" column.
        assert "ss" in getattr(grouped, reducer)()
        assert "ss" in getattr(grouped, reducer)(numeric_only=False)
def test_min_date_with_nans():
    """GH 26321: grouped min over datetime.date values must work with NaNs
    present in other columns."""
    # Three identical dates, reduced to python datetime.date objects.
    dates = pd.to_datetime(
        pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
    ).dt.date
    df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
    # as_index=False path: the reduced dates come back as a regular column.
    result = df.groupby("b", as_index=False)["c"].min()["c"]
    expected = pd.to_datetime(
        pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
    ).dt.date
    tm.assert_series_equal(result, expected)
    # as_index=True path: same values, group labels on the index.
    result = df.groupby("b")["c"].min()
    expected.index.name = "b"
    tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
    """builtins.sum passed to agg/apply is intercepted and mapped to GroupBy.sum.

    NOTE(review): this relies on pandas' interception of the builtin — the
    skipna semantics (group [NaN, 3.0] -> 3.0) would not hold for a plain
    Python sum, which propagates NaN.
    """
    s = Series([1.0, 2.0, np.nan, 3.0])
    grouped = s.groupby([0, 1, 2, 2])
    result = grouped.agg(builtins.sum)
    result2 = grouped.apply(builtins.sum)
    expected = grouped.sum()
    tm.assert_series_equal(result, expected)
    tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
    """Cython-backed transforms (cumsum/cumprod) on a grouped frame."""
    # this takes the fast apply path
    # cumsum (GH5614): the grouping column "A" is dropped from the result.
    df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
    expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
    result = df.groupby("A").cumsum()
    tm.assert_frame_equal(result, expected)
    # GH 5755 - cumsum is a transformer and should ignore as_index
    result = df.groupby("A", as_index=False).cumsum()
    tm.assert_frame_equal(result, expected)
    # GH 13994: axis=1 transforms operate within each row, so the result
    # matches the ungrouped frame-wide cumsum/cumprod along columns.
    result = df.groupby("A").cumsum(axis=1)
    expected = df.cumsum(axis=1)
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").cumprod(axis=1)
    expected = df.cumprod(axis=1)
    tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
    """Grouping on cut() bins (some of them empty) gives the same median via
    the cython path and via an agg lambda; `observed` is a shared fixture."""
    df = pd.DataFrame(np.random.randint(0, 44, 500))
    # Bins reach up to 55 while the data tops out below 44, so the upper
    # bins are guaranteed to be empty.
    grps = range(0, 55, 5)
    bins = pd.cut(df[0], grps)
    result = df.groupby(bins, observed=observed).median()
    expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
    "method,data",
    [
        ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
        ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
    ],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
    """Non-arithmetic groupby reductions preserve the input dtype.

    GH9311, GH6620: first/last/min/max/nth must return values of the
    original dtype; count always returns int64.
    """
    df = pd.DataFrame(
        [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
    )
    df["b"] = df.b.astype(dtype)
    # Read optional keys WITHOUT mutating `data`: pytest.parametrize reuses
    # the same dict object for every `dtype` value, so the previous
    # `data["args"] = []` write leaked state across test cases.
    args = data.get("args", [])
    out_type = data.get("out_type", dtype)
    df_out = pd.DataFrame(data["df"])
    df_out["b"] = df_out.b.astype(out_type)
    df_out.set_index("a", inplace=True)
    grpd = df.groupby("a")
    t = getattr(grpd, method)(*args)
    tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
    "i",
    [
        (
            Timestamp("2011-01-15 12:50:28.502376"),
            Timestamp("2011-01-20 12:50:28.593448"),
        ),
        (24650000000000001, 24650000000000002),
    ],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
    """Non-arithmetic reductions must keep full precision of int-like values.

    See gh-6620, gh-9311: values that differ only in their lowest bits
    (timestamps, very large ints) must not be rounded through floats.
    """
    first, second = i
    df = pd.DataFrame([{"a": 1, "b": first}, {"a": 1, "b": second}])
    grouped = df.groupby("a")
    # method -> (positional args, expected first-row value of "b")
    cases = {
        "first": ([], first),
        "last": ([], second),
        "min": ([], first),
        "max": ([], second),
        "nth": ([1], second),
        "count": ([], 2),
    }
    for method, (args, expected) in cases.items():
        res = getattr(grouped, method)(*args)
        assert res.iloc[0].b == expected
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(
index=pd.MultiIndex.from_product(
[["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
),
columns=Index(["1", "2"], name="id"),
)
df["1"] = [
np.nan,
1,
np.nan,
np.nan,
11,
np.nan,
np.nan,
2,
np.nan,
np.nan,
22,
np.nan,
]
df["2"] = [
np.nan,
3,
np.nan,
np.nan,
33,
np.nan,
np.nan,
4,
np.nan,
np.nan,
44,
np.nan,
]
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
    """GH 4095: SeriesGroupBy.cumprod matches a per-group apply of cumprod."""
    df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
    actual = df.groupby("key")["value"].cumprod()
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)
    # 2**100 overflows int64, exercising the overflow path.
    df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
    actual = df.groupby("key")["value"].cumprod()
    # if overflows, groupby product casts to float
    # while numpy passes back invalid values
    df["value"] = df["value"].astype(float)
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)
def scipy_sem(*args, **kwargs):
    """Standard error of the mean via scipy, pinned to ddof=1."""
    from scipy.stats import sem as _sem

    return _sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
    """Grouped max on an object column must not introduce NaNs just because
    sibling columns contain missing values."""
    raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
    frame = pd.read_csv(StringIO(raw), parse_dates=[0])
    grouped = frame.groupby("Date")
    # Frame-wise and series-wise reductions must agree, with no NaN leakage.
    frame_max = grouped[["File"]].max()
    series_max = grouped["File"].max().to_frame()
    tm.assert_frame_equal(frame_max, series_max)
    assert not frame_max["File"].isna().any()
def test_nlargest():
    """SeriesGroupBy.nlargest returns the per-group top-k with a (group, original
    position) MultiIndex; keep='last' resolves ties toward later rows."""
    vals = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    keys = Series(list("a" * 5 + "b" * 5))

    observed = vals.groupby(keys).nlargest(3)
    expected = Series(
        [7, 5, 3, 10, 9, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
    )
    tm.assert_series_equal(observed, expected)

    vals = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    expected = Series(
        [3, 2, 1, 3, 3, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
    )
    tm.assert_series_equal(vals.groupby(keys).nlargest(3, keep="last"), expected)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
    """SeriesGroupBy.nsmallest returns the per-group bottom-k with a (group,
    original position) MultiIndex; keep='last' resolves ties toward later rows."""
    vals = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    keys = Series(list("a" * 5 + "b" * 5))

    observed = vals.groupby(keys).nsmallest(3)
    expected = Series(
        [1, 2, 3, 0, 4, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
    )
    tm.assert_series_equal(observed, expected)

    vals = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    expected = Series(
        [0, 1, 1, 0, 1, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
    )
    tm.assert_series_equal(vals.groupby(keys).nsmallest(3, keep="last"), expected)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
    """See gh-12811: numpy-style positional/keyword args must be rejected."""
    grouped = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}).groupby("A")
    bound = getattr(grouped, func)
    msg = "numpy operations are not valid with groupby"
    for invalid_call in (lambda: bound(1, 2, 3), lambda: bound(foo=1)):
        with pytest.raises(UnsupportedFunctionCall, match=msg):
            invalid_call()
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
df.loc[[2, 6], "B"] = min_val
expected.loc[[2, 3, 6, 7], "B"] = min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummin()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby("a").b.cummin()
expected = pd.Series([1, 2, 1], name="b")
tm.assert_series_equal(result, expected)
def test_cummin_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
def test_cummax(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
max_val = numpy_dtypes_for_minmax[2]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_maxs}).astype(dtype)
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ max value for dtype
df.loc[[2, 6], "B"] = max_val
expected.loc[[2, 3, 6, 7], "B"] = max_val
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummax()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby("a").b.cummax()
expected = pd.Series([2, 1, 2], name="b")
tm.assert_series_equal(result, expected)
def test_cummax_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_increasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_decreasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level="first")
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False)
tm.assert_series_equal(result["std"], grouped.std(), check_names=False)
tm.assert_series_equal(result["min"], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ["C"]].groupby(df["A"])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == "A"
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))],
)
group = pd.DataFrame(group.values, columns=group_col, index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))],
)
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
    """GH 14848 - regression from 0.19.0 to 0.19.1: describe() on a groupby
    keyed by tuples must raise rather than silently build a bad MultiIndex."""
    base = DataFrame(
        {
            "x": [1, 2, 3, 4, 5] * 3,
            "y": [10, 20, 30, 40, 50] * 3,
            "z": [100, 200, 300, 400, 500] * 3,
        }
    )
    base["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
    renamed = base.rename(columns={"k": "key"})
    msg = "Names should be list-like for a MultiIndex"
    # Same failure regardless of what the tuple column is called.
    for frame, col in ((base, "k"), (renamed, "key")):
        with pytest.raises(ValueError, match=msg):
            frame.groupby(col).describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 24990,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 25499,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 25499,
}
volumes = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
}
df = pd.DataFrame({"PRICE": prices, "VOLUME": volumes})
result = df.groupby("PRICE").VOLUME.describe()
data = [
df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist(),
]
expected = pd.DataFrame(
data,
index=pd.Index([24990, 25499], name="PRICE"),
columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
def test_groupby_mean_no_overflow():
    """Regression test for GH 22487: mean over huge int64 values must not
    overflow the intermediate sum."""
    connections = [4970, 4749, 4719, 4704, 18446744073699999744]
    frame = pd.DataFrame({"user": ["A"] * 5, "connections": connections})
    averaged = frame.groupby("user")["connections"].mean()
    assert averaged["A"] == 3689348814740003840
@pytest.mark.parametrize(
"values",
[
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
},
{"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
def test_apply_to_nullable_integer_returns_float(values, function):
# https://github.com/pandas-dev/pandas/issues/32219
output = 0.5 if function == "var" else 1.5
arr = np.array([output] * 3, dtype=float)
idx = pd.Index([1, 2, 3], dtype=object, name="a")
expected = pd.DataFrame({"b": arr}, index=idx)
groups = pd.DataFrame(values, dtype="Int64").groupby("a")
result = getattr(groups, function)()
tm.assert_frame_equal(result, expected)
result = groups.agg(function)
tm.assert_frame_equal(result, expected)
result = groups.agg([function])
expected.columns = MultiIndex.from_tuples([("b", function)])
tm.assert_frame_equal(result, expected)
def test_groupby_sum_below_mincount_nullable_integer():
# https://github.com/pandas-dev/pandas/issues/32861
df = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
grouped = df.groupby("a")
idx = pd.Index([0, 1, 2], dtype=object, name="a")
result = grouped["b"].sum(min_count=2)
expected = pd.Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
tm.assert_series_equal(result, expected)
result = grouped.sum(min_count=2)
expected = pd.DataFrame(
{"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx
)
tm.assert_frame_equal(result, expected)
| 27,267 | 0 | 864 |
1b4d91fd688cefd8353cdc6165ee82de3e239164 | 3,032 | py | Python | CoREATAC_PredictionTool.py | UcarLab/CoRE-ATAC | 0a6f4c380d9ad0c542aa01368855ba9ebcd4246b | [
"MIT"
] | 7 | 2020-02-20T18:55:09.000Z | 2022-01-06T21:38:38.000Z | CoREATAC_PredictionTool.py | UcarLab/CoRE-ATAC | 0a6f4c380d9ad0c542aa01368855ba9ebcd4246b | [
"MIT"
] | 4 | 2021-07-30T13:23:30.000Z | 2022-03-02T14:41:52.000Z | CoREATAC_PredictionTool.py | UcarLab/CoRE-ATAC | 0a6f4c380d9ad0c542aa01368855ba9ebcd4246b | [
"MIT"
] | null | null | null | from __future__ import print_function
import tensorflow as tf
import keras
from tensorflow.keras.models import load_model
from keras import backend as K
from keras.layers import Input
import numpy as np
import subprocess
from tensorloader import TensorLoader as tl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, roc_curve, auc, precision_recall_curve,average_precision_score, confusion_matrix
import pandas as pd
from sklearn import impute
import argparse
import os
import time
#Step 0: Process arguments
parser = argparse.ArgumentParser(description='CoRE-ATAC Prediction Tool')
parser.add_argument("datadirectory")
parser.add_argument("basename")
parser.add_argument("model")
parser.add_argument("outputfile")
parser.add_argument('--pf', dest='pf', type=str, default="",
help='Destination of PEAS features)')
parser.add_argument('--le', dest='le', type=str, default="",
help='Destination of LabelEncoder.)')
parser.add_argument('--swapchannels', default=False, action='store_true', dest='swap')
args = parser.parse_args()
datadirectory = args.datadirectory
basename = args.basename
model = args.model
outputfile = args.outputfile
featurefile = args.pf
labelencoder = args.le
swapchannels = args.swap
predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels)
| 36.53012 | 133 | 0.742744 | from __future__ import print_function
import tensorflow as tf
import keras
from tensorflow.keras.models import load_model
from keras import backend as K
from keras.layers import Input
import numpy as np
import subprocess
from tensorloader import TensorLoader as tl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, roc_curve, auc, precision_recall_curve,average_precision_score, confusion_matrix
import pandas as pd
from sklearn import impute
import argparse
import os
import time
#Step 0: Process arguments
parser = argparse.ArgumentParser(description='CoRE-ATAC Prediction Tool')
parser.add_argument("datadirectory")
parser.add_argument("basename")
parser.add_argument("model")
parser.add_argument("outputfile")
parser.add_argument('--pf', dest='pf', type=str, default="",
help='Destination of PEAS features)')
parser.add_argument('--le', dest='le', type=str, default="",
help='Destination of LabelEncoder.)')
parser.add_argument('--swapchannels', default=False, action='store_true', dest='swap')
args = parser.parse_args()
datadirectory = args.datadirectory
basename = args.basename
model = args.model
outputfile = args.outputfile
featurefile = args.pf
labelencoder = args.le
swapchannels = args.swap
def predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels):
    """Run CoRE-ATAC predictions for one sample and write a BED-like table.

    Loads sequence/signal tensors and PEAS features from `datadirectory`,
    runs them through the Keras model at path `model`, and writes one row
    per peak (chr, start, end, four class probabilities) to `outputfile`,
    tab-separated with no header.
    """
    # Bind the loaded network to its own name instead of shadowing the
    # `model` path parameter.
    net = load_model(model)

    # Fall back to the container's bundled PEAS metadata when not provided.
    if featurefile == "":
        featurefile = "/CoRE-ATAC/PEAS/features.txt"
    if labelencoder == "":
        labelencoder = "/CoRE-ATAC/PEAS/labelencoder.txt"

    # Step 1: Load the data
    load_started = time.time()
    seqdata, sigdata, annot, summitpeaks, peaks = tl.readTensors(
        basename, datadirectory, 600, sequence=True, signal=True
    )
    peasfeatures = tl.getPEASFeatures(
        datadirectory + "/peak_features/" + basename + "_features.txt",
        featurefile,
        labelencoder,
        peaks,
    )
    peasfeatures = np.expand_dims(peasfeatures, axis=2)
    sigseq = tl.getSeqSigTensor(seqdata, sigdata)
    print("--- Data loaded in %s seconds ---" % (time.time() - load_started))

    if swapchannels == False:
        # Originally had channels first, but CPU tensorflow requires channels last
        sigseq = np.moveaxis(sigseq, 1, -1)

    # Step 2: Make predictions
    predict_started = time.time()
    sig_predictions, peas_predictions, predictions = net.predict([sigseq, peasfeatures])
    print("--- Data predicted in %s seconds ---" % (time.time() - predict_started))

    # Write the output file: one row per peak, probabilities appended.
    columns = ["Chr", "Start", "End", "Promoter Probability", "Enhancer Probability", "Insulator Probability", "Other Probability"]
    pd.DataFrame(np.concatenate((peaks, predictions), axis=1), columns=columns).to_csv(outputfile, header=None, index=None, sep="\t")
predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels)
| 1,547 | 0 | 23 |
138350e62c4891a09583d2b849d0df077a842cca | 68 | py | Python | chars/scripts/link.py | camsdu59/Zelda_BlenderGame | 0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21 | [
"FSFAP"
] | 27 | 2016-01-13T14:16:13.000Z | 2022-01-03T05:38:44.000Z | chars/scripts/link.py | camsdu59/Zelda_BlenderGame | 0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21 | [
"FSFAP"
] | 1 | 2017-04-29T00:51:26.000Z | 2017-04-29T00:54:43.000Z | chars/scripts/link.py | camsdu59/Zelda_BlenderGame | 0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21 | [
"FSFAP"
] | 14 | 2016-01-20T21:02:37.000Z | 2020-07-19T05:47:20.000Z | from bge import logic | 13.6 | 21 | 0.705882 | from bge import logic
def main(cont):
    """BGE controller entry point: delegate to the owning object's main()."""
    cont.owner.main()
014facf593970e594a62a84d02b0c51102ebfd0a | 8,303 | py | Python | examples/characterization/wigner-tomography/configuration.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 21 | 2021-05-21T08:23:34.000Z | 2022-03-25T11:30:55.000Z | examples/characterization/wigner-tomography/configuration.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 9 | 2021-05-13T19:56:00.000Z | 2021-12-21T05:11:04.000Z | examples/characterization/wigner-tomography/configuration.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 2 | 2021-06-21T10:56:40.000Z | 2021-12-19T14:21:33.000Z | import numpy as np
import matplotlib.pyplot as plt
#############################
# simulation helpers #
#############################
lo_freq_cavity = 8.0e9
cavity_IF = 180e6
lo_freq_qubit = 7.4e9
qubit_IF = 60e6
lo_freq_rr = 9.3e9
rr_IF = 60e6
readout_len = 380
IF_freq = rr_IF
Td = 200
Ts = readout_len - Td
power = 0.2
const_I = [power] * Ts
const_Q = [power] * Ts
alpha = 1 + 1j
sigma_displace = 4
power_displace = alpha / np.sqrt(2 * np.pi) / sigma_displace
displace_len = 8 * sigma_displace
displace_I = gauss(
np.real(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
displace_Q = gauss(
np.imag(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
gauss_sigma = 6
pulse_len = 8 * gauss_sigma
gauss_pulse = gauss(0.2, -(pulse_len / 2 - 3 * gauss_sigma), gauss_sigma, pulse_len)
k = 0.04
chi = 0.023
[tdis_, Idis_, Qdis_, Sdis_] = simulate_pulse(
cavity_IF, -1 * chi, k, displace_len - 1, 0, displace_I, displace_Q
)
[tg_, Ig_, Qg_, Sg_] = simulate_pulse(rr_IF, -1 * chi, k, Ts, Td, const_I, const_Q)
[te_, Ie_, Qe_, Se_] = simulate_pulse(rr_IF, 1 * chi, k, Ts, Td, const_I, const_Q)
divide_signal_factor = 10
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": 0},
2: {"offset": 0},
3: {"offset": 0},
4: {"offset": 0},
5: {"offset": 0},
6: {"offset": 0},
},
"digital_outputs": {
1: {},
},
"analog_inputs": {
1: {"offset": 0},
2: {"offset": 0},
},
},
},
"elements": {
"cavity_I": {
"singleInput": {
"port": ("con1", 3),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_I": "displace_pulse_I",
},
"time_of_flight": 188,
"smearing": 0,
},
"cavity_Q": {
"singleInput": {
"port": ("con1", 4),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_Q": "displace_pulse_Q",
},
"time_of_flight": 188,
"smearing": 0,
},
"rr": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": lo_freq_rr,
"mixer": "mixer_rr",
},
"intermediate_frequency": rr_IF,
"operations": {
"readout": "readout_pulse",
"readout_g": "readout_pulse_g",
"readout_e": "readout_pulse_e",
},
"outputs": {
"out1": ("con1", 1),
"out2": ("con1", 2),
},
"time_of_flight": 188,
"smearing": 0,
},
"qubit": {
"mixInputs": {
"I": ("con1", 5),
"Q": ("con1", 6),
"lo_frequency": lo_freq_qubit,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"x_pi/2": "x_pi/2_pulse",
},
"time_of_flight": 188,
"smearing": 0,
},
},
"pulses": {
"readout_pulse": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_g": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_e": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ie_wf", "Q": "Qe_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"x_pi/2_pulse": {
"operation": "control",
"length": pulse_len,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
"displace_pulse_I": {
"operation": "control",
"length": displace_len,
"waveforms": {
"single": "Idis_wf",
},
},
"displace_pulse_Q": {
"operation": "control",
"length": displace_len,
"waveforms": {"single": "Qdis_wf"},
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"const_wf": {"type": "constant", "sample": 0.1},
"gauss_wf": {"type": "arbitrary", "samples": gauss_pulse},
"Ig_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ig_],
},
"Qg_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qg_],
},
"Ie_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ie_],
},
"Qe_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qe_],
},
"Idis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_I]},
"Qdis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_Q]},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"integW_cos": {
"cosine": [1.0] * 120,
"sine": [0.0] * 120,
},
"integW_sin": {
"cosine": [0.0] * 120,
"sine": [1.0] * 120,
},
},
"mixers": {
"mixer_cavity": [
{
"intermediate_frequency": cavity_IF,
"lo_frequency": lo_freq_cavity,
"correction": [1, 0, 0, 1],
},
],
"mixer_rr": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": lo_freq_rr,
"correction": [1, 0, 0, 1],
},
],
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": lo_freq_qubit,
"correction": [1, 0, 0, 1],
},
],
},
}
| 29.759857 | 89 | 0.427797 | import numpy as np
import matplotlib.pyplot as plt
#############################
# simulation helpers #
#############################
def gauss(amplitude, mu, sigma, length):
    """Sample a Gaussian envelope of `length` points over [-length/2, length/2].

    The envelope is amplitude * exp(-((x - mu)^2) / (2 sigma^2)) evaluated on
    an evenly spaced grid; the result is a float64 numpy array.
    """
    grid = np.linspace(-length / 2, length / 2, length)
    envelope = np.exp(-((grid - mu) ** 2) / (2 * sigma ** 2)) * amplitude
    return np.asarray([float(sample) for sample in envelope])
def simulate_pulse(IF_freq, chi, k, Ts, Td, input_I, input_Q):
    """Numerically integrate a simplified readout-resonator model.

    Args:
        IF_freq: intermediate frequency in Hz (samples are spaced 1 ns apart,
            per the 1e-9 factor below).
        chi: cross-coupling coefficient between the I and Q quadratures.
        k: per-sample decay rate of the resonator.
        Ts: number of driven steps; input_I/input_Q are indexed 0..Ts-1.
        Td: tail length — Td - 1 additional free-decay steps are appended.
        input_I, input_Q: drive waveforms for the two quadratures.

    Returns:
        (t, I, Q, S): sample indices, the two quadrature arrays, and the
        IF-modulated signal S = I*cos(2*pi*f*t*1e-9) + Q*sin(2*pi*f*t*1e-9).
    """
    I = [0]
    Q = [0]
    # solve numerically a simplified version of the readout resonator
    for t in range(Ts):
        # NOTE: Q's update reads the I value appended on the previous line
        # (not the previous step's I) — the statement order is load-bearing.
        I.append(I[-1] + (input_I[t] / 2 - k * I[-1] + Q[-1] * chi))
        Q.append(Q[-1] + (input_Q[t] / 2 - k * Q[-1] - I[-1] * chi))
    # Free decay after the drive is switched off (no input terms).
    for t in range(Td - 1):
        I.append(I[-1] + (-k * I[-1] + Q[-1] * chi))
        Q.append(Q[-1] + (-k * Q[-1] - I[-1] * chi))
    I = np.array(I)
    Q = np.array(Q)
    t = np.arange(len(I))
    # Mix the quadratures up to the intermediate frequency.
    S = I * np.cos(2 * np.pi * IF_freq * t * 1e-9) + Q * np.sin(
        2 * np.pi * IF_freq * t * 1e-9
    )
    return t, I, Q, S
lo_freq_cavity = 8.0e9
cavity_IF = 180e6
lo_freq_qubit = 7.4e9
qubit_IF = 60e6
lo_freq_rr = 9.3e9
rr_IF = 60e6
readout_len = 380
IF_freq = rr_IF
Td = 200
Ts = readout_len - Td
power = 0.2
const_I = [power] * Ts
const_Q = [power] * Ts
alpha = 1 + 1j
sigma_displace = 4
power_displace = alpha / np.sqrt(2 * np.pi) / sigma_displace
displace_len = 8 * sigma_displace
displace_I = gauss(
np.real(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
displace_Q = gauss(
np.imag(alpha),
displace_len / 2 - 3 * int(sigma_displace + 1),
sigma_displace,
displace_len,
)
gauss_sigma = 6
pulse_len = 8 * gauss_sigma
gauss_pulse = gauss(0.2, -(pulse_len / 2 - 3 * gauss_sigma), gauss_sigma, pulse_len)
k = 0.04
chi = 0.023
[tdis_, Idis_, Qdis_, Sdis_] = simulate_pulse(
cavity_IF, -1 * chi, k, displace_len - 1, 0, displace_I, displace_Q
)
[tg_, Ig_, Qg_, Sg_] = simulate_pulse(rr_IF, -1 * chi, k, Ts, Td, const_I, const_Q)
[te_, Ie_, Qe_, Se_] = simulate_pulse(rr_IF, 1 * chi, k, Ts, Td, const_I, const_Q)
divide_signal_factor = 10
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": 0},
2: {"offset": 0},
3: {"offset": 0},
4: {"offset": 0},
5: {"offset": 0},
6: {"offset": 0},
},
"digital_outputs": {
1: {},
},
"analog_inputs": {
1: {"offset": 0},
2: {"offset": 0},
},
},
},
"elements": {
"cavity_I": {
"singleInput": {
"port": ("con1", 3),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_I": "displace_pulse_I",
},
"time_of_flight": 188,
"smearing": 0,
},
"cavity_Q": {
"singleInput": {
"port": ("con1", 4),
# 'lo_frequency': lo_freq_cavity,
# 'mixer': 'mixer_cavity',
},
"intermediate_frequency": cavity_IF,
"operations": {
"displace_Q": "displace_pulse_Q",
},
"time_of_flight": 188,
"smearing": 0,
},
"rr": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": lo_freq_rr,
"mixer": "mixer_rr",
},
"intermediate_frequency": rr_IF,
"operations": {
"readout": "readout_pulse",
"readout_g": "readout_pulse_g",
"readout_e": "readout_pulse_e",
},
"outputs": {
"out1": ("con1", 1),
"out2": ("con1", 2),
},
"time_of_flight": 188,
"smearing": 0,
},
"qubit": {
"mixInputs": {
"I": ("con1", 5),
"Q": ("con1", 6),
"lo_frequency": lo_freq_qubit,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"x_pi/2": "x_pi/2_pulse",
},
"time_of_flight": 188,
"smearing": 0,
},
},
"pulses": {
"readout_pulse": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_g": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ig_wf", "Q": "Qg_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"readout_pulse_e": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "Ie_wf", "Q": "Qe_wf"},
"integration_weights": {
"integW_cos": "integW_cos",
"integW_sin": "integW_sin",
},
"digital_marker": "ON",
},
"x_pi/2_pulse": {
"operation": "control",
"length": pulse_len,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
"displace_pulse_I": {
"operation": "control",
"length": displace_len,
"waveforms": {
"single": "Idis_wf",
},
},
"displace_pulse_Q": {
"operation": "control",
"length": displace_len,
"waveforms": {"single": "Qdis_wf"},
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"const_wf": {"type": "constant", "sample": 0.1},
"gauss_wf": {"type": "arbitrary", "samples": gauss_pulse},
"Ig_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ig_],
},
"Qg_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qg_],
},
"Ie_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Ie_],
},
"Qe_wf": {
"type": "arbitrary",
"samples": [float(arg / divide_signal_factor) for arg in Qe_],
},
"Idis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_I]},
"Qdis_wf": {"type": "arbitrary", "samples": [float(arg) for arg in displace_Q]},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"integW_cos": {
"cosine": [1.0] * 120,
"sine": [0.0] * 120,
},
"integW_sin": {
"cosine": [0.0] * 120,
"sine": [1.0] * 120,
},
},
"mixers": {
"mixer_cavity": [
{
"intermediate_frequency": cavity_IF,
"lo_frequency": lo_freq_cavity,
"correction": [1, 0, 0, 1],
},
],
"mixer_rr": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": lo_freq_rr,
"correction": [1, 0, 0, 1],
},
],
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": lo_freq_qubit,
"correction": [1, 0, 0, 1],
},
],
},
}
| 853 | 0 | 50 |
0b15d56cbaad2a4b854cb25da85ed580da2ed79c | 3,994 | py | Python | litex/build/efinix/dbparser.py | navan93/litex | 2886fe170160d8481b77a53c11132d881b7bfa48 | [
"ADSL"
] | 1,501 | 2016-04-19T18:16:21.000Z | 2022-03-31T17:46:31.000Z | litex/build/efinix/dbparser.py | enjoy-digital/litex | 8fa4de5ede281f599879c6b1b2cd87b74ac67240 | [
"ADSL"
] | 1,135 | 2016-04-19T05:49:14.000Z | 2022-03-31T15:21:19.000Z | litex/build/efinix/dbparser.py | navan93/litex | 2886fe170160d8481b77a53c11132d881b7bfa48 | [
"ADSL"
] | 357 | 2016-04-19T05:00:24.000Z | 2022-03-31T11:28:32.000Z | #
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <franck.jullien@collshade.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import xml.etree.ElementTree as et
# NameSpaces ---------------------------------------------------------------------------------------
namespaces = {
'efxpt' : "http://www.efinixinc.com/peri_device_db",
'xi' : "http://www.w3.org/2001/XInclude"
}
# Efinix Database Parser ---------------------------------------------------------------------------
| 31.698413 | 100 | 0.551077 | #
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <franck.jullien@collshade.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import xml.etree.ElementTree as et
# NameSpaces ---------------------------------------------------------------------------------------
namespaces = {
'efxpt' : "http://www.efinixinc.com/peri_device_db",
'xi' : "http://www.w3.org/2001/XInclude"
}
# Efinix Database Parser ---------------------------------------------------------------------------
class EfinixDbParser():
    """Query helper for the Efinity peripheral device database.

    The database lives under <efinity_path>/pt/db/ and consists of a
    devicemap.csv index plus per-device XML files (package and die
    descriptions) resolved through xi:include references.
    """
    def __init__(self, efinity_path, device):
        # All database files are resolved relative to <efinity_path>/pt/db/.
        self.efinity_db_path = efinity_path + '/pt/db/'
        self.device = device
    def get_device_map(self, device):
        """Return the devicemap.csv row whose first column equals `device`,
        or None when the device is unknown."""
        with open(self.efinity_db_path + 'devicemap.csv') as f:
            reader = csv.reader(f)
            data = list(reader)
        for d in data:
            if d[0] == device:
                return d
        return None
    def get_package_file_name(self, dmap):
        """Return the package XML file name referenced (via xi:include)
        from the device file named in column 2 of `dmap`, or None."""
        tree = et.parse(self.efinity_db_path + dmap[2])
        root = tree.getroot()
        inc = root.findall('xi:include', namespaces)
        for i in inc:
            if 'package' in i.get('href'):
                # href looks like "<dir>/<file>"; keep only the file part.
                return i.get('href').split('/')[1]
        return None
    def get_die_file_name(self, dmap):
        """Return the die XML file name referenced (via xi:include)
        from the device file named in column 2 of `dmap`, or None."""
        tree = et.parse(self.efinity_db_path + dmap[2])
        root = tree.getroot()
        inc = root.findall('xi:include', namespaces)
        for i in inc:
            if 'die' in i.get('href'):
                return i.get('href').split('/')[1]
        return None
    def get_pad_name_xml(self, dmap, pin):
        """Map a package pin name to its pad name via the package XML,
        or None when the pin is not found."""
        package_file = self.get_package_file_name(dmap)
        tree = et.parse(self.efinity_db_path + 'package/' + package_file)
        root = tree.getroot()
        pm = root.findall('efxpt:package_map', namespaces)
        for p in pm:
            if p.get('package_pin') == pin:
                return (p.get('pad_name'))
        return None
    def get_instance_name_xml(self, dmap, pad):
        """Map a pad name to its io instance name via the die XML,
        or None when the pad is not found."""
        die = self.get_die_file_name(dmap)
        tree = et.parse(self.efinity_db_path + 'die/' + die)
        root = tree.getroot()
        ipd = root.find('efxpt:io_pad_definition', namespaces)
        ios = ipd.findall('efxpt:io_pad_map', namespaces)
        for io in ios:
            if io.get('pad_name') == pad:
                return (io.get('instance'))
        return None
    def get_block_instance_names(self, block):
        """Return the names of every periphery instance of type `block`
        (e.g. 'pll') declared in this device's die XML."""
        dmap = self.get_device_map(self.device)
        die = self.get_die_file_name(dmap)
        tree = et.parse(self.efinity_db_path + 'die/' + die)
        root = tree.getroot()
        peri = root.findall('efxpt:periphery_instance', namespaces)
        names = []
        for p in peri:
            if p.get('block') == block:
                names.append(p.get('name'))
        print(f"block {block}: names:{names}")
        return names
    def get_pll_inst_from_gpio_inst(self, dmap, inst):
        """Find the PLL fed by gpio instance `inst`.

        Returns (pll_name, refclk_no) or None. refclk_no is 1 when the
        connection index is '3' — presumably the second refclk input of the
        PLL; confirm against the die XML schema.
        """
        die = self.get_die_file_name(dmap)
        tree = et.parse(self.efinity_db_path + 'die/' + die)
        root = tree.getroot()
        peri = root.findall('efxpt:periphery_instance', namespaces)
        for p in peri:
            if p.get('block') == 'pll':
                conn = p.findall('efxpt:single_conn', namespaces)
                for c in conn:
                    if c.get('instance') == inst:
                        refclk_no = 0
                        if c.get('index') == '3':
                            refclk_no = 1
                        return (p.get('name'), refclk_no)
        return None
    def get_pll_inst_from_pin(self, pin):
        """Resolve a package pin to (pll_name, refclk_no): pin -> pad ->
        io instance -> PLL connection. Returns None when not connected."""
        dmap = self.get_device_map(self.device)
        pad = self.get_pad_name_xml(dmap, pin)
        inst = self.get_instance_name_xml(dmap, pad)
        return self.get_pll_inst_from_gpio_inst(dmap, inst)
    def get_gpio_instance_from_pin(self, pin):
        # NOTE(review): despite the name this returns the *pad* name, not the
        # io instance name — presumably the pad name doubles as the GPIO
        # instance identifier for this flow; confirm against callers.
        dmap = self.get_device_map(self.device)
        return self.get_pad_name_xml(dmap, pin)
| 3,165 | 2 | 292 |
f0c04942cd991d526606ad9c0e44cc698dbfb490 | 837 | py | Python | tests/test_regexps.py | Luminaar/cocobump | 5771b0d1bb3be011f9485bc77057c0bc15ce0352 | [
"BSD-3-Clause"
] | null | null | null | tests/test_regexps.py | Luminaar/cocobump | 5771b0d1bb3be011f9485bc77057c0bc15ce0352 | [
"BSD-3-Clause"
] | 3 | 2021-07-20T08:01:36.000Z | 2021-08-01T03:11:32.000Z | tests/test_regexps.py | Luminaar/cocobump | 5771b0d1bb3be011f9485bc77057c0bc15ce0352 | [
"BSD-3-Clause"
] | null | null | null | import re
import pytest
from convbump.regexps import ensure_regexp_dots, to_regexp
@pytest.mark.parametrize(
"value, expected",
(
("one two three", r"one two three"),
("{version}", r"{version}"),
("-{type}.{number}", r"-{type}\.{number}"),
("{type}{number}", r"{type}{number}"),
),
)
@pytest.mark.parametrize(
"value, expected",
(
("one two three", re.compile(r"^one two three$")),
("{version}", re.compile(r"^(?P<version>.+)$")),
("-{type}.{number}", re.compile(r"^-(?P<type>.+)\.(?P<number>.+)$")),
("{type}{number}", re.compile(r"^(?P<type>.+)(?P<number>.+)$")),
),
)
| 26.15625 | 77 | 0.567503 | import re
import pytest
from convbump.regexps import ensure_regexp_dots, to_regexp
@pytest.mark.parametrize(
"value, expected",
(
("one two three", r"one two three"),
("{version}", r"{version}"),
("-{type}.{number}", r"-{type}\.{number}"),
("{type}{number}", r"{type}{number}"),
),
)
def test_ensure_regexp_dots(value, expected):
    """Literal dots in the template are escaped; placeholders are untouched."""
    escaped = ensure_regexp_dots(value)
    assert escaped == expected
@pytest.mark.parametrize(
"value, expected",
(
("one two three", re.compile(r"^one two three$")),
("{version}", re.compile(r"^(?P<version>.+)$")),
("-{type}.{number}", re.compile(r"^-(?P<type>.+)\.(?P<number>.+)$")),
("{type}{number}", re.compile(r"^(?P<type>.+)(?P<number>.+)$")),
),
)
def test_to_regexp(value, expected):
    """Templates compile to anchored regexps with named capture groups."""
    compiled = to_regexp(value)
    assert compiled == expected
| 128 | 0 | 44 |
2a57be2e3d64e49a08677ee2dad2e7e1738f3265 | 366 | py | Python | CONTENT/PYTHON/LEETCODE/172_factorial_trailing_zeros/factorial_trailing_zeros.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | CONTENT/PYTHON/LEETCODE/172_factorial_trailing_zeros/factorial_trailing_zeros.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | CONTENT/PYTHON/LEETCODE/172_factorial_trailing_zeros/factorial_trailing_zeros.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # @param {integer} n
# @return {integer}
| 20.333333 | 32 | 0.393443 | class Solution:
# @param {integer} n
# @return {integer}
def trailingZeroes(self, n):
if n <= 0:
return 0
twos = 0
fives = 0
n2 = n
while n2:
n2 = n2 / 2
twos += n2
n5 = n
while n5:
n5 = n5 / 5
fives += n5
return min(twos, fives)
| 275 | -6 | 48 |
34116009d828dd230bd4ece7f92d1e2518654b63 | 4,717 | py | Python | app/shared_code/imap_sync.py | KubaTaba1uga/spam_recycler | 76056635fdf610c6304e8034a3f66f235761ca76 | [
"MIT"
] | 2 | 2021-12-17T18:12:51.000Z | 2021-12-17T18:12:52.000Z | app/shared_code/imap_sync.py | KubaTaba1uga/spam_recycler | 76056635fdf610c6304e8034a3f66f235761ca76 | [
"MIT"
] | null | null | null | app/shared_code/imap_sync.py | KubaTaba1uga/spam_recycler | 76056635fdf610c6304e8034a3f66f235761ca76 | [
"MIT"
] | null | null | null | from imap_tools.errors import MailboxFolderSelectError
from imap_tools import MailBox, AND
import logging
import datetime
def validate_credentials(email_address, server_address, password):
"""Validate IMAP credentials.
If IMAP validation succeed
return True
Args:
URL (str): [address to which app will connect to, using IMAP]
email_address (str): [username which app should validate]
password (str): [password which app should validate]
Returns:
[MailBox/bool]: [MailBox if credentials are valid,
False if credentials are not valid]
"""
try:
return MailBox(server_address).login(email_address, password)
except Exception as e:
logging.warning(f"Validate creadentials failed - {e}\n email address: {email_address}\n server address {server_address}")
""" Because exceptions types thrown by `imap_tools` are not predictible,
`Exception` is used
Code before:
except ConnectionRefusedError:
pass
except IMAP4.error:
pass
except MailboxLoginError:
pass
"""
return False
def create_search_from_str(start_at, end_at):
""" Str formats:
start_at: "YYYY-MM-DD"
end_at: "YYYY-MM-DD"
"""
start_date_list = start_at.split('-')
end_date_list = end_at.split('-')
start_at_date = datetime.date(
int(start_date_list[0]),
int(start_date_list[1]),
int(start_date_list[2]))
end_at_date = datetime.date(
int(end_date_list[0]),
int(end_date_list[1]),
int(end_date_list[2]))
return AND(
AND(date_gte=start_at_date),
AND(date_lt=end_at_date))
def validate_folder(mailbox, folder):
"""Chack folder exsistance, inside mailbox
If folder validation succeed
return True else False
Args:
mailbox (imap_tools.MailBox): [mailbox which app use to check folder exsistance]
folder_name (str): [folder which app validate]
Returns:
[bool]: [True if folder exsist in mailbox,
False if not]
"""
try:
mailbox.folder.set(folder)
except MailboxFolderSelectError:
return False
return True
def validate_folder_list(folder_list, mailbox, form):
"""Validate folders list for report usage
in case of error, add them to `form` object
"""
if len(folder_list) == 0:
form.add_error(None, 'No folder selected')
else:
for folder in folder_list:
if not validate_folder(mailbox, folder):
form.add_error(None,
f'Folder: {folder}\n is unavailable for scan')
if len(form.errors) == 0:
return True
else:
return False
def create_mailbox_decorator(func):
""" If function use Mailbox object,
use decorator to avoid creating MailBox object
inside function
Example:
Without decorator
def get_mailbox_folder_list(email_address, server_address, password):
mailbox = create_mailbox(email_address, server_address, password)
folder_list = mailbox.folder.list()
mailbox.logut()
return folder_list
With decorator
@create_mailbox_decorator
def get_mailbox_folder_list(mailbox):
return mailbox.folder.list()
"""
return decorator
@create_mailbox_decorator
@create_mailbox_decorator
def gather_emails_GUIDs(mailbox, search, folder):
""" Download GUID of messages passing search requirements
"""
mailbox.folder.set(folder)
return (email for email in mailbox.uids(search))
@create_mailbox_decorator
| 27.747059 | 129 | 0.632817 | from imap_tools.errors import MailboxFolderSelectError
from imap_tools import MailBox, AND
import logging
import datetime
def validate_credentials(email_address, server_address, password):
    """Attempt an IMAP login and return the logged-in mailbox, or False.

    Args:
        email_address (str): username to authenticate with.
        server_address (str): address of the IMAP server to connect to.
        password (str): password to authenticate with.

    Returns:
        MailBox/bool: the connected MailBox on success, False on any failure.
    """
    # imap_tools raises unpredictable exception types on failure
    # (ConnectionRefusedError, IMAP4.error, MailboxLoginError, ...),
    # so the broad Exception catch here is deliberate.
    try:
        return MailBox(server_address).login(email_address, password)
    except Exception as e:
        logging.warning(f"Validate creadentials failed - {e}\n email address: {email_address}\n server address {server_address}")
        return False
def create_search_from_str(start_at, end_at):
    """Build an imap_tools search matching dates in [start_at, end_at).

    Str formats:
        start_at: "YYYY-MM-DD"
        end_at: "YYYY-MM-DD"

    Raises:
        ValueError: when either string is not a valid ISO date.
    """
    # date.fromisoformat parses "YYYY-MM-DD" directly and validates the
    # value, replacing the manual split('-')/int() conversion.
    start_at_date = datetime.date.fromisoformat(start_at)
    end_at_date = datetime.date.fromisoformat(end_at)
    return AND(
        AND(date_gte=start_at_date),
        AND(date_lt=end_at_date))
def create_mailbox(email_address, server_address, password):
    """Connect to `server_address` and return a logged-in MailBox."""
    mailbox = MailBox(server_address)
    return mailbox.login(email_address, password)
def validate_folder(mailbox, folder):
    """Check that `folder` exists (is selectable) inside `mailbox`.

    Args:
        mailbox (imap_tools.MailBox): mailbox used for the existence check.
        folder (str): folder name to validate.

    Returns:
        bool: True when the folder can be selected, False otherwise.
    """
    try:
        mailbox.folder.set(folder)
        return True
    except MailboxFolderSelectError:
        return False
def validate_folder_list(folder_list, mailbox, form):
    """Validate the folders selected for a report.

    Problems are recorded on `form` via form.add_error; the return value is
    True only when no errors were recorded at all.
    """
    if not folder_list:
        form.add_error(None, 'No folder selected')
    else:
        for folder in folder_list:
            if not validate_folder(mailbox, folder):
                form.add_error(None,
                               f'Folder: {folder}\n is unavailable for scan')
    return len(form.errors) == 0
def create_mailbox_decorator(func):
    """Turn a function taking a MailBox into one taking credentials.

    The wrapped function is invoked as
    `wrapped(mailbox_credentials, *args, **kwargs)`, where
    `mailbox_credentials` is a dict accepted by `create_mailbox`.  A mailbox
    is opened before the call and always logged out afterwards, so callers
    never manage the connection themselves.

    Example:
        @create_mailbox_decorator
        def get_mailbox_folder_list(mailbox):
            return mailbox.folder.list()
    """
    def wrapper(mailbox_credentials, *args, **kwargs):
        mailbox = create_mailbox(**mailbox_credentials)
        result = func(mailbox, *args, **kwargs)
        mailbox.logout()
        return result
    return wrapper
@create_mailbox_decorator
def get_mailbox_folder_list(mailbox):
    """Return the list of folders available in the mailbox."""
    return mailbox.folder.list()
@create_mailbox_decorator
def gather_emails_GUIDs(mailbox, search, folder):
    """Yield the GUIDs of the messages in `folder` matching `search`."""
    mailbox.folder.set(folder)
    guids = mailbox.uids(search)
    return (guid for guid in guids)
@create_mailbox_decorator
def download_message_by_guid(mailbox, guid):
    """Fetch and return the first message with the given GUID (None if absent)."""
    messages = mailbox.fetch(AND(uid=[guid]))
    for message in messages:
        return message
def parse_message(message):
    """Flatten an imap_tools message into a plain dict of its fields."""
    recipients = " ,".join(to.email for to in message.to_values)
    return {
        'subject': message.subject,
        'sender': message.from_values.email,
        'to_recipients': recipients,
        'received_at': message.date,
        'body': message.html,
        # key spelling ("orginal") kept byte-for-byte for compatibility
        'orginal_message': message.obj,
    }
| 758 | 0 | 116 |
52b9d43bb25a77f006fb97c8fe40e8e7b244fb52 | 365 | py | Python | src/metrics.py | K-T-Ng/Ultrasound_Nerve_Segmentation | 51caf40b971995abd13d4158df53609943a508fe | [
"MIT"
] | null | null | null | src/metrics.py | K-T-Ng/Ultrasound_Nerve_Segmentation | 51caf40b971995abd13d4158df53609943a508fe | [
"MIT"
] | null | null | null | src/metrics.py | K-T-Ng/Ultrasound_Nerve_Segmentation | 51caf40b971995abd13d4158df53609943a508fe | [
"MIT"
] | 2 | 2022-01-04T08:51:29.000Z | 2022-01-04T12:02:00.000Z | import torch
| 26.071429 | 53 | 0.641096 | import torch
def compute_dice(inputs, targets):
    """Per-sample Dice coefficient for a batch of masks.

    Both tensors are flattened to (batch, -1); the smoothing epsilon keeps
    the ratio defined (and equal to 1) when both masks are empty.
    Returns a 1-D tensor of length batch.
    """
    eps = 1e-5
    batch = inputs.shape[0]
    flat_inputs = inputs.reshape(batch, -1)
    flat_targets = targets.reshape(batch, -1)
    intersection = (flat_inputs * flat_targets).sum(dim=1)
    union = (flat_inputs + flat_targets).sum(dim=1)
    return (2 * intersection + eps) / (union + eps)
| 324 | 0 | 25 |