hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
163743d78c16425801fc8839cd339747e3afc7a4 | 15,109 | py | Python | dynaconf/cli.py | aruncse01/dynaconf | 61c361821cf89b728e3bf3ae2c0b773e3387e120 | [
"MIT"
] | null | null | null | dynaconf/cli.py | aruncse01/dynaconf | 61c361821cf89b728e3bf3ae2c0b773e3387e120 | [
"MIT"
] | null | null | null | dynaconf/cli.py | aruncse01/dynaconf | 61c361821cf89b728e3bf3ae2c0b773e3387e120 | [
"MIT"
] | null | null | null | import io
import os
import sys
import toml
import click
import pprint
import importlib
import webbrowser
from pathlib import Path
from dynaconf import default_settings
from dynaconf import constants
from dynaconf.validator import Validator
from dynaconf.utils.parse_conf import parse_conf_data
from dotenv import cli as dotenv_cli
from contextlib import suppress
flask_app = None
django_app = None
if 'FLASK_APP' in os.environ: # pragma: no cover
with suppress(ImportError, click.UsageError):
from flask.cli import ScriptInfo
flask_app = ScriptInfo().load_app()
settings = flask_app.config
click.echo(click.style('Flask app detected', fg='white', bg='black'))
if 'DJANGO_SETTINGS_MODULE' in os.environ: # pragma: no cover
sys.path.insert(0, os.path.abspath('.'))
with suppress(Exception):
import dynaconf.contrib.django_dynaconf # noqa
from django.conf import settings as django_settings
django_settings.configure()
settings = django_settings
django_app = True
click.echo(click.style('Django app detected', fg='white', bg='black'))
if not django_app and not flask_app:
from dynaconf import settings
CWD = Path.cwd()
ENVS = ['default', 'development', 'staging', 'testing', 'production', 'global']
EXTS = ['ini', 'toml', 'yaml', 'json', 'py', 'env']
WRITERS = ['ini', 'toml', 'yaml', 'json', 'py', 'redis', 'vault', 'env']
ENC = default_settings.ENCODING_FOR_DYNACONF
def split_vars(_vars):
"""Splits values like foo=bar=zaz in {'foo': 'bar=zaz'}"""
return {
k.upper().strip(): parse_conf_data(v.strip(), tomlfy=True)
for k, _, v
in [item.partition('=') for item in _vars]
} if _vars else {}
def read_file_in_root_directory(*names, **kwargs):
"""Read a file."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf-8')
).read().strip()
def show_banner(ctx, param, value):
"""Shows dynaconf awesome banner"""
if not value or ctx.resilient_parsing:
return
click.echo(settings.dynaconf_banner)
click.echo('Learn more at: http://github.com/rochacbruno/dynaconf')
ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True, help="Show dynaconf version")
@click.option('--docs', is_flag=True, callback=open_docs, expose_value=False,
is_eager=True, help="Open documentation in browser")
@click.option('--banner', is_flag=True, callback=show_banner,
expose_value=False, is_eager=True, help="Show awesome banner")
def main():
"""Dynaconf - Command Line Interface\n
Documentation: http://dynaconf.readthedocs.io/
"""
@main.command()
@click.option('--format', 'fileformat', '-f', default='toml',
type=click.Choice(EXTS))
@click.option('--path', '-p', default=CWD,
help='defaults to current directory')
@click.option('--env', '-e', default=None,
help='Sets the working env in `.env` file')
@click.option('--vars', '_vars', '-v', multiple=True, default=None,
help=(
'extra values to write to settings file '
'file e.g: `dynaconf init -v NAME=foo -v X=2'
))
@click.option('--secrets', '_secrets', '-s', multiple=True, default=None,
help=(
'secret key values to be written in .secrets '
'e.g: `dynaconf init -s TOKEN=kdslmflds'
))
@click.option('--wg/--no-wg', default=True)
@click.option('-y', default=False, is_flag=True)
def init(fileformat, path, env, _vars, _secrets, wg, y):
"""Inits a dynaconf project
By default it creates a settings.toml and a .secrets.toml
for [default|development|staging|testing|production|global] envs.
The format of the files can be changed passing
--format=yaml|json|ini|py.
This command must run on the project's root folder or you must pass
--path=/myproject/root/folder.
If you want to have a .env created with the ENV defined there e.g:
`ENV_FOR_DYNACONF=production` just pass --env=production and then .env
will also be created and the env defined to production.
"""
click.echo('Cofiguring your Dynaconf environment')
env = env or settings.current_env.lower()
loader = importlib.import_module(
"dynaconf.loaders.{}_loader".format(fileformat)
)
# Turn foo=bar=zaz in {'foo': 'bar=zaz'}
env_data = split_vars(_vars)
_secrets = split_vars(_secrets)
# create placeholder data for every env
settings_data = {k: {'value': 'value for {}'.format(k)} for k in ENVS}
secrets_data = {k: {'secret': 'secret for {}'.format(k)} for k in ENVS}
if env_data:
settings_data[env] = env_data
settings_data['default'] = {k: 'default' for k in env_data}
if _secrets:
secrets_data[env] = _secrets
secrets_data['default'] = {k: 'default' for k in _secrets}
path = Path(path)
if str(path).endswith(constants.ALL_EXTENSIONS + ('py',)):
settings_path = path
secrets_path = path.parent / '.secrets.{}'.format(fileformat)
dotenv_path = path.parent / '.env'
gitignore_path = path.parent / '.gitignore'
else:
if fileformat == 'env':
if str(path) in ('.env', './.env'): # pragma: no cover
settings_path = path
elif str(path).endswith('/.env'):
settings_path = path
elif str(path).endswith('.env'): # pragma: no cover
settings_path = path.parent / '.env'
else:
settings_path = path / '.env'
Path.touch(settings_path)
secrets_path = None
else:
settings_path = path / 'settings.{}'.format(fileformat)
secrets_path = path / '.secrets.{}'.format(fileformat)
dotenv_path = path / '.env'
gitignore_path = path / '.gitignore'
if fileformat in ['py', 'env']:
# for Python and .env files writes a single env
settings_data = settings_data[env]
secrets_data = secrets_data[env]
if not y and settings_path and settings_path.exists(): # pragma: no cover
click.confirm(
'{} exists do you want to overwrite it?'.format(settings_path),
abort=True
)
if not y and secrets_path and secrets_path.exists(): # pragma: no cover
click.confirm(
'{} exists do you want to overwrite it?'.format(secrets_path),
abort=True
)
if settings_path and settings_data:
loader.write(settings_path, settings_data, merge=True)
if secrets_path and secrets_data:
loader.write(secrets_path, secrets_data, merge=True)
# write .env file
# if env not in ['default', 'development']: # pragma: no cover
if not dotenv_path.exists(): # pragma: no cover
Path.touch(dotenv_path)
dotenv_cli.set_key(str(dotenv_path), 'ENV_FOR_DYNACONF', env.upper())
else: # pragma: no cover
click.echo(
'.env already exists please set ENV_FOR_DYNACONF={}'.format(
env.upper()
)
)
if wg:
# write .gitignore
ignore_line = ".secrets.*"
comment = "\n# Ignore dynaconf secret files\n"
if not gitignore_path.exists():
with io.open(str(gitignore_path), 'w', encoding=ENC) as f:
f.writelines([comment, ignore_line, '\n'])
else:
existing = ignore_line in io.open(
str(gitignore_path), encoding=ENC
).read()
if not existing: # pragma: no cover
with io.open(str(gitignore_path), 'a+', encoding=ENC) as f:
f.writelines(
[comment, ignore_line, '\n']
)
@main.command(name='list')
@click.option('--env', '-e', default=None,
help='Filters the env to get the values')
@click.option('--key', '-k', default=None, help='Filters a single key')
@click.option('--more', '-m', default=None,
help='Pagination more|less style', is_flag=True)
@click.option('--loader', '-l', default=None,
help='a loader identifier to filter e.g: toml|yaml')
def _list(env, key, more, loader):
"""Lists all defined config values"""
if env:
env = env.strip()
if key:
key = key.strip()
if loader:
loader = loader.strip()
if env:
settings.setenv(env)
cur_env = settings.current_env.lower()
click.echo(
click.style(
'Working in %s environment ' % cur_env,
bold=True, bg='blue', fg='white'
)
)
if not loader:
data = settings.store
else:
identifier = '{}_{}'.format(loader, cur_env)
data = settings._loaded_by_loaders.get(identifier, {})
data = data or settings._loaded_by_loaders.get(loader, {})
# remove to avoid displaying twice
data.pop('SETTINGS_MODULE', None)
if not key:
datalines = '\n'.join(
'%s: %s' % (click.style(k, bg=color(k), fg='white'),
pprint.pformat(v))
for k, v in data.items()
)
(click.echo_via_pager if more else click.echo)(datalines)
else:
key = key.upper()
value = data.get(key)
if not value:
click.echo(click.style('Key not found', bg='red', fg='white'))
return
click.echo(
'%s: %s' % (
click.style(key.upper(), bg=color(key), fg='white'),
pprint.pformat(value)
)
)
if env:
settings.setenv()
@main.command()
@click.argument('to', required=True, type=click.Choice(WRITERS))
@click.option('--vars', '_vars', '-v', multiple=True, default=None,
help=(
'key values to be written '
'e.g: `dynaconf write toml -e NAME=foo -e X=2'
))
@click.option('--secrets', '_secrets', '-s', multiple=True, default=None,
help=(
'secret key values to be written in .secrets '
'e.g: `dynaconf write toml -s TOKEN=kdslmflds -s X=2'
))
@click.option('--path', '-p', default=CWD,
help='defaults to current directory/settings.{ext}')
@click.option(
'--env', '-e', default='default',
help=(
'env to write to defaults to DEVELOPMENT for files '
'for external sources like Redis and Vault '
'it will be DYNACONF or the value set in '
'$GLOBAL_ENV_FOR_DYNACONF'
)
)
@click.option('-y', default=False, is_flag=True)
def write(to, _vars, _secrets, path, env, y):
"""Writes data to specific source"""
_vars = split_vars(_vars)
_secrets = split_vars(_secrets)
loader = importlib.import_module("dynaconf.loaders.{}_loader".format(to))
if to in EXTS:
# Lets write to a file
path = Path(path)
if str(path).endswith(constants.ALL_EXTENSIONS + ('py',)):
settings_path = path
secrets_path = path.parent / '.secrets.{}'.format(to)
else:
if to == 'env':
if str(path) in ('.env', './.env'): # pragma: no cover
settings_path = path
elif str(path).endswith('/.env'):
settings_path = path
elif str(path).endswith('.env'):
settings_path = path.parent / '.env'
else:
settings_path = path / '.env'
Path.touch(settings_path)
secrets_path = None
_vars.update(_secrets)
else:
settings_path = path / 'settings.{}'.format(to)
secrets_path = path / '.secrets.{}'.format(to)
if _vars and not y and settings_path and settings_path.exists(): # pragma: no cover # noqa
click.confirm(
'{} exists do you want to overwrite it?'.format(settings_path),
abort=True
)
if _secrets and not y and secrets_path and secrets_path.exists(): # pragma: no cover # noqa
click.confirm(
'{} exists do you want to overwrite it?'.format(secrets_path),
abort=True
)
if to not in ['py', 'env']:
if _vars:
_vars = {env: _vars}
if _secrets:
_secrets = {env: _secrets}
if _vars and settings_path:
loader.write(settings_path, _vars, merge=True)
click.echo('Data successful written to {}'.format(settings_path))
if _secrets and secrets_path:
loader.write(secrets_path, _secrets, merge=True)
click.echo('Data successful written to {}'.format(secrets_path))
else: # pragma: no cover
# lets write to external source
loader.write(settings, _vars, **_secrets)
click.echo('Data successful written to {}'.format(to))
@main.command()
@click.option('--path', '-p', default=CWD,
help='defaults to current directory')
def validate(path): # pragma: no cover
"""Validates Dynaconf settings based on rules defined in
dynaconf_validators.toml"""
# reads the 'dynaconf_validators.toml' from path
# for each section register the validator for specific env
# call validate
if not str(path).endswith('.toml'):
path = path / "dynaconf_validators.toml"
if not Path(path).exists(): # pragma: no cover # noqa
click.echo(click.style(
"{} not found".format(path), fg="white", bg="red"
))
sys.exit(1)
validation_data = toml.load(open(str(path)))
for env, name_data in validation_data.items():
for name, data in name_data.items():
if not isinstance(data, dict): # pragma: no cover
click.echo(click.style(
"Invalid rule for parameter '{}'".format(name),
fg="white", bg="yellow"
))
else: # pragma: no cover
data.setdefault('env', env)
click.echo(click.style(
"Validating '{}' with '{}'".format(name, data),
fg="white", bg="blue"
))
Validator(name, **data).validate(settings)
# pragma: no cover
click.echo(click.style(
"Validation success!", fg="white", bg="green"
))
| 34.813364 | 101 | 0.586538 | import io
import os
import sys
import toml
import click
import pprint
import importlib
import webbrowser
from pathlib import Path
from dynaconf import default_settings
from dynaconf import constants
from dynaconf.validator import Validator
from dynaconf.utils.parse_conf import parse_conf_data
from dotenv import cli as dotenv_cli
from contextlib import suppress
flask_app = None
django_app = None
if 'FLASK_APP' in os.environ: # pragma: no cover
with suppress(ImportError, click.UsageError):
from flask.cli import ScriptInfo
flask_app = ScriptInfo().load_app()
settings = flask_app.config
click.echo(click.style('Flask app detected', fg='white', bg='black'))
if 'DJANGO_SETTINGS_MODULE' in os.environ: # pragma: no cover
sys.path.insert(0, os.path.abspath('.'))
with suppress(Exception):
import dynaconf.contrib.django_dynaconf # noqa
from django.conf import settings as django_settings
django_settings.configure()
settings = django_settings
django_app = True
click.echo(click.style('Django app detected', fg='white', bg='black'))
if not django_app and not flask_app:
from dynaconf import settings
CWD = Path.cwd()
ENVS = ['default', 'development', 'staging', 'testing', 'production', 'global']
EXTS = ['ini', 'toml', 'yaml', 'json', 'py', 'env']
WRITERS = ['ini', 'toml', 'yaml', 'json', 'py', 'redis', 'vault', 'env']
ENC = default_settings.ENCODING_FOR_DYNACONF
def split_vars(_vars):
"""Splits values like foo=bar=zaz in {'foo': 'bar=zaz'}"""
return {
k.upper().strip(): parse_conf_data(v.strip(), tomlfy=True)
for k, _, v
in [item.partition('=') for item in _vars]
} if _vars else {}
def read_file_in_root_directory(*names, **kwargs):
"""Read a file."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf-8')
).read().strip()
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(read_file_in_root_directory('VERSION'))
ctx.exit()
def open_docs(ctx, param, value): # pragma: no cover
if not value or ctx.resilient_parsing:
return
url = 'http://dynaconf.readthedocs.io/'
webbrowser.open(url, new=2)
click.echo("{} opened in browser".format(url))
ctx.exit()
def show_banner(ctx, param, value):
"""Shows dynaconf awesome banner"""
if not value or ctx.resilient_parsing:
return
click.echo(settings.dynaconf_banner)
click.echo('Learn more at: http://github.com/rochacbruno/dynaconf')
ctx.exit()
@click.group()
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True, help="Show dynaconf version")
@click.option('--docs', is_flag=True, callback=open_docs, expose_value=False,
is_eager=True, help="Open documentation in browser")
@click.option('--banner', is_flag=True, callback=show_banner,
expose_value=False, is_eager=True, help="Show awesome banner")
def main():
"""Dynaconf - Command Line Interface\n
Documentation: http://dynaconf.readthedocs.io/
"""
@main.command()
@click.option('--format', 'fileformat', '-f', default='toml',
type=click.Choice(EXTS))
@click.option('--path', '-p', default=CWD,
help='defaults to current directory')
@click.option('--env', '-e', default=None,
help='Sets the working env in `.env` file')
@click.option('--vars', '_vars', '-v', multiple=True, default=None,
help=(
'extra values to write to settings file '
'file e.g: `dynaconf init -v NAME=foo -v X=2'
))
@click.option('--secrets', '_secrets', '-s', multiple=True, default=None,
help=(
'secret key values to be written in .secrets '
'e.g: `dynaconf init -s TOKEN=kdslmflds'
))
@click.option('--wg/--no-wg', default=True)
@click.option('-y', default=False, is_flag=True)
def init(fileformat, path, env, _vars, _secrets, wg, y):
"""Inits a dynaconf project
By default it creates a settings.toml and a .secrets.toml
for [default|development|staging|testing|production|global] envs.
The format of the files can be changed passing
--format=yaml|json|ini|py.
This command must run on the project's root folder or you must pass
--path=/myproject/root/folder.
If you want to have a .env created with the ENV defined there e.g:
`ENV_FOR_DYNACONF=production` just pass --env=production and then .env
will also be created and the env defined to production.
"""
click.echo('Cofiguring your Dynaconf environment')
env = env or settings.current_env.lower()
loader = importlib.import_module(
"dynaconf.loaders.{}_loader".format(fileformat)
)
# Turn foo=bar=zaz in {'foo': 'bar=zaz'}
env_data = split_vars(_vars)
_secrets = split_vars(_secrets)
# create placeholder data for every env
settings_data = {k: {'value': 'value for {}'.format(k)} for k in ENVS}
secrets_data = {k: {'secret': 'secret for {}'.format(k)} for k in ENVS}
if env_data:
settings_data[env] = env_data
settings_data['default'] = {k: 'default' for k in env_data}
if _secrets:
secrets_data[env] = _secrets
secrets_data['default'] = {k: 'default' for k in _secrets}
path = Path(path)
if str(path).endswith(constants.ALL_EXTENSIONS + ('py',)):
settings_path = path
secrets_path = path.parent / '.secrets.{}'.format(fileformat)
dotenv_path = path.parent / '.env'
gitignore_path = path.parent / '.gitignore'
else:
if fileformat == 'env':
if str(path) in ('.env', './.env'): # pragma: no cover
settings_path = path
elif str(path).endswith('/.env'):
settings_path = path
elif str(path).endswith('.env'): # pragma: no cover
settings_path = path.parent / '.env'
else:
settings_path = path / '.env'
Path.touch(settings_path)
secrets_path = None
else:
settings_path = path / 'settings.{}'.format(fileformat)
secrets_path = path / '.secrets.{}'.format(fileformat)
dotenv_path = path / '.env'
gitignore_path = path / '.gitignore'
if fileformat in ['py', 'env']:
# for Python and .env files writes a single env
settings_data = settings_data[env]
secrets_data = secrets_data[env]
if not y and settings_path and settings_path.exists(): # pragma: no cover
click.confirm(
'{} exists do you want to overwrite it?'.format(settings_path),
abort=True
)
if not y and secrets_path and secrets_path.exists(): # pragma: no cover
click.confirm(
'{} exists do you want to overwrite it?'.format(secrets_path),
abort=True
)
if settings_path and settings_data:
loader.write(settings_path, settings_data, merge=True)
if secrets_path and secrets_data:
loader.write(secrets_path, secrets_data, merge=True)
# write .env file
# if env not in ['default', 'development']: # pragma: no cover
if not dotenv_path.exists(): # pragma: no cover
Path.touch(dotenv_path)
dotenv_cli.set_key(str(dotenv_path), 'ENV_FOR_DYNACONF', env.upper())
else: # pragma: no cover
click.echo(
'.env already exists please set ENV_FOR_DYNACONF={}'.format(
env.upper()
)
)
if wg:
# write .gitignore
ignore_line = ".secrets.*"
comment = "\n# Ignore dynaconf secret files\n"
if not gitignore_path.exists():
with io.open(str(gitignore_path), 'w', encoding=ENC) as f:
f.writelines([comment, ignore_line, '\n'])
else:
existing = ignore_line in io.open(
str(gitignore_path), encoding=ENC
).read()
if not existing: # pragma: no cover
with io.open(str(gitignore_path), 'a+', encoding=ENC) as f:
f.writelines(
[comment, ignore_line, '\n']
)
@main.command(name='list')
@click.option('--env', '-e', default=None,
help='Filters the env to get the values')
@click.option('--key', '-k', default=None, help='Filters a single key')
@click.option('--more', '-m', default=None,
help='Pagination more|less style', is_flag=True)
@click.option('--loader', '-l', default=None,
help='a loader identifier to filter e.g: toml|yaml')
def _list(env, key, more, loader):
"""Lists all defined config values"""
if env:
env = env.strip()
if key:
key = key.strip()
if loader:
loader = loader.strip()
if env:
settings.setenv(env)
cur_env = settings.current_env.lower()
click.echo(
click.style(
'Working in %s environment ' % cur_env,
bold=True, bg='blue', fg='white'
)
)
if not loader:
data = settings.store
else:
identifier = '{}_{}'.format(loader, cur_env)
data = settings._loaded_by_loaders.get(identifier, {})
data = data or settings._loaded_by_loaders.get(loader, {})
# remove to avoid displaying twice
data.pop('SETTINGS_MODULE', None)
def color(_k):
if _k in dir(default_settings):
return 'blue'
return 'green'
if not key:
datalines = '\n'.join(
'%s: %s' % (click.style(k, bg=color(k), fg='white'),
pprint.pformat(v))
for k, v in data.items()
)
(click.echo_via_pager if more else click.echo)(datalines)
else:
key = key.upper()
value = data.get(key)
if not value:
click.echo(click.style('Key not found', bg='red', fg='white'))
return
click.echo(
'%s: %s' % (
click.style(key.upper(), bg=color(key), fg='white'),
pprint.pformat(value)
)
)
if env:
settings.setenv()
@main.command()
@click.argument('to', required=True, type=click.Choice(WRITERS))
@click.option('--vars', '_vars', '-v', multiple=True, default=None,
help=(
'key values to be written '
'e.g: `dynaconf write toml -e NAME=foo -e X=2'
))
@click.option('--secrets', '_secrets', '-s', multiple=True, default=None,
help=(
'secret key values to be written in .secrets '
'e.g: `dynaconf write toml -s TOKEN=kdslmflds -s X=2'
))
@click.option('--path', '-p', default=CWD,
help='defaults to current directory/settings.{ext}')
@click.option(
'--env', '-e', default='default',
help=(
'env to write to defaults to DEVELOPMENT for files '
'for external sources like Redis and Vault '
'it will be DYNACONF or the value set in '
'$GLOBAL_ENV_FOR_DYNACONF'
)
)
@click.option('-y', default=False, is_flag=True)
def write(to, _vars, _secrets, path, env, y):
"""Writes data to specific source"""
_vars = split_vars(_vars)
_secrets = split_vars(_secrets)
loader = importlib.import_module("dynaconf.loaders.{}_loader".format(to))
if to in EXTS:
# Lets write to a file
path = Path(path)
if str(path).endswith(constants.ALL_EXTENSIONS + ('py',)):
settings_path = path
secrets_path = path.parent / '.secrets.{}'.format(to)
else:
if to == 'env':
if str(path) in ('.env', './.env'): # pragma: no cover
settings_path = path
elif str(path).endswith('/.env'):
settings_path = path
elif str(path).endswith('.env'):
settings_path = path.parent / '.env'
else:
settings_path = path / '.env'
Path.touch(settings_path)
secrets_path = None
_vars.update(_secrets)
else:
settings_path = path / 'settings.{}'.format(to)
secrets_path = path / '.secrets.{}'.format(to)
if _vars and not y and settings_path and settings_path.exists(): # pragma: no cover # noqa
click.confirm(
'{} exists do you want to overwrite it?'.format(settings_path),
abort=True
)
if _secrets and not y and secrets_path and secrets_path.exists(): # pragma: no cover # noqa
click.confirm(
'{} exists do you want to overwrite it?'.format(secrets_path),
abort=True
)
if to not in ['py', 'env']:
if _vars:
_vars = {env: _vars}
if _secrets:
_secrets = {env: _secrets}
if _vars and settings_path:
loader.write(settings_path, _vars, merge=True)
click.echo('Data successful written to {}'.format(settings_path))
if _secrets and secrets_path:
loader.write(secrets_path, _secrets, merge=True)
click.echo('Data successful written to {}'.format(secrets_path))
else: # pragma: no cover
# lets write to external source
loader.write(settings, _vars, **_secrets)
click.echo('Data successful written to {}'.format(to))
@main.command()
@click.option('--path', '-p', default=CWD,
help='defaults to current directory')
def validate(path): # pragma: no cover
"""Validates Dynaconf settings based on rules defined in
dynaconf_validators.toml"""
# reads the 'dynaconf_validators.toml' from path
# for each section register the validator for specific env
# call validate
if not str(path).endswith('.toml'):
path = path / "dynaconf_validators.toml"
if not Path(path).exists(): # pragma: no cover # noqa
click.echo(click.style(
"{} not found".format(path), fg="white", bg="red"
))
sys.exit(1)
validation_data = toml.load(open(str(path)))
for env, name_data in validation_data.items():
for name, data in name_data.items():
if not isinstance(data, dict): # pragma: no cover
click.echo(click.style(
"Invalid rule for parameter '{}'".format(name),
fg="white", bg="yellow"
))
else: # pragma: no cover
data.setdefault('env', env)
click.echo(click.style(
"Validating '{}' with '{}'".format(name, data),
fg="white", bg="blue"
))
Validator(name, **data).validate(settings)
# pragma: no cover
click.echo(click.style(
"Validation success!", fg="white", bg="green"
))
| 458 | 0 | 73 |
e03ddb71ae236fed5ccd6de147c4d4b99638e687 | 5,850 | py | Python | cbandits/core/contextual_dataset.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | cbandits/core/contextual_dataset.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | cbandits/core/contextual_dataset.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | """Define a data buffer for contextual bandit algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class ContextualDataset(object):
"""The buffer is able to append new data, and sample random minibatches."""
def __init__(self, context_dim, num_actions, buffer_s=-1, memory_size=-1, intercept=False):
"""Creates a ContextualDataset object.
The data is stored in attributes: contexts and rewards.
The sequence of taken actions are stored in attribute actions.
Args:
context_dim: Dimension of the contexts.
num_actions: Number of arms for the multi-armed bandit.
buffer_s: Size of buffer for training. Only last buffer_s will be
returned as minibatch. If buffer_s = -1, all data will be used.
memory_size: Specify the number of examples to store in memory.
if buffer_s = -1, all data will be stored.
intercept: If True, it adds a constant (1.0) dimension to each context X,
at the end.
"""
self._context_dim = context_dim
self._num_actions = num_actions
self._contexts = None
self._rewards = None
self.actions = []
self.buffer_s = buffer_s
self.memory_size = memory_size
self.intercept = intercept
def add(self, context, action, reward):
"""Adds a new triplet (context, action, reward) to the dataset.
The reward for the actions that weren't played is assumed to be zero.
Args:
context: A d-dimensional vector with the context.
action: Integer between 0 and k-1 representing the chosen arm.
reward: Real number representing the reward for the (context, action).
"""
if self.intercept:
c = np.array(context[:])
c = np.append(c, 1.0).reshape((1, self.context_dim + 1))
else:
c = np.array(context[:]).reshape((1, self.context_dim))
if self.contexts is None:
self.contexts = c
else:
self.contexts = np.vstack((self.contexts, c))
r = np.zeros((1, self.num_actions))
r[0, action] = reward
if self.rewards is None:
self.rewards = r
else:
self.rewards = np.vstack((self.rewards, r))
self.actions.append(action)
#Drop oldest example if memory constraint
if self.memory_size != -1:
if self.contexts.shape[0] > self.memory_size:
self.contexts = self.contexts[1:, :]
self.rewards = self.rewards[1:, :]
self.actions = self.actions[1:]
#Assert lengths match
assert len(self.actions) == len(self.rewards)
assert len(self.actions) == len(self.contexts)
def get_batch(self, batch_size):
"""Returns a random minibatch of (contexts, rewards) with batch_size."""
n, _ = self.contexts.shape
if self.buffer_s == -1:
# use all the data
ind = np.random.choice(range(n), batch_size)
else:
# use only buffer (last buffer_s observations)
ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)
return self.contexts[ind, :], self.rewards[ind, :]
def get_data(self, action):
"""Returns all (context, reward) where the action was played."""
n, _ = self.contexts.shape
ind = np.array([i for i in range(n) if self.actions[i] == action])
return self.contexts[ind, :], self.rewards[ind, action]
def get_data_with_weights(self):
"""Returns all observations with one-hot weights for actions."""
weights = np.zeros((self.contexts.shape[0], self.num_actions))
a_ind = np.array([(i, val) for i, val in enumerate(self.actions)])
weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
return self.contexts, self.rewards, weights
def get_batch_with_weights(self, batch_size):
"""Returns a random mini-batch with one-hot weights for actions."""
n, _ = self.contexts.shape
if self.buffer_s == -1:
# use all the data
ind = np.random.choice(range(n), batch_size)
else:
# use only buffer (last buffer_s obs)
ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)
weights = np.zeros((batch_size, self.num_actions))
sampled_actions = np.array(self.actions)[ind]
a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)])
weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
return self.contexts[ind, :], self.rewards[ind, :], weights
def num_points(self, f=None):
"""Returns number of points in the buffer (after applying function f)."""
if f is not None:
return f(self.contexts.shape[0])
return self.contexts.shape[0]
@property
@property
@property
@contexts.setter
@property
@actions.setter
@property
@rewards.setter | 36.335404 | 95 | 0.607692 | """Define a data buffer for contextual bandit algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class ContextualDataset(object):
"""The buffer is able to append new data, and sample random minibatches."""
def __init__(self, context_dim, num_actions, buffer_s=-1, memory_size=-1, intercept=False):
"""Creates a ContextualDataset object.
The data is stored in attributes: contexts and rewards.
The sequence of taken actions are stored in attribute actions.
Args:
context_dim: Dimension of the contexts.
num_actions: Number of arms for the multi-armed bandit.
buffer_s: Size of buffer for training. Only last buffer_s will be
returned as minibatch. If buffer_s = -1, all data will be used.
memory_size: Specify the number of examples to store in memory.
if buffer_s = -1, all data will be stored.
intercept: If True, it adds a constant (1.0) dimension to each context X,
at the end.
"""
self._context_dim = context_dim
self._num_actions = num_actions
self._contexts = None
self._rewards = None
self.actions = []
self.buffer_s = buffer_s
self.memory_size = memory_size
self.intercept = intercept
def add(self, context, action, reward):
"""Adds a new triplet (context, action, reward) to the dataset.
The reward for the actions that weren't played is assumed to be zero.
Args:
context: A d-dimensional vector with the context.
action: Integer between 0 and k-1 representing the chosen arm.
reward: Real number representing the reward for the (context, action).
"""
if self.intercept:
c = np.array(context[:])
c = np.append(c, 1.0).reshape((1, self.context_dim + 1))
else:
c = np.array(context[:]).reshape((1, self.context_dim))
if self.contexts is None:
self.contexts = c
else:
self.contexts = np.vstack((self.contexts, c))
r = np.zeros((1, self.num_actions))
r[0, action] = reward
if self.rewards is None:
self.rewards = r
else:
self.rewards = np.vstack((self.rewards, r))
self.actions.append(action)
#Drop oldest example if memory constraint
if self.memory_size != -1:
if self.contexts.shape[0] > self.memory_size:
self.contexts = self.contexts[1:, :]
self.rewards = self.rewards[1:, :]
self.actions = self.actions[1:]
#Assert lengths match
assert len(self.actions) == len(self.rewards)
assert len(self.actions) == len(self.contexts)
def replace_data(self, contexts=None, actions=None, rewards=None):
if contexts is not None:
self.contexts = contexts
if actions is not None:
self.actions = actions
if rewards is not None:
self.rewards = rewards
def get_batch(self, batch_size):
"""Returns a random minibatch of (contexts, rewards) with batch_size."""
n, _ = self.contexts.shape
if self.buffer_s == -1:
# use all the data
ind = np.random.choice(range(n), batch_size)
else:
# use only buffer (last buffer_s observations)
ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)
return self.contexts[ind, :], self.rewards[ind, :]
def get_data(self, action):
"""Returns all (context, reward) where the action was played."""
n, _ = self.contexts.shape
ind = np.array([i for i in range(n) if self.actions[i] == action])
return self.contexts[ind, :], self.rewards[ind, action]
def get_data_with_weights(self):
"""Returns all observations with one-hot weights for actions."""
weights = np.zeros((self.contexts.shape[0], self.num_actions))
a_ind = np.array([(i, val) for i, val in enumerate(self.actions)])
weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
return self.contexts, self.rewards, weights
def get_batch_with_weights(self, batch_size):
"""Returns a random mini-batch with one-hot weights for actions."""
n, _ = self.contexts.shape
if self.buffer_s == -1:
# use all the data
ind = np.random.choice(range(n), batch_size)
else:
# use only buffer (last buffer_s obs)
ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)
weights = np.zeros((batch_size, self.num_actions))
sampled_actions = np.array(self.actions)[ind]
a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)])
weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
return self.contexts[ind, :], self.rewards[ind, :], weights
def num_points(self, f=None):
"""Returns number of points in the buffer (after applying function f)."""
if f is not None:
return f(self.contexts.shape[0])
return self.contexts.shape[0]
@property
def context_dim(self):
return self._context_dim
@property
def num_actions(self):
return self._num_actions
@property
def contexts(self):
return self._contexts
@contexts.setter
def contexts(self, value):
self._contexts = value
@property
def actions(self):
return self._actions
@actions.setter
def actions(self, value):
self._actions = value
@property
def rewards(self):
return self._rewards
@rewards.setter
def rewards(self, value):
self._rewards = value | 501 | 0 | 235 |
4210e8540a38faec5e55ddf81f43aaa043dc7da9 | 921 | py | Python | web-app/migrations/versions/20210626_114817_822caa87a652_add_quiz_game_setups_table.py | philipp-mos/iubh-quiz-app | 3ea4a6b673bba6df0ece273e17e65c8fe5edc341 | [
"MIT"
] | null | null | null | web-app/migrations/versions/20210626_114817_822caa87a652_add_quiz_game_setups_table.py | philipp-mos/iubh-quiz-app | 3ea4a6b673bba6df0ece273e17e65c8fe5edc341 | [
"MIT"
] | 65 | 2021-06-01T15:52:12.000Z | 2021-10-01T15:44:50.000Z | web-app/migrations/versions/20210626_114817_822caa87a652_add_quiz_game_setups_table.py | philipp-mos/iubh-quiz-app | 3ea4a6b673bba6df0ece273e17e65c8fe5edc341 | [
"MIT"
] | 1 | 2022-03-29T14:13:45.000Z | 2022-03-29T14:13:45.000Z | """Add quiz_game_setups Table
Revision ID: 822caa87a652
Revises: 3b809ceaf543
Create Date: 2021-06-26 11:48:17.948434
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '822caa87a652'
down_revision = '3b809ceaf543'
branch_labels = None
depends_on = None
| 26.314286 | 74 | 0.697068 | """Add quiz_game_setups Table
Revision ID: 822caa87a652
Revises: 3b809ceaf543
Create Date: 2021-06-26 11:48:17.948434
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '822caa87a652'
down_revision = '3b809ceaf543'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('quiz_game_setups',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('quizquestion_text', sa.String(), nullable=False),
sa.Column('quizquestion_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['quizquestion_id'], ['quiz_questions.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('quiz_game_setups')
# ### end Alembic commands ###
| 563 | 0 | 46 |
12e7db795759076fbdbf6d2c0d3b75f7bcb8889a | 2,368 | py | Python | clamor/rest/endpoints/emoji.py | TomSputz/Clamor | 13222b90532938e6ebdbe8aea0430512e7d22817 | [
"MIT"
] | 15 | 2019-07-05T20:26:18.000Z | 2020-09-18T12:44:16.000Z | clamor/rest/endpoints/emoji.py | TomSputz/Clamor | 13222b90532938e6ebdbe8aea0430512e7d22817 | [
"MIT"
] | 7 | 2019-07-07T19:55:07.000Z | 2019-08-20T22:07:31.000Z | clamor/rest/endpoints/emoji.py | TomSputz/Clamor | 13222b90532938e6ebdbe8aea0430512e7d22817 | [
"MIT"
] | 6 | 2019-07-07T20:39:29.000Z | 2020-11-06T10:12:20.000Z | # -*- coding: utf-8 -*-
from ..routes import Routes
from .base import *
__all__ = (
'EmojiWrapper',
)
class EmojiWrapper(EndpointsWrapper):
"""A higher-level wrapper around Emoji endpoints.
.. seealso:: Emoji endpoints https://discordapp.com/developers/docs/resources/emoji
"""
| 36.430769 | 87 | 0.472551 | # -*- coding: utf-8 -*-
from ..routes import Routes
from .base import *
__all__ = (
'EmojiWrapper',
)
class EmojiWrapper(EndpointsWrapper):
"""A higher-level wrapper around Emoji endpoints.
.. seealso:: Emoji endpoints https://discordapp.com/developers/docs/resources/emoji
"""
def __init__(self, token: str, guild_id: Snowflake):
super().__init__(token)
self.guild_id = guild_id
async def list_guild_emojis(self) -> list:
return await self.http.make_request(Routes.LIST_GUILD_EMOJIS,
dict(guild=self.guild_id))
async def get_guild_emoji(self, emoji_id: Snowflake) -> dict:
return await self.http.make_request(Routes.GET_GUILD_EMOJI,
dict(guild=self.guild_id, emoji=emoji_id))
async def create_guild_emoji(self,
name: str,
image: str,
roles: list,
reason: str = None) -> dict:
params = {
'name': name,
'image': image,
'roles': roles
}
return await self.http.make_request(Routes.CREATE_GUILD_EMOJI,
dict(guild=self.guild_id),
json=params,
reason=reason)
async def modify_guild_emoji(self,
emoji_id: Snowflake,
name: str = None,
roles: list = None,
reason: str = None) -> dict:
params = optional(**{
'name': name,
'roles': roles
})
return await self.http.make_request(Routes.MODIFY_GUILD_EMOJI,
dict(guild=self.guild_id, emoji=emoji_id),
json=params,
reason=reason)
async def delete_guild_emoji(self, emoji_id: Snowflake, reason: str = None):
return await self.http.make_request(Routes.DELETE_GUILD_EMOJI,
dict(guild=self.guild_id, emoji=emoji_id),
reason=reason)
| 1,907 | 0 | 162 |
879bf6cedca3a228a0ed5d68b969c46f3730e446 | 7,697 | py | Python | api/base/filters.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | api/base/filters.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | api/base/filters.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | import re
import functools
from modularodm import Q
from rest_framework.filters import OrderingFilter
from rest_framework import serializers as ser
class ODMOrderingFilter(OrderingFilter):
"""Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm."""
# override
query_pattern = re.compile(r'filter\[\s*(?P<field>\S*)\s*\]\s*')
# Used to make intersection "reduce-able"
class FilterMixin(object):
""" View mixin with helper functions for filtering. """
TRUTHY = set(['true', 'True', 1, '1'])
FALSY = set(['false', 'False', 0, '0'])
DEFAULT_OPERATOR = 'eq'
# Used so that that queries by _id will work
# Used to convert string values from query params to Python booleans when necessary
class ODMFilterMixin(FilterMixin):
"""View mixin that adds a get_query_from_request method which converts query params
of the form `filter[field_name]=value` into an ODM Query object.
Subclasses must define `get_default_odm_query()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a frozenset of strings representing the field names as they appear in the serialization.
"""
# TODO Handle simple and complex non-standard fields
field_comparison_operators = {
ser.CharField: 'icontains',
ser.ListField: 'in',
}
def query_params_to_odm_query(self, query_params):
"""Convert query params to a modularodm Query object."""
fields_dict = query_params_to_fields(query_params)
if fields_dict:
query_parts = [
Q(self.convert_key(key=key), self.get_comparison_operator(key=key), self.convert_value(value=value, field=key))
for key, value in fields_dict.items() if self.is_filterable_field(key=key)
]
# TODO Ensure that if you try to filter on an invalid field, it returns a useful error. Fix related test.
try:
query = functools.reduce(intersect, query_parts)
except TypeError:
query = None
else:
query = None
return query
class ListFilterMixin(FilterMixin):
"""View mixin that adds a get_queryset_from_request method which uses query params
of the form `filter[field_name]=value` to filter a list of objects.
Subclasses must define `get_default_queryset()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a frozenset of strings representing the field names as they appear in the serialization.
"""
def param_queryset(self, query_params, default_queryset):
"""filters default queryset based on query parameters"""
fields_dict = query_params_to_fields(query_params)
queryset = set(default_queryset)
if fields_dict:
for field_name, value in fields_dict.items():
if self.is_filterable_field(key=field_name):
queryset = queryset.intersection(set(self.get_filtered_queryset(field_name, value, default_queryset)))
return list(queryset)
def get_filtered_queryset(self, field_name, value, default_queryset):
"""filters default queryset based on the serializer field type"""
field = self.serializer_class._declared_fields[field_name]
if isinstance(field, ser.SerializerMethodField):
return_val = [item for item in default_queryset if self.get_serializer_method(field_name)(item) == self.convert_value(value, field_name)]
elif isinstance(field, ser.BooleanField):
return_val = [item for item in default_queryset if getattr(item, field_name, None) == self.convert_value(value, field_name)]
elif isinstance(field, ser.CharField):
return_val = [item for item in default_queryset if value.lower() in getattr(item, field_name, None).lower()]
else:
# TODO Ensure that if you try to filter on an invalid field, it returns a useful error.
return_val = [item for item in default_queryset if value in getattr(item, field_name, None)]
return return_val
def get_serializer_method(self, field_name):
"""
:param field_name: The name of a SerializerMethodField
:return: The function attached to the SerializerMethodField to get its value
"""
serializer = self.get_serializer()
serializer_method_name = 'get_' + field_name
return getattr(serializer, serializer_method_name)
| 39.071066 | 149 | 0.678706 | import re
import functools
from modularodm import Q
from rest_framework.filters import OrderingFilter
from rest_framework import serializers as ser
class ODMOrderingFilter(OrderingFilter):
"""Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm."""
# override
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if ordering:
return queryset.sort(*ordering)
return queryset
query_pattern = re.compile(r'filter\[\s*(?P<field>\S*)\s*\]\s*')
def query_params_to_fields(query_params):
return {
query_pattern.match(key).groupdict()['field']: value
for key, value in query_params.items()
if query_pattern.match(key)
}
# Used to make intersection "reduce-able"
def intersect(x, y):
return x & y
class FilterMixin(object):
""" View mixin with helper functions for filtering. """
TRUTHY = set(['true', 'True', 1, '1'])
FALSY = set(['false', 'False', 0, '0'])
DEFAULT_OPERATOR = 'eq'
def __init__(self, *args, **kwargs):
super(FilterMixin, self).__init__(*args, **kwargs)
if not self.serializer_class:
raise NotImplementedError()
def is_filterable_field(self, key):
try:
return key.strip() in self.serializer_class.filterable_fields
except AttributeError:
return key.strip() in self.serializer_class._declared_fields
# Used so that that queries by _id will work
def convert_key(self, key):
key = key.strip()
if self.serializer_class._declared_fields[key].source:
return self.serializer_class._declared_fields[key].source
return key
# Used to convert string values from query params to Python booleans when necessary
def convert_value(self, value, field):
field_type = type(self.serializer_class._declared_fields[field])
value = value.strip()
if field_type == ser.BooleanField:
if value in self.TRUTHY:
return True
elif value in self.FALSY:
return False
# TODO Should we handle if the value is neither TRUTHY nor FALSY (first add test for how we'd expect it to
# work, then ensure that it works that way).
else:
return value
class ODMFilterMixin(FilterMixin):
"""View mixin that adds a get_query_from_request method which converts query params
of the form `filter[field_name]=value` into an ODM Query object.
Subclasses must define `get_default_odm_query()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a frozenset of strings representing the field names as they appear in the serialization.
"""
# TODO Handle simple and complex non-standard fields
field_comparison_operators = {
ser.CharField: 'icontains',
ser.ListField: 'in',
}
def __init__(self, *args, **kwargs):
super(FilterMixin, self).__init__(*args, **kwargs)
if not self.serializer_class:
raise NotImplementedError()
def get_comparison_operator(self, key):
field_type = type(self.serializer_class._declared_fields[key])
if field_type in self.field_comparison_operators:
return self.field_comparison_operators[field_type]
else:
return self.DEFAULT_OPERATOR
def get_default_odm_query(self):
raise NotImplementedError('Must define get_default_odm_query')
def get_query_from_request(self):
param_query = self.query_params_to_odm_query(self.request.QUERY_PARAMS)
default_query = self.get_default_odm_query()
if param_query:
query = param_query & default_query
else:
query = default_query
return query
def query_params_to_odm_query(self, query_params):
"""Convert query params to a modularodm Query object."""
fields_dict = query_params_to_fields(query_params)
if fields_dict:
query_parts = [
Q(self.convert_key(key=key), self.get_comparison_operator(key=key), self.convert_value(value=value, field=key))
for key, value in fields_dict.items() if self.is_filterable_field(key=key)
]
# TODO Ensure that if you try to filter on an invalid field, it returns a useful error. Fix related test.
try:
query = functools.reduce(intersect, query_parts)
except TypeError:
query = None
else:
query = None
return query
class ListFilterMixin(FilterMixin):
"""View mixin that adds a get_queryset_from_request method which uses query params
of the form `filter[field_name]=value` to filter a list of objects.
Subclasses must define `get_default_queryset()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a frozenset of strings representing the field names as they appear in the serialization.
"""
def __init__(self, *args, **kwargs):
super(FilterMixin, self).__init__(*args, **kwargs)
if not self.serializer_class:
raise NotImplementedError()
def get_default_queryset(self):
raise NotImplementedError('Must define get_default_queryset')
def get_queryset_from_request(self):
default_queryset = self.get_default_queryset()
if self.request.QUERY_PARAMS:
param_queryset = self.param_queryset(self.request.QUERY_PARAMS, default_queryset)
return param_queryset
else:
return default_queryset
def param_queryset(self, query_params, default_queryset):
"""filters default queryset based on query parameters"""
fields_dict = query_params_to_fields(query_params)
queryset = set(default_queryset)
if fields_dict:
for field_name, value in fields_dict.items():
if self.is_filterable_field(key=field_name):
queryset = queryset.intersection(set(self.get_filtered_queryset(field_name, value, default_queryset)))
return list(queryset)
def get_filtered_queryset(self, field_name, value, default_queryset):
"""filters default queryset based on the serializer field type"""
field = self.serializer_class._declared_fields[field_name]
if isinstance(field, ser.SerializerMethodField):
return_val = [item for item in default_queryset if self.get_serializer_method(field_name)(item) == self.convert_value(value, field_name)]
elif isinstance(field, ser.BooleanField):
return_val = [item for item in default_queryset if getattr(item, field_name, None) == self.convert_value(value, field_name)]
elif isinstance(field, ser.CharField):
return_val = [item for item in default_queryset if value.lower() in getattr(item, field_name, None).lower()]
else:
# TODO Ensure that if you try to filter on an invalid field, it returns a useful error.
return_val = [item for item in default_queryset if value in getattr(item, field_name, None)]
return return_val
def get_serializer_method(self, field_name):
"""
:param field_name: The name of a SerializerMethodField
:return: The function attached to the SerializerMethodField to get its value
"""
serializer = self.get_serializer()
serializer_method_name = 'get_' + field_name
return getattr(serializer, serializer_method_name)
| 2,736 | 0 | 366 |
6a9ee4906295ec1af783379bcd1b815404b3ad4d | 1,597 | py | Python | engine/gui/uielement.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 8 | 2019-12-15T22:32:30.000Z | 2021-06-14T07:38:51.000Z | engine/gui/uielement.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | null | null | null | engine/gui/uielement.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 2 | 2020-09-10T17:34:23.000Z | 2021-03-11T09:26:26.000Z | import pygame
import pygame_gui
import engine.app
from engine.math import Vector
import engine.math
############################
| 22.814286 | 67 | 0.690044 | import pygame
import pygame_gui
import engine.app
from engine.math import Vector
import engine.math
class UIElement():
def __init__(self):
self._element = None
self._lastResolution = [-1, -1]
self._position = Vector(0.4, 0.45)
self._scale = Vector(0.2, 0.1)
self._killed = False
def isKilled(self):
return self._killed
def kill(self):
if not self._element:
return
self._element.kill()
self._killed = True
def setPosition(self, x, y):
self._position.x = engine.math.clamp(x, 0.0, 1.0)
self._position.y = engine.math.clamp(y, 0.0, 1.0)
self.updateResolution(True)
def setScale(self, x, y):
self._scale.x = engine.math.clamp(x, 0.01, 1.0)
self._scale.y = engine.math.clamp(y, 0.01, 1.0)
self.updateResolution(True)
def updateResolution(self, force=False):
if not self._element:
return
renderer = engine.app.getApp().getRenderer()
res = renderer.getResolution()
if res == self._lastResolution and not force:
return
self._lastResolution = res
res= Vector(res[0], res[1])
position = res * self._position
scale = res * self._scale
self._updateElementTransform(position.getList(), scale.getList())
def _updateElementTransform(self, position, scale):
self._element.set_position(position)
self._element.set_dimensions(scale)
############################
def loadJsonData(self, jsonData):
if "position" in jsonData:
pos = jsonData["position"]
self.setPosition(pos[0], pos[1])
if "scale" in jsonData:
scale = jsonData["scale"]
self.setScale(scale[0], scale[1])
def update(self):
self.updateResolution()
| 1,229 | -3 | 238 |
8c117027b71e3ca13907608742ca89676f486313 | 504 | py | Python | test/test_scripta.py | rec/ascript | 99975ca4e94c5791ee09864ce7cb23e2d43f7cd6 | [
"MIT"
] | 1 | 2021-08-04T18:06:37.000Z | 2021-08-04T18:06:37.000Z | test/test_scripta.py | rec/scripta | 99975ca4e94c5791ee09864ce7cb23e2d43f7cd6 | [
"MIT"
] | 6 | 2020-06-25T16:16:36.000Z | 2020-06-25T23:54:57.000Z | test/test_scripta.py | rec/ascript | 99975ca4e94c5791ee09864ce7cb23e2d43f7cd6 | [
"MIT"
] | null | null | null | from scripta import cast_recorder
from scripta import scripta
from unittest import IsolatedAsyncioTestCase
from unittest.mock import patch
import asyncio
import tdir
| 26.526316 | 68 | 0.765873 | from scripta import cast_recorder
from scripta import scripta
from unittest import IsolatedAsyncioTestCase
from unittest.mock import patch
import asyncio
import tdir
class CastRecorder(cast_recorder.CastRecorder):
async def record_to(self, cast_file, cast):
await asyncio.sleep(0)
class TestScripta(IsolatedAsyncioTestCase):
@patch('scripta.scripta.CastRecorder', side_effect=CastRecorder)
async def NO_test_scripta(self):
with tdir():
await scripta.scripta()
| 121 | 143 | 72 |
016c3b8beea50db52081dfe286ec8ca22e8adc82 | 5,347 | py | Python | src/libs/core/results/console.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | src/libs/core/results/console.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | src/libs/core/results/console.py | VirtualVFix/AndroidTestFramework | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | [
"MIT"
] | null | null | null | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "12/18/2017 4:43 PM"
from config import CONFIG
from libs.core.tools.utility import Utility
from libs.core.template import CASE, NAME, PARAMS
from libs.core.unittest.config import RESULT_NAMES
from libs.core.logger import getLogger, getSysLogger
from .config import CONSOLE_RESULT_TABLE_SIZES as SIZES, EMPTY_RECORD
class Console:
"""
Print TestCases results to console
"""
@staticmethod
def print_results(logger=None, case=None, cycle=None, suite=None):
"""
Print current TestSuite results to console
Args:
logger (logging): Logger to print
case (dict): TestCase dict
suite (dict): TestSuite dict
cycle (int): Current global cycle
"""
logger = logger or getLogger(__file__)
syslogger = getSysLogger()
try:
for _cycle in range(CONFIG.SYSTEM.TOTAL_CYCLES_GLOBAL):
if cycle is not None and _cycle != cycle-1:
continue
# TestCases
for _case in CONFIG.UNITTEST.SELECTED_TEST_CASES:
if case is not None and _case != case:
continue
logger.newline()
logger.table('_*', border_delimiter=' ')
# logger.table(' ')
logger.table(('Results of %s TestCase. Cycle %d/%d'
% (CASE.safe_substitute(case=_case['name'], index=_case['index']),
_cycle+1, CONFIG.SYSTEM.TOTAL_CYCLES_GLOBAL), 'C'))
# TestSuites
for _suite in _case['suites']:
if suite is not None and _suite != suite:
continue
logger.table(('%s TestSuite %s' % (NAME.safe_substitute(name=_suite['name']),
'with parameters: %s'
% PARAMS.safe_substitute(name=_suite['params'])
if _suite['params'] is not None
else 'without parameters'), 'C'))
logger.table('-*')
logger.table(('#', SIZES['number'], 'C'), # number
('Test id'.upper(), SIZES['test_id'], 'C'), # id
('Test name'.upper(), SIZES['test_name'], 'C'), # name
('Description'.upper(), SIZES['description'], 'C'), # description
('Cycles'.upper(), SIZES['cycles'], 'C'), # cycles
('Time'.upper(), SIZES['time'], 'C'), # time
('Result'.upper(), SIZES['result'], 'C'), # result
('Pass Rate'.upper(), SIZES['rate'], 'C')) # pass rate
logger.table('-*')
# Tests
for t, _test in enumerate(_suite['tests']):
_res = _test['results'][_cycle] if _test['results'] is not None and \
len(_test['results']) > _cycle else None
_res_cycle = _res['cycle'] if _res is not None else 0
_res_cycles = _res['cycles'] if _res is not None else 0
_time = _res['time'] if _res is not None else EMPTY_RECORD
if _time != EMPTY_RECORD and _time > 60:
_time = Utility.seconds_to_time_format(_time)
_result = _res['result'] if _res is not None else RESULT_NAMES['not run']
_rate = _res['rate'] if _res is not None else 0
logger.table(('%d' % (t+1), SIZES['number'], 'C'), # number
('%s' % _test['id'], SIZES['test_id'], 'C'), # id
('%s' % (_test['name'] or EMPTY_RECORD), SIZES['test_name'], 'C'), # name
('%s' % (_test['desc'] or EMPTY_RECORD), SIZES['description'], 'C'), # description
(('%d/%d' % (_res_cycle, _res_cycles)) if _res is not None else EMPTY_RECORD,
SIZES['cycles'], 'C'), # cycles
('%s' % _time, SIZES['time'], 'C'), # time
('%s' % _result, SIZES['result'], 'C'), # result
('%.1f %%' % _rate, SIZES['rate'], 'C')) # pass rate
logger.table('-*')
logger.newline()
except Exception as e:
syslogger.exception(e)
if CONFIG.SYSTEM.DEBUG:
raise
logger.error(e)
| 55.123711 | 124 | 0.438751 | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "12/18/2017 4:43 PM"
from config import CONFIG
from libs.core.tools.utility import Utility
from libs.core.template import CASE, NAME, PARAMS
from libs.core.unittest.config import RESULT_NAMES
from libs.core.logger import getLogger, getSysLogger
from .config import CONSOLE_RESULT_TABLE_SIZES as SIZES, EMPTY_RECORD
class Console:
"""
Print TestCases results to console
"""
@staticmethod
def print_results(logger=None, case=None, cycle=None, suite=None):
"""
Print current TestSuite results to console
Args:
logger (logging): Logger to print
case (dict): TestCase dict
suite (dict): TestSuite dict
cycle (int): Current global cycle
"""
logger = logger or getLogger(__file__)
syslogger = getSysLogger()
try:
for _cycle in range(CONFIG.SYSTEM.TOTAL_CYCLES_GLOBAL):
if cycle is not None and _cycle != cycle-1:
continue
# TestCases
for _case in CONFIG.UNITTEST.SELECTED_TEST_CASES:
if case is not None and _case != case:
continue
logger.newline()
logger.table('_*', border_delimiter=' ')
# logger.table(' ')
logger.table(('Results of %s TestCase. Cycle %d/%d'
% (CASE.safe_substitute(case=_case['name'], index=_case['index']),
_cycle+1, CONFIG.SYSTEM.TOTAL_CYCLES_GLOBAL), 'C'))
# TestSuites
for _suite in _case['suites']:
if suite is not None and _suite != suite:
continue
logger.table(('%s TestSuite %s' % (NAME.safe_substitute(name=_suite['name']),
'with parameters: %s'
% PARAMS.safe_substitute(name=_suite['params'])
if _suite['params'] is not None
else 'without parameters'), 'C'))
logger.table('-*')
logger.table(('#', SIZES['number'], 'C'), # number
('Test id'.upper(), SIZES['test_id'], 'C'), # id
('Test name'.upper(), SIZES['test_name'], 'C'), # name
('Description'.upper(), SIZES['description'], 'C'), # description
('Cycles'.upper(), SIZES['cycles'], 'C'), # cycles
('Time'.upper(), SIZES['time'], 'C'), # time
('Result'.upper(), SIZES['result'], 'C'), # result
('Pass Rate'.upper(), SIZES['rate'], 'C')) # pass rate
logger.table('-*')
# Tests
for t, _test in enumerate(_suite['tests']):
_res = _test['results'][_cycle] if _test['results'] is not None and \
len(_test['results']) > _cycle else None
_res_cycle = _res['cycle'] if _res is not None else 0
_res_cycles = _res['cycles'] if _res is not None else 0
_time = _res['time'] if _res is not None else EMPTY_RECORD
if _time != EMPTY_RECORD and _time > 60:
_time = Utility.seconds_to_time_format(_time)
_result = _res['result'] if _res is not None else RESULT_NAMES['not run']
_rate = _res['rate'] if _res is not None else 0
logger.table(('%d' % (t+1), SIZES['number'], 'C'), # number
('%s' % _test['id'], SIZES['test_id'], 'C'), # id
('%s' % (_test['name'] or EMPTY_RECORD), SIZES['test_name'], 'C'), # name
('%s' % (_test['desc'] or EMPTY_RECORD), SIZES['description'], 'C'), # description
(('%d/%d' % (_res_cycle, _res_cycles)) if _res is not None else EMPTY_RECORD,
SIZES['cycles'], 'C'), # cycles
('%s' % _time, SIZES['time'], 'C'), # time
('%s' % _result, SIZES['result'], 'C'), # result
('%.1f %%' % _rate, SIZES['rate'], 'C')) # pass rate
logger.table('-*')
logger.newline()
except Exception as e:
syslogger.exception(e)
if CONFIG.SYSTEM.DEBUG:
raise
logger.error(e)
| 0 | 0 | 0 |
78fee5d7b83ceaeb50369b8d08b4b4f569f3d5fc | 115 | py | Python | web_scraping/ec2files/ec2file32.py | nikibhatt/Groa | fc2d4ae87cb825e6d54a0831c72be16541eebe61 | [
"MIT"
] | 1 | 2020-04-08T20:11:48.000Z | 2020-04-08T20:11:48.000Z | web_scraping/ec2files/ec2file32.py | cmgospod/Groa | 31b3624bfe61e772b55f8175b4e95d63c9e67966 | [
"MIT"
] | null | null | null | web_scraping/ec2files/ec2file32.py | cmgospod/Groa | 31b3624bfe61e772b55f8175b4e95d63c9e67966 | [
"MIT"
] | 1 | 2020-09-12T07:07:41.000Z | 2020-09-12T07:07:41.000Z | from scraper import *
s = Scraper(start=57024, end=58805, max_iter=30, scraper_instance=32)
s.scrape_letterboxd() | 38.333333 | 70 | 0.773913 | from scraper import *
s = Scraper(start=57024, end=58805, max_iter=30, scraper_instance=32)
s.scrape_letterboxd() | 0 | 0 | 0 |
427c94638f2f3f91cf67eb6119a5530d3b4ed8b8 | 8,175 | py | Python | catalog.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | 1 | 2020-09-18T20:48:10.000Z | 2020-09-18T20:48:10.000Z | catalog.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | catalog.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | # encoding: utf-8
from Storm.Localized import Strings as LocalizedStrings
from Storm.GameData import Catalog
from Storm.DepotIndex import DepotIndex
from Storm.DepotCataFile import DepotCataFile, TYPE_NONE, TYPE_PRODUCTS, TYPE_LICENSES
from sys import argv
from os.path import exists
if __name__ != '__main__':
print_utf8('catalog.py is a CLI file, not a module')
exit(-1)
if len(argv) < 2:
print_utf8('Usage: python %s path_to_mods_dir [path_to_program_data [locale [region]]]' % (argv[0]))
exit(1)
MissingProducts = []
MissingLicenses = []
RootDir = argv[1]
RootDirLength = len(RootDir)
RootDepotDir = 'C:/ProgramData'
if len(argv) > 2:
RootDepotDir = argv[2]
RootDepotDir = '%s/Blizzard Entertainment/Battle.net/' % (argv[2])
RootDepotDirLength = len(RootDepotDir)
RootLocale = 'enus'
if len(argv) > 3:
RootLocale = argv[3]
# 1 = us, 2 = eu, 3 = ko, 5? = cn, 98 = xx (ww ptr), ?? = cxx (cn ptr), ?? = xx-02 (tournament)
RootRegion = 1
if len(argv) > 4:
RootRegion = int(argv[4])
print_utf8('Loading economy data')
Depot = DepotIndex(RootDepotDir)
EconomyCatalogs = list(map(lambda x: DepotCataFile(x.path), Depot.cata))
GameDataList = ['%s/heroesdata.stormmod' % RootDir]
GameDataList += list(map(lambda x: '%s/%s/' % (RootDir, x.get('value').lower()[5:]), Catalog('%s/heroesdata.stormmod/base.stormdata/Includes.xml' % RootDir)))
CRewardById = {}
CCatalogs = []
CCombinedLocale = {}
print_utf8('Loading reward data')
for gameDataDir in GameDataList:
gameDataPath = '%s/base.stormdata/GameData.xml' % gameDataDir
if not exists(gameDataPath):
print_utf8('Catalog stormmod %s does not exist!' % gameDataPath[RootDirLength:])
continue
CCombinedLocale = LocalizedStrings(CCombinedLocale).Load('%s/%s.stormdata/LocalizedData/GameStrings.txt' % (gameDataDir, RootLocale)).data
GameDataCatalog = Catalog(gameDataPath)
for CatalogEntry in GameDataCatalog:
catalogPath = '%s/base.stormdata/%s' % (gameDataDir, CatalogEntry)
if not exists(catalogPath):
print_utf8('Catalog file %s does not exist!' % catalogPath[RootDirLength:])
continue
CatalogFile = Catalog(catalogPath)
CCatalogs.append(CatalogFile)
for CRewardType in ['Banner', 'VoiceLine', 'Spray', 'Hero', 'Skin', 'Mount', 'AnnouncerPack', 'Icon']:
CRewards = CatalogFile.findall('CReward%s' % CRewardType)
for CReward in CRewards: CRewardById[CReward.get('id')] = CReward
CCombinedLocale = LocalizedStrings(CCombinedLocale)
print_utf8('Parsing reward data')
for CatalogFile in CCatalogs:
CItems = []
for CRewardType in ['Banner', 'VoiceLine', 'Spray', 'Hero', 'Skin', 'Mount', 'AnnouncerPack', 'Icon']:
CItems += CatalogFile.findall('C%s' % CRewardType)
parseRewards(CItems, RootRegion, EconomyCatalogs, CCombinedLocale, CatalogFile, CRewardById)
print_utf8("Missing Products: \n\t%s" % '\n\t'.join(MissingProducts))
print_utf8("Missing Licenses: \n\t%s" % '\n\t'.join(MissingLicenses))
| 41.923077 | 159 | 0.596697 | # encoding: utf-8
from Storm.Localized import Strings as LocalizedStrings
from Storm.GameData import Catalog
from Storm.DepotIndex import DepotIndex
from Storm.DepotCataFile import DepotCataFile, TYPE_NONE, TYPE_PRODUCTS, TYPE_LICENSES
from sys import argv
from os.path import exists
def print_utf8(text):
    """Encode *text* to UTF-8 bytes and print the result.

    Kept as the single output helper so every report line goes through the
    same encoding path (py2-era console-safety idiom).
    """
    encoded = text.encode('utf-8')
    print(encoded)
if __name__ != '__main__':
print_utf8('catalog.py is a CLI file, not a module')
exit(-1)
if len(argv) < 2:
print_utf8('Usage: python %s path_to_mods_dir [path_to_program_data [locale [region]]]' % (argv[0]))
exit(1)
MissingProducts = []
MissingLicenses = []
def printProduct(Product, Region):
    """Pretty-print one economy product entry.

    Shows the product's name/life-cycle/flags/visibility, its visibility in
    the given *Region*, and any shard/gold/gem retail prices it carries.
    """
    print_utf8('\t\tName: %s' % Product["m_name"])
    print_utf8('\t\tLife Cycle: %s' % Product["m_lifeCycle"])
    print_utf8('\t\tFlags: %s' % Product["m_flags"])
    print_utf8('\t\tVisible: %s' % Product["m_visible"])
    region_visible = next(
        (setting['m_visible'] for setting in Product["m_regionalSettings"]
         if setting['m_region'] == Region),
        False)
    print_utf8('\t\tRegion Visibility: %s' % region_visible)
    # Prices are stored per currency code; the stored amount is scaled by
    # 10000 relative to the displayed value.
    for currency, label in (('XHS', 'Shard Cost'),
                            ('XHG', 'Gold Cost'),
                            ('XHC', 'Gem Cost')):
        price = next(
            (entry for entry in Product["m_prices"]
             if entry['m_currency'] == currency
             and entry['m_priceType'] == 'RETAIL_VALUE'),
            None)
        if price is not None:
            print_utf8('\t\t%s: %s' % (label, price['m_price'] / 10000))
def parseRewards(CEntries, Region, Economy, Locale, CatalogTree, RewardEntries):
    """Report each reward catalog entry and track missing store data.

    For every entry this prints its localized name, type, rarity and other
    metadata, then looks up its product id and required-reward licenses in
    the economy catalogs, printing price info for anything found.  Entries
    whose product or licenses cannot be resolved are appended to the
    module-level MissingProducts / MissingLicenses lists (side effect).

    :param CEntries: reward XML elements (CBanner, CSkin, ...) to report on.
    :param Region: numeric region id used to build battlenet:// hyperlinks.
    :param Economy: iterable of parsed depot economy catalogs.
    :param Locale: localized-strings lookup used for display names.
    :param CatalogTree: unused here; kept for call-site compatibility.
    :param RewardEntries: dict mapping reward id -> CReward* XML element.
    """
    global MissingProducts, MissingLicenses
    for CEntry in CEntries:
        # Required rewards (plus the entry itself) determine which licenses
        # must exist in the economy data for this item to be purchasable.
        RequiredRewardArray = set(map(lambda x: x.get('value'), CEntry.findall('RequiredRewardArray')))
        CType = CEntry.tag[1:]
        CId = CEntry.get('id')
        RequiredRewardArray.add(CId)
        try:
            CName = Locale.get('%s/Name/%s' % (CType, CId))
        except:
            # NOTE(review): bare except — falls back to the raw id when the
            # locale lookup fails for any reason.
            CName = CId
        print_utf8('%s (%s)' % (CName, CId))
        print_utf8('\tType: %s' % CType)
        CInfoText = CEntry.find('InfoText')
        if CInfoText != None:
            print_utf8('\tInfo: %s' % Locale.get(CInfoText.get('value')))
        CSortName = CEntry.find('SortName')
        SortName = ''
        if CSortName != None:
            SortName = Locale.get(CSortName.get('value'))
            print_utf8('\tSort Name: %s' % SortName)
        CRarity = CEntry.find('Rarity')
        if CRarity == None:
            CRarity = 'Common'
        else:
            CRarity = CRarity.get('value')
        print_utf8('\tRarity: %s' % CRarity)
        CReleaseDate = CEntry.find('ReleaseDate')
        if CReleaseDate != None:
            # NOTE(review): prints the Year attribute twice — the last field
            # was presumably meant to be 'Day'; confirm against the XML schema.
            print_utf8('\tRelease Date: %s/%s/%s' % (CReleaseDate.get('Year'), CReleaseDate.get('Month'), CReleaseDate.get('Year')))
        CUniverse = CEntry.find('Universe')
        if CUniverse != None:
            print_utf8('\tUniverse: %s' % CUniverse.get('value'))
        CCollectionCategory = CEntry.find('CollectionCategory')
        if CCollectionCategory != None:
            print_utf8('\tCategory: %s' % CCollectionCategory.get('value'))
        CHyperlinkId = CEntry.find('HyperlinkId')
        CHyperlink = ''
        if CHyperlinkId != None:
            CHyperlink = 'battlenet://heroes/%s/%s/%s' % (CType.lower(), Region, CHyperlinkId.get('value'))
            print_utf8('\tHyperlink: %s' % CHyperlink)
        CEventName = CEntry.find('EventName')
        if CEventName != None:
            print_utf8('\tEvent: %s' % CEventName.get('value'))
        CProductId = CEntry.find('ProductId')
        if CProductId != None:
            CProductId = CProductId.get('value')
        # Collect the license ids demanded by every required reward.
        LicenseIds = []
        for RequiredReward in RequiredRewardArray:
            if not RequiredReward in RewardEntries:
                continue
            LicenseIds += list(map(lambda x: x.get('value'), RewardEntries[RequiredReward].findall("License")))
        Done = False
        # Scan every economy catalog: print product/bundle info the first
        # time it is found, and whittle LicenseIds down to the unresolved ones.
        for EconomyCatalog in Economy:
            if EconomyCatalog.type & TYPE_PRODUCTS == TYPE_PRODUCTS:
                if CProductId != None:
                    EconomyProduct = EconomyCatalog.findProducts(int(CProductId))
                    if EconomyProduct != None:
                        # Found: clear the id so later catalogs are skipped.
                        CProductId = None
                        if not Done:
                            Done = True
                            print_utf8('\tProduct Info:')
                            printProduct(EconomyProduct, Region)
                NewLicenseIds = []
                for LicenseId in LicenseIds:
                    EconomyLicenses = EconomyCatalog.findLicenses(int(LicenseId))
                    if EconomyLicenses == None:
                        NewLicenseIds.append(LicenseId)
                    else:
                        for EconomyLicense in EconomyLicenses:
                            print_utf8('\tBundle Info:')
                            printProduct(EconomyLicense, Region)
                LicenseIds = NewLicenseIds
        # Voice lines are intentionally excluded from the missing reports.
        if CType == 'VoiceLine': continue
        if CProductId != None:
            MissingProducts.append('%s (%s) %s %s' % (CName, CId, CHyperlink, SortName))
        if len(LicenseIds) > 0:
            MissingLicenses.append('%s (%s) %s %s' % (CName, CId, CHyperlink, SortName))
RootDir = argv[1]
RootDirLength = len(RootDir)
RootDepotDir = 'C:/ProgramData'
if len(argv) > 2:
RootDepotDir = argv[2]
RootDepotDir = '%s/Blizzard Entertainment/Battle.net/' % (argv[2])
RootDepotDirLength = len(RootDepotDir)
RootLocale = 'enus'
if len(argv) > 3:
RootLocale = argv[3]
# 1 = us, 2 = eu, 3 = ko, 5? = cn, 98 = xx (ww ptr), ?? = cxx (cn ptr), ?? = xx-02 (tournament)
RootRegion = 1
if len(argv) > 4:
RootRegion = int(argv[4])
print_utf8('Loading economy data')
Depot = DepotIndex(RootDepotDir)
EconomyCatalogs = list(map(lambda x: DepotCataFile(x.path), Depot.cata))
GameDataList = ['%s/heroesdata.stormmod' % RootDir]
GameDataList += list(map(lambda x: '%s/%s/' % (RootDir, x.get('value').lower()[5:]), Catalog('%s/heroesdata.stormmod/base.stormdata/Includes.xml' % RootDir)))
CRewardById = {}
CCatalogs = []
CCombinedLocale = {}
print_utf8('Loading reward data')
for gameDataDir in GameDataList:
gameDataPath = '%s/base.stormdata/GameData.xml' % gameDataDir
if not exists(gameDataPath):
print_utf8('Catalog stormmod %s does not exist!' % gameDataPath[RootDirLength:])
continue
CCombinedLocale = LocalizedStrings(CCombinedLocale).Load('%s/%s.stormdata/LocalizedData/GameStrings.txt' % (gameDataDir, RootLocale)).data
GameDataCatalog = Catalog(gameDataPath)
for CatalogEntry in GameDataCatalog:
catalogPath = '%s/base.stormdata/%s' % (gameDataDir, CatalogEntry)
if not exists(catalogPath):
print_utf8('Catalog file %s does not exist!' % catalogPath[RootDirLength:])
continue
CatalogFile = Catalog(catalogPath)
CCatalogs.append(CatalogFile)
for CRewardType in ['Banner', 'VoiceLine', 'Spray', 'Hero', 'Skin', 'Mount', 'AnnouncerPack', 'Icon']:
CRewards = CatalogFile.findall('CReward%s' % CRewardType)
for CReward in CRewards: CRewardById[CReward.get('id')] = CReward
CCombinedLocale = LocalizedStrings(CCombinedLocale)
print_utf8('Parsing reward data')
for CatalogFile in CCatalogs:
CItems = []
for CRewardType in ['Banner', 'VoiceLine', 'Spray', 'Hero', 'Skin', 'Mount', 'AnnouncerPack', 'Icon']:
CItems += CatalogFile.findall('C%s' % CRewardType)
parseRewards(CItems, RootRegion, EconomyCatalogs, CCombinedLocale, CatalogFile, CRewardById)
print_utf8("Missing Products: \n\t%s" % '\n\t'.join(MissingProducts))
print_utf8("Missing Licenses: \n\t%s" % '\n\t'.join(MissingLicenses))
| 4,997 | 0 | 75 |
fc9232403d12cc2eba69cc683ada204241083f20 | 2,640 | py | Python | src/graph_notebook/options/options.py | joywa/graph-notebook | 2c55b4fb5b6fb3c3205d0786a45a9101a44288a4 | [
"ISC",
"Apache-2.0",
"CC0-1.0"
] | 1 | 2021-07-10T14:20:09.000Z | 2021-07-10T14:20:09.000Z | src/graph_notebook/options/options.py | QPC-database/graph-notebook | ea162e47c2c2e5600417e6ad9ac34aa7ac462899 | [
"ISC",
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/graph_notebook/options/options.py | QPC-database/graph-notebook | ea162e47c2c2e5600417e6ad9ac34aa7ac462899 | [
"ISC",
"Apache-2.0",
"CC0-1.0"
] | null | null | null | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
OPTIONS_DEFAULT_DIRECTED = {
"nodes": {
"borderWidthSelected": 0,
"borderWidth": 0,
"color": {
"background": "rgba(210, 229, 255, 1)",
"border": "transparent",
"highlight": {
"background": "rgba(9, 104, 178, 1)",
"border": "rgba(8, 62, 100, 1)"
}
},
"shadow": {
"enabled": False
},
"shape": "circle",
"widthConstraint": {
"minimum": 70,
"maximum": 70
},
"font": {
"face": "courier new",
"color": "black",
"size": 12
},
},
"edges": {
"color": {
"inherit": False
},
"smooth": {
"enabled": True,
"type": "straightCross"
},
"arrows": {
"to": {
"enabled": True,
"type": "arrow"
}
},
"font": {
"face": "courier new"
}
},
"interaction": {
"hover": True,
"hoverConnectedEdges": True,
"selectConnectedEdges": False
},
"physics": {
"minVelocity": 0.75,
"barnesHut": {
"centralGravity": 0.1,
"gravitationalConstant": -50450,
"springLength": 95,
"springConstant": 0.04,
"damping": 0.09,
"avoidOverlap": 0.1
},
"solver": "barnesHut",
"enabled": True,
"adaptiveTimestep": True,
"stabilization": {
"enabled": True,
"iterations": 1
}
}
}
def vis_options_merge(original, target):
    """Merge the target dict with the original dict, without modifying the input dicts.
    :param original: the original dict.
    :param target: the target dict that takes precedence when there are type conflicts or value conflicts.
    :return: a new dict containing references to objects in both inputs.
    """
    resultdict = {}
    common_keys = original.keys() & target.keys()
    for key in common_keys:
        obj1 = original[key]
        obj2 = target[key]
        if type(obj1) is dict and type(obj2) is dict:
            # Both sides are dicts: recurse so nested option groups merge
            # key-by-key instead of target replacing the whole sub-dict.
            resultdict[key] = vis_options_merge(obj1, obj2)
        else:
            # Type or value conflict: target takes precedence.
            resultdict[key] = obj2
    # Keys present in only one of the inputs are carried over unchanged.
    for key in (original.keys() - target.keys()):
        resultdict[key] = original[key]
    for key in (target.keys() - original.keys()):
        resultdict[key] = target[key]
    return resultdict
| 25.882353 | 106 | 0.485606 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
OPTIONS_DEFAULT_DIRECTED = {
"nodes": {
"borderWidthSelected": 0,
"borderWidth": 0,
"color": {
"background": "rgba(210, 229, 255, 1)",
"border": "transparent",
"highlight": {
"background": "rgba(9, 104, 178, 1)",
"border": "rgba(8, 62, 100, 1)"
}
},
"shadow": {
"enabled": False
},
"shape": "circle",
"widthConstraint": {
"minimum": 70,
"maximum": 70
},
"font": {
"face": "courier new",
"color": "black",
"size": 12
},
},
"edges": {
"color": {
"inherit": False
},
"smooth": {
"enabled": True,
"type": "straightCross"
},
"arrows": {
"to": {
"enabled": True,
"type": "arrow"
}
},
"font": {
"face": "courier new"
}
},
"interaction": {
"hover": True,
"hoverConnectedEdges": True,
"selectConnectedEdges": False
},
"physics": {
"minVelocity": 0.75,
"barnesHut": {
"centralGravity": 0.1,
"gravitationalConstant": -50450,
"springLength": 95,
"springConstant": 0.04,
"damping": 0.09,
"avoidOverlap": 0.1
},
"solver": "barnesHut",
"enabled": True,
"adaptiveTimestep": True,
"stabilization": {
"enabled": True,
"iterations": 1
}
}
}
def vis_options_merge(original, target):
    """Merge the target dict with the original dict, without modifying the input dicts.
    :param original: the original dict.
    :param target: the target dict that takes precedence when there are type conflicts or value conflicts.
    :return: a new dict containing references to objects in both inputs.
    """
    merged = {}
    for key in original.keys() | target.keys():
        if key not in target:
            merged[key] = original[key]
        elif key not in original:
            merged[key] = target[key]
        else:
            left = original[key]
            right = target[key]
            # Recurse only when both sides are plain dicts; on any type or
            # value conflict the target side wins.
            if type(left) is dict and type(right) is dict:
                merged[key] = vis_options_merge(left, right)
            else:
                merged[key] = right
    return merged
| 0 | 0 | 0 |
8846f6302398d9ef67a8e1cf2f29121a9029efb4 | 722 | py | Python | module/other-test/unroll/unroll-test.py | dbunker/SABR | 556fd9d9e152c3067ea873e401d68d7656284808 | [
"MIT"
] | 8 | 2017-03-06T01:37:06.000Z | 2020-11-05T09:58:01.000Z | module/other-test/unroll/unroll-test.py | dbunker/SABR | 556fd9d9e152c3067ea873e401d68d7656284808 | [
"MIT"
] | null | null | null | module/other-test/unroll/unroll-test.py | dbunker/SABR | 556fd9d9e152c3067ea873e401d68d7656284808 | [
"MIT"
] | 2 | 2018-12-19T23:01:51.000Z | 2021-09-19T07:13:08.000Z | # shell game
import sys, os, time, random, math
relFold = '../../../'
sys.path.append(relFold+'module')
import sabr
sabrUnroll() | 17.609756 | 55 | 0.587258 | # shell game
import sys, os, time, random, math
relFold = '../../../'
sys.path.append(relFold+'module')
import sabr
def sabrUnroll():
    """Build a tiny three-cell shell-game SABR model, unroll it, and run
    the solver, printing its result.

    NOTE(review): Python 2 script (`print res` below); the sabr module and
    the relative 'sabr' binary path must exist for process() to succeed.
    """
    sabrObj = sabr.SabrObj()
    # sym
    sabrObj.setSym(['0','1','2'])
    # board
    sabrObj.setBoard(['a','b','c'])
    # start
    sabrObj.setStart(['2','0','1'])
    # end
    sabrObj.setEnd(['0','1','2'])
    # trans
    sabrObj.addTransSim('T1','O1',['v1','v2'],['v2','v1'])
    sabrObj.addTransSim('T2','O2',['w1','w2'],['w2','w1'])
    sabrObj.addDesObj('D1','O1',['a','b'])
    sabrObj.addDesObj('D2','O2',['b','c'])
    sabrObj.addDesObj('D3','O1',['b','c'])
    # Expand the transition system, prune unused states, and dump the
    # model before invoking the external solver.
    sabrObj.unroll()
    sabrObj.removeUseless()
    sabrObj.source('unroll.tb')
    res = sabrObj.process(relFold+'sabr',2)
    print res
sabrUnroll() | 569 | 0 | 23 |
b1a15c0179cdcdd136cb7dc16f3e4a530d7b79cb | 289 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalResourceType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalResourceType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalResourceType.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class ExternalResourceType(GuidEnum):
"""
A type class used to distinguish between different kinds of external resource.
ExternalResourceType(guid: Guid)
"""
@staticmethod
def __new__(self, guid):
""" __new__(cls: type,guid: Guid) """
pass
| 20.642857 | 80 | 0.633218 | class ExternalResourceType(GuidEnum):
"""
A type class used to distinguish between different kinds of external resource.
ExternalResourceType(guid: Guid)
"""
@staticmethod
def __new__(self, guid):
""" __new__(cls: type,guid: Guid) """
pass
| 0 | 0 | 0 |
b9932aaaeb9920723bc0da7fce971d77b6853de5 | 2,485 | py | Python | assembly_ai/app.py | mattheweverette/Lectern | cc06a4df7af7fad9c320eb4803746da3669f1275 | [
"MIT"
] | 1 | 2022-03-31T20:47:02.000Z | 2022-03-31T20:47:02.000Z | assembly_ai/app.py | mattheweverette/Lectern | cc06a4df7af7fad9c320eb4803746da3669f1275 | [
"MIT"
] | null | null | null | assembly_ai/app.py | mattheweverette/Lectern | cc06a4df7af7fad9c320eb4803746da3669f1275 | [
"MIT"
] | null | null | null | import streamlit as st
import glob
import json
from podcasts import pipeline
from threading import Thread
st.title("Podcast Summaries")
json_files = glob.glob('*.json')
episode_id = st.sidebar.text_input("Episode ID")
button = st.sidebar.button("Download Episode summary")
if button and episode_id:
st.sidebar.write("Get auto chapters...")
#pipeline(episode_id)
t = Thread(target=pipeline, args=(episode_id,))
t.start()
for file in json_files:
with open(file, 'r') as f:
data = json.load(f)
chapter = data['chapters']
episode_title = data['episode_title']
thumbnail = data['thumbnail']
podcast_title = data['podcast_title']
audio = data['audio_url']
with st.expander(f"{podcast_title} - {episode_title}"):
st.image(thumbnail, width=200)
st.markdown(f'#### {episode_title}')
st.write(get_clean_summary(chapter))
| 32.272727 | 104 | 0.591952 | import streamlit as st
import glob
import json
from podcasts import pipeline
from threading import Thread
st.title("Podcast Summaries")
json_files = glob.glob('*.json')
episode_id = st.sidebar.text_input("Episode ID")
button = st.sidebar.button("Download Episode summary")
if button and episode_id:
st.sidebar.write("Get auto chapters...")
#pipeline(episode_id)
t = Thread(target=pipeline, args=(episode_id,))
t.start()
def get_clean_summary(chapters):
    """Format AssemblyAI auto-chapters into a readable plain-text summary.

    Each chapter contributes three parts: a "Chapter N Timestamp: ..." line
    (HH:MM:SS, with the hour field omitted when zero), a "Key Detail:" line
    with the chapter headline, and a "Summary:" line whose text is
    capitalized if it does not already start with an upper-case letter.

    Fixes over the previous version: the summary was appended twice when it
    already started with a capital letter (an unconditional append followed
    the `if` branch), the `elif ():` capitalization branch was dead code
    (`()` is always falsy), and an empty summary string raised IndexError.

    :param chapters: list of dicts with 'start' (milliseconds), 'headline'
        and 'summary' keys.
    :return: the formatted multi-chapter text (empty string for no chapters).
    """
    sections = []
    for number, chapter in enumerate(chapters, start=1):
        start_ms = chapter['start']
        seconds = int((start_ms / 1000) % 60)
        minutes = int((start_ms / (1000 * 60)) % 60)
        hours = int((start_ms / (1000 * 60 * 60)) % 24)
        if hours > 0:
            stamp = f'{hours:02d}:{minutes:02d}:{seconds:02d}'
        else:
            stamp = f'{minutes:02d}:{seconds:02d}'
        summary = chapter['summary']
        # Capitalize only when needed; guard against an empty summary.
        if summary and not summary[0].isupper():
            summary = summary.capitalize()
        sections.append(
            f'Chapter {number} Timestamp: {stamp}\n\n'
            f'Key Detail: {chapter["headline"]}\n\n'
            f'Summary: {summary}\n\n\n\n'
        )
    return ''.join(sections)
# Render one collapsible card per cached episode-summary JSON file.
for file in json_files:
    with open(file, 'r') as f:
        data = json.load(f)
        chapter = data['chapters']  # list of auto-chapter dicts
        episode_title = data['episode_title']
        thumbnail = data['thumbnail']
        podcast_title = data['podcast_title']
        audio = data['audio_url']  # NOTE(review): loaded but never displayed
        with st.expander(f"{podcast_title} - {episode_title}"):
            st.image(thumbnail, width=200)
            st.markdown(f'#### {episode_title}')
            st.write(get_clean_summary(chapter))
| 1,566 | 0 | 23 |
f6ef9e916c9d652c9865097a55a09201ba1bdecc | 3,364 | py | Python | rootpy/utils/ext_glob.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 146 | 2015-01-04T15:16:44.000Z | 2022-01-27T11:29:31.000Z | rootpy/utils/ext_glob.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 143 | 2015-01-07T00:20:42.000Z | 2021-11-04T07:48:26.000Z | rootpy/utils/ext_glob.py | masonproffitt/rootpy | 3926935e1f2100d8ba68070c2ab44055d4800f73 | [
"BSD-3-Clause"
] | 56 | 2015-01-30T11:11:07.000Z | 2022-03-28T09:42:06.000Z | """
Reproduce the standard glob package behaviour but use TSystem to be able to
query remote file systems such as xrootd
"""
from __future__ import print_function
from rootpy.ROOT import gSystem
import glob as gl
import os.path
import fnmatch
__all__ = ["glob", "iglob"]
if __name__ == "__main__":
test_paths = [
"*.*",
"*/*.txt",
"data/L1Ntuple_test_3.root",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v2/"""
"""161031_120512/0000/L1Ntuple_999.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v2/"""
"""161031_120512/0000/L1Ntuple_99*.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v*/"""
"""161031_120*/0000/L1Ntuple_99*.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v*/"""
"""161031_120*""",
]
import pprint
for i, path in enumerate(test_paths):
print(path, "=>")
expanded = glob(path)
print(len(expanded), "files:", pprint.pformat(expanded))
| 31.439252 | 75 | 0.615933 | """
Reproduce the standard glob package behaviour but use TSystem to be able to
query remote file systems such as xrootd
"""
from __future__ import print_function
from rootpy.ROOT import gSystem
import glob as gl
import os.path
import fnmatch
__all__ = ["glob", "iglob"]
def __directory_iter(directory):
    # Yield entry names from an open TSystem directory handle.
    # gSystem.GetDirEntry returns successive entries and a falsy value once
    # the listing is exhausted; some ROOT builds raise TypeError at the end
    # instead, which is treated as end-of-listing too.
    while True:
        try:
            file = gSystem.GetDirEntry(directory)
            if not file:
                break
            yield file
        except TypeError:
            break
def glob(pathname):
    """Expand *pathname*, falling back to TSystem for remote filesystems.

    The ordinary python glob is consulted first; if it matches nothing and
    the pattern contains wildcards, ROOT's TSystem is queried instead so
    that remote protocols such as xrootd work.  A wildcard-free pathname
    that matches nothing locally is returned as-is.
    """
    matches = gl.glob(pathname)
    if matches:
        return matches
    if gl.has_magic(pathname):
        # Wildcards present but nothing matched locally: ask ROOT.
        return root_glob(pathname)
    # No wildcard at all: hand the path back untouched.
    return [pathname]
def root_glob(pathname):
    """Expand a wildcard pathname using ROOT's TSystem directory listing,
    recursing into any wildcarded parent directories first.

    Returns the list of matching paths (empty when nothing matches or the
    directory cannot be opened).
    """
    # Split the pathname into a directory and basename
    # (which should include the wild-card)
    dirs, basename = os.path.split(pathname)
    if gl.has_magic(dirs):
        # Parent itself contains wildcards: expand it recursively so each
        # concrete directory can be listed below.
        dirs = root_glob(dirs)
    else:
        dirs = [dirs]
    files = []
    for dirname in dirs:
        # Uses `TSystem` to open the directory.
        # TSystem itself wraps up the calls needed to query xrootd.
        dirname = gSystem.ExpandPathName(dirname)
        directory = gSystem.OpenDirectory(dirname)
        if directory:
            for file in __directory_iter(directory):
                if file in [".", ".."]:
                    continue
                if not fnmatch.fnmatchcase(file, basename):
                    continue
                files.append(os.path.join(dirname, file))
            try:
                # Release the TSystem handle; some ROOT builds raise
                # TypeError here, which is safe to ignore.
                gSystem.FreeDirectory(directory)
            except TypeError:
                pass
    return files
def iglob(pathname):
    """Generator counterpart of :func:`glob`: yield matches one at a time."""
    matches = glob(pathname)
    for match in matches:
        yield match
if __name__ == "__main__":
test_paths = [
"*.*",
"*/*.txt",
"data/L1Ntuple_test_3.root",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v2/"""
"""161031_120512/0000/L1Ntuple_999.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v2/"""
"""161031_120512/0000/L1Ntuple_99*.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v*/"""
"""161031_120*/0000/L1Ntuple_99*.root""",
"""root://eoscms.cern.ch//eos/cms/store/group/dpg_trigger/"""
"""comm_trigger/L1Trigger/L1Menu2016/Stage2/"""
"""l1t-integration-v88p1-CMSSW-8021/SingleMuon/"""
"""crab_l1t-integration-v88p1-CMSSW-8021__SingleMuon_2016H_v*/"""
"""161031_120*""",
]
import pprint
for i, path in enumerate(test_paths):
print(path, "=>")
expanded = glob(path)
print(len(expanded), "files:", pprint.pformat(expanded))
| 1,472 | 0 | 92 |
f700e672cd17275a041dea32beccb6a84ec37569 | 392 | py | Python | setup.py | Damaen/Travis-Hello-world | 6c88895142e708638000c9bd9550c3bc61045689 | [
"MIT"
] | null | null | null | setup.py | Damaen/Travis-Hello-world | 6c88895142e708638000c9bd9550c3bc61045689 | [
"MIT"
] | null | null | null | setup.py | Damaen/Travis-Hello-world | 6c88895142e708638000c9bd9550c3bc61045689 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Packaging script: bundles every module under src/ into the distribution."""
from distutils.core import setup
from glob import glob
# Fix: basename/splitext are used in the py_modules computation below but
# were never imported, so running this script raised a NameError.
from os.path import basename, splitext
from setuptools import find_packages
setup(name='Fibonacci',
version='1.0',
description='Python Distribution Utilities',
author='Kevin Chen',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
) | 24.5 | 76 | 0.665816 |
#!/usr/bin/env python
"""Packaging script: bundles every module under src/ into the distribution."""
from distutils.core import setup
from glob import glob
# Fix: basename/splitext are used in the py_modules computation below but
# were never imported, so running this script raised a NameError.
from os.path import basename, splitext
from setuptools import find_packages
setup(name='Fibonacci',
version='1.0',
description='Python Distribution Utilities',
author='Kevin Chen',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
) | 0 | 0 | 0 |
140c6353a82e92848c975322c3dc412086d94517 | 125 | py | Python | tests/__init__.py | Foxbud/tagup | e55f13658006563851943ee68cd2955b79b19c3d | [
"MIT"
] | null | null | null | tests/__init__.py | Foxbud/tagup | e55f13658006563851943ee68cd2955b79b19c3d | [
"MIT"
] | 3 | 2020-06-14T22:41:10.000Z | 2020-06-16T17:15:29.000Z | tests/__init__.py | Foxbud/tagup | e55f13658006563851943ee68cd2955b79b19c3d | [
"MIT"
] | null | null | null | """
This file is part of the tagup Python module which is released under MIT.
See file LICENSE for full license details.
"""
| 25 | 73 | 0.752 | """
This file is part of the tagup Python module which is released under MIT.
See file LICENSE for full license details.
"""
| 0 | 0 | 0 |
8555ab80f0d583260980e8115da2a96b1679c256 | 5,152 | py | Python | models_dev/point_transformer.py | A-suozhang/SpatioTemporalSegmentation-ScanNet | 479de1793afe6ec20bed6c0f68498b0c49e7315c | [
"MIT"
] | 1 | 2021-05-07T08:42:40.000Z | 2021-05-07T08:42:40.000Z | models_dev/point_transformer.py | A-suozhang/SpatioTemporalSegmentation-ScanNet | 479de1793afe6ec20bed6c0f68498b0c49e7315c | [
"MIT"
] | null | null | null | models_dev/point_transformer.py | A-suozhang/SpatioTemporalSegmentation-ScanNet | 479de1793afe6ec20bed6c0f68498b0c49e7315c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.pct_utils import TDLayer, TULayer, PTBlock, TRBlock
| 39.937984 | 135 | 0.638199 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.pct_utils import TDLayer, TULayer, PTBlock, TRBlock
class PointTransformer(nn.Module):
    """Point Transformer network for per-point classification.

    U-shaped architecture: an attention block on the full point set, four
    transformer-with-downsampling stages (TRBlock, each halving the point
    count via fps_rate=2), a middle linear+attention block, then four
    upsampling stages (TULayer + PTBlock) back to full resolution, and a
    small per-point MLP head.

    Args:
        config: experiment configuration (not read here; kept for the
            caller's construction interface).
        num_class: number of output classes per point.
        N: number of input points; used to size the TULayer stages.
        normal_channel: extra feature channels besides xyz (per the comment
            below: 3 for ModelNet, 6 for ScanNet, 0 for ScanObjectNN).
    """
    def __init__(self,config,num_class,N,normal_channel=3):
        super(PointTransformer, self).__init__()
        # The normal channel for Modelnet is 3, for scannet is 6, for scanobjnn is 0
        in_channel = normal_channel+3 # normal ch + xyz
        self.normal_channel = normal_channel
        # Lift raw per-point features to the 32-dim working width.
        self.input_mlp = nn.Sequential(
            nn.Conv1d(in_channel, 32, 1),
            nn.BatchNorm1d(32),
        )
        # nn.ReLU(),
        # nn.Conv1d(32, 32, 1),
        # nn.BatchNorm1d(32))
        # Channel widths per stage; each TRBlock doubles the channel count
        # (expansion=2) while halving the point count.
        self.in_dims = [32, 64, 128, 256]
        self.out_dims = [64, 128, 256, 512]
        self.neighbor_ks = [16, 16, 16, 16, 16]
        self.radius = [0.1, 0.2, 0.4, 0.8, 0.8]
        self.PTBlock0 = PTBlock(in_dim=self.in_dims[0], n_sample=self.neighbor_ks[0], radius=self.radius[0])
        self.PTBlock1 = TRBlock(in_dim=self.in_dims[0], n_sample=self.neighbor_ks[1], fps_rate = 2, expansion=2, radius=self.radius[1])
        self.PTBlock2 = TRBlock(in_dim=self.in_dims[1], n_sample=self.neighbor_ks[2], fps_rate = 2, expansion=2, radius=self.radius[2])
        self.PTBlock3 = TRBlock(in_dim=self.in_dims[2], n_sample=self.neighbor_ks[3], fps_rate = 2, expansion=2, radius=self.radius[3])
        # self.TDLayer4 = TDLayer(npoint=int(N/256),input_dim=self.in_dims[3], out_dim=self.out_dims[3], k=self.neighbor_ks[4])
        self.PTBlock4 = TRBlock(in_dim=self.in_dims[3], n_sample=self.neighbor_ks[4], fps_rate = 2, expansion=2, radius=self.radius[4])
        # Bottleneck: pointwise linear followed by one more attention block.
        self.middle_linear = nn.Conv1d(self.out_dims[3], self.out_dims[3],1)
        self.PTBlock_middle = PTBlock(in_dim=self.out_dims[3], n_sample=self.neighbor_ks[4], radius=self.radius[4])
        # Decoder: each TULayer fuses the coarse features with the matching
        # encoder stage (skip connection), then a PTBlock refines them.
        self.TULayer5 = TULayer(npoint=int(N/64),input_dim=self.out_dims[3], out_dim=self.in_dims[3], k=3)
        self.PTBlock5= PTBlock(in_dim=self.in_dims[3], n_sample=self.neighbor_ks[4], radius=self.radius[3])
        self.TULayer6 = TULayer(npoint=int(N/16),input_dim=self.out_dims[2], out_dim=self.in_dims[2], k=3)
        self.PTBlock6= PTBlock(in_dim=self.in_dims[2], n_sample=self.neighbor_ks[3], radius=self.radius[2])
        self.TULayer7 = TULayer(npoint=int(N/4),input_dim=self.out_dims[1], out_dim=self.in_dims[1], k=3)
        self.PTBlock7= PTBlock(in_dim=self.in_dims[1], n_sample=self.neighbor_ks[2], radius=self.radius[1])
        self.TULayer8 = TULayer(npoint=int(N),input_dim=self.out_dims[0], out_dim=self.in_dims[0], k=3)
        self.PTBlock8= PTBlock(in_dim=self.in_dims[0], n_sample=self.neighbor_ks[1], radius=self.radius[0])
        # Per-point classification head.
        self.fc = nn.Sequential(
            nn.Linear(32,32),
            nn.Dropout(0.4),
            nn.Linear(32,num_class),
        )
        # Optional final LayerNorm; disabled by default.
        self.use_ln = False
        if self.use_ln:
            self.final_ln = nn.LayerNorm(256)
        self.save_flag = False
        self.save_dict = {}
    def forward(self, inputs):
        """Run the encoder/decoder on `inputs` of shape (B, 3+normal_channel, N)
        and return per-point class scores (presumably (B, N, num_class),
        given the Linear(32, num_class) head — verify PTBlock output layout).
        """
        # NOTE(review): register_buffer is called on every forward pass just
        # to stash the inputs for inspection — unusual; confirm intent.
        self.register_buffer('input_map', inputs)
        B,_,_ = list(inputs.size())
        if self.normal_channel:
            l0_xyz = inputs[:, :3, :]
        else:
            l0_xyz = inputs
        l0_points = self.input_mlp(inputs)
        # here is nan
        l0_xyz, l0_points = self.PTBlock0(l0_xyz, l0_points)
        # Encoder: each stage halves the point count and doubles channels.
        l1_xyz, l1_points = self.PTBlock1(l0_xyz, l0_points)
        l2_xyz, l2_points = self.PTBlock2(l1_xyz, l1_points)
        l3_xyz, l3_points = self.PTBlock3(l2_xyz, l2_points)
        l4_xyz, l4_points = self.PTBlock4(l3_xyz, l3_points)
        l4_points = self.middle_linear(l4_points)
        l4_xyz, l4_points = self.PTBlock_middle(l4_xyz, l4_points)
        # Decoder: upsample and fuse with the matching encoder stage.
        l5_xyz, l5_points = self.TULayer5(l4_xyz, l3_xyz, l4_points, l3_points)
        l5_xyz, l5_points = self.PTBlock5(l5_xyz, l5_points)
        l6_xyz, l6_points = self.TULayer6(l5_xyz, l2_xyz, l5_points, l2_points)
        l6_xyz, l6_points = self.PTBlock6(l6_xyz, l6_points)
        l7_xyz, l7_points = self.TULayer7(l6_xyz, l1_xyz, l6_points, l1_points)
        l7_xyz, l7_points = self.PTBlock7(l7_xyz, l7_points)
        l8_xyz, l8_points = self.TULayer8(l7_xyz, l0_xyz, l7_points, l0_points)
        l8_xyz, l8_points = self.PTBlock8(l8_xyz, l8_points)
        x = self.fc(l8_points.transpose(1,2))
        # NOTE(review): leftover interactive debugging hook — drops into ipdb
        # if any logit is infinite; should be removed for production runs.
        if torch.isinf(x).sum() > 0:
            import ipdb; ipdb.set_trace()
        return x
class get_loss(nn.Module):
    """Weighted per-point cross-entropy loss for semantic segmentation.

    Improvements over the previous version: the class count is inferred
    from the logits instead of being hard-coded to 20, and target/weight
    tensors follow the logits' device instead of an unconditional .cuda(),
    so the loss also runs on CPU-only machines (unchanged behavior when
    the logits are already on GPU).
    """
    def __init__(self):
        super().__init__()
        # reduction='none' keeps per-point losses so they can be
        # re-weighted before averaging.
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
    def forward(self, predict, target, weights):
        """
        :param predict: (B,N,C) raw per-point class scores
        :param target: (B,N) integer class labels
        :param weights: (B,N) per-point loss weights
        :return: scalar weighted mean cross-entropy
        """
        num_classes = predict.size(-1)
        device = predict.device
        predict = predict.view(-1, num_classes).contiguous()  # B*N, C
        target = target.view(-1).contiguous().to(device).long()  # B*N
        weights = weights.view(-1).contiguous().to(device).float()  # B*N
        loss = self.cross_entropy_loss(predict, target)  # B*N
        loss = loss * weights
        loss = torch.mean(loss)
        return loss
| 4,325 | 594 | 99 |
a9cbb09777c6a011d3778870d939d1f2d673530d | 5,138 | py | Python | backend/src/apiserver/visualization/server.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | backend/src/apiserver/visualization/server.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | backend/src/apiserver/visualization/server.py | nostro-im/pipelines | 39f5b6b74040abbf4b764cbd5b422d7548723d9e | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import json
import os
from pathlib import Path
from typing import Text
from nbformat import NotebookNode
from nbformat.v4 import new_notebook, new_code_cell
import tornado.ioloop
import tornado.web
# "exporter" lives alongside this script rather than on a package path,
# hence the dynamic import instead of a regular import statement.
exporter = importlib.import_module("exporter")
parser = argparse.ArgumentParser(description="Server Arguments")
parser.add_argument(
    "--timeout",
    type=int,
    default=os.getenv('KERNEL_TIMEOUT', 100),
    help="Amount of time in seconds that a visualization can run for before " +
    "being stopped."
)
args = parser.parse_args()
# Single shared Exporter instance; the timeout bounds each visualization run.
_exporter = exporter.Exporter(args.timeout)
class VisualizationHandler(tornado.web.RequestHandler):
    """Custom RequestHandler that generates visualizations via post requests.
    """
    def validate_and_get_arguments_from_body(self) -> dict:
        """Validates and converts arguments from post request to dict.

        Returns:
            Arguments provided from post request as a dict with keys
            "type", "arguments" (decoded JSON object) and "source".

        Raises:
            Exception: If no type is provided, if the arguments are not a
                valid JSON object, or if a non-custom visualization is
                missing a source.
        """
        try:
            arguments = {
                "arguments": "{}",
                "type": self.get_body_argument("type")
            }
        except tornado.web.MissingArgumentError:
            raise Exception("No type provided.")
        try:
            arguments["arguments"] = self.get_body_argument("arguments")
        except tornado.web.MissingArgumentError:
            # If no arguments are provided, ignore error as arguments has been
            # set to a stringified JSON object by default.
            pass
        try:
            arguments["arguments"] = json.loads(arguments.get("arguments"))
        except json.decoder.JSONDecodeError as e:
            raise Exception("Invalid JSON provided as arguments: {}".format(str(e)))
        # If incorrectly escaped JSON is provided, json.loads can return a
        # string rather than a dict; reject anything that did not decode to
        # a JSON object. isinstance is the idiomatic type check (PEP 8).
        if not isinstance(arguments.get("arguments"), dict):
            raise Exception("Invalid JSON provided as arguments!")
        try:
            arguments["source"] = self.get_body_argument("source")
        except tornado.web.MissingArgumentError:
            arguments["source"] = ""
        # Only "custom" visualizations may omit the data source.
        if arguments.get("type") != "custom":
            if len(arguments.get("source")) == 0:
                raise Exception("No source provided.")
        return arguments
    def generate_notebook_from_arguments(
        self,
        arguments: dict,
        source: Text,
        visualization_type: Text
    ) -> NotebookNode:
        """Generates a NotebookNode from provided arguments.

        Args:
            arguments: JSON object containing provided arguments.
            source: Path or path pattern to be used as data reference for
            visualization.
            visualization_type: Name of visualization to be generated.

        Returns:
            NotebookNode that contains all parameters from a post request.
        """
        nb = new_notebook()
        nb.cells.append(exporter.create_cell_from_args(arguments))
        nb.cells.append(new_code_cell('source = "{}"'.format(source)))
        if visualization_type == "custom":
            # Custom visualizations execute user-supplied code lines.
            code = arguments.get("code", [])
            nb.cells.append(exporter.create_cell_from_custom_code(code))
        else:
            # Predefined visualizations are loaded from the types/ directory.
            visualization_file = str(Path.cwd() / "types/{}.py".format(visualization_type))
            nb.cells.append(exporter.create_cell_from_file(visualization_file))
        return nb
    def get(self):
        """Health check.
        """
        self.write("alive")
    def post(self):
        """Generates visualization based on provided arguments.
        """
        # Validate arguments from request and return them as a dictionary.
        try:
            request_arguments = self.validate_and_get_arguments_from_body()
        except Exception as e:
            return self.send_error(400, reason=str(e))
        # Create notebook with arguments from request.
        nb = self.generate_notebook_from_arguments(
            request_arguments.get("arguments"),
            request_arguments.get("source"),
            request_arguments.get("type")
        )
        # Generate visualization (output for notebook).
        html = _exporter.generate_html_from_notebook(nb)
        self.write(html)
if __name__ == "__main__":
    # Serve the visualization handler at the root path on port 8888.
    application = tornado.web.Application([
        (r"/", VisualizationHandler),
    ])
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| 34.253333 | 91 | 0.651226 | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import json
import os
from pathlib import Path
from typing import Text
from nbformat import NotebookNode
from nbformat.v4 import new_notebook, new_code_cell
import tornado.ioloop
import tornado.web
# "exporter" lives alongside this script rather than on a package path,
# hence the dynamic import instead of a regular import statement.
exporter = importlib.import_module("exporter")
parser = argparse.ArgumentParser(description="Server Arguments")
parser.add_argument(
    "--timeout",
    type=int,
    default=os.getenv('KERNEL_TIMEOUT', 100),
    help="Amount of time in seconds that a visualization can run for before " +
    "being stopped."
)
args = parser.parse_args()
# Single shared Exporter instance; the timeout bounds each visualization run.
_exporter = exporter.Exporter(args.timeout)
class VisualizationHandler(tornado.web.RequestHandler):
    """Custom RequestHandler that generates visualizations via post requests.
    """
    def validate_and_get_arguments_from_body(self) -> dict:
        """Validates and converts arguments from post request to dict.

        Returns:
            Arguments provided from post request as a dict with keys
            "type", "arguments" (decoded JSON object) and "source".

        Raises:
            Exception: If no type is provided, if the arguments are not a
                valid JSON object, or if a non-custom visualization is
                missing a source.
        """
        try:
            arguments = {
                "arguments": "{}",
                "type": self.get_body_argument("type")
            }
        except tornado.web.MissingArgumentError:
            raise Exception("No type provided.")
        try:
            arguments["arguments"] = self.get_body_argument("arguments")
        except tornado.web.MissingArgumentError:
            # If no arguments are provided, ignore error as arguments has been
            # set to a stringified JSON object by default.
            pass
        try:
            arguments["arguments"] = json.loads(arguments.get("arguments"))
        except json.decoder.JSONDecodeError as e:
            raise Exception("Invalid JSON provided as arguments: {}".format(str(e)))
        # If incorrectly escaped JSON is provided, json.loads can return a
        # string rather than a dict; reject anything that did not decode to
        # a JSON object. isinstance is the idiomatic type check (PEP 8).
        if not isinstance(arguments.get("arguments"), dict):
            raise Exception("Invalid JSON provided as arguments!")
        try:
            arguments["source"] = self.get_body_argument("source")
        except tornado.web.MissingArgumentError:
            arguments["source"] = ""
        # Only "custom" visualizations may omit the data source.
        if arguments.get("type") != "custom":
            if len(arguments.get("source")) == 0:
                raise Exception("No source provided.")
        return arguments
    def generate_notebook_from_arguments(
        self,
        arguments: dict,
        source: Text,
        visualization_type: Text
    ) -> NotebookNode:
        """Generates a NotebookNode from provided arguments.

        Args:
            arguments: JSON object containing provided arguments.
            source: Path or path pattern to be used as data reference for
            visualization.
            visualization_type: Name of visualization to be generated.

        Returns:
            NotebookNode that contains all parameters from a post request.
        """
        nb = new_notebook()
        nb.cells.append(exporter.create_cell_from_args(arguments))
        nb.cells.append(new_code_cell('source = "{}"'.format(source)))
        if visualization_type == "custom":
            # Custom visualizations execute user-supplied code lines.
            code = arguments.get("code", [])
            nb.cells.append(exporter.create_cell_from_custom_code(code))
        else:
            # Predefined visualizations are loaded from the types/ directory.
            visualization_file = str(Path.cwd() / "types/{}.py".format(visualization_type))
            nb.cells.append(exporter.create_cell_from_file(visualization_file))
        return nb
    def get(self):
        """Health check.
        """
        self.write("alive")
    def post(self):
        """Generates visualization based on provided arguments.
        """
        # Validate arguments from request and return them as a dictionary.
        try:
            request_arguments = self.validate_and_get_arguments_from_body()
        except Exception as e:
            return self.send_error(400, reason=str(e))
        # Create notebook with arguments from request.
        nb = self.generate_notebook_from_arguments(
            request_arguments.get("arguments"),
            request_arguments.get("source"),
            request_arguments.get("type")
        )
        # Generate visualization (output for notebook).
        html = _exporter.generate_html_from_notebook(nb)
        self.write(html)
if __name__ == "__main__":
    # Serve the visualization handler at the root path on port 8888.
    application = tornado.web.Application([
        (r"/", VisualizationHandler),
    ])
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| 0 | 0 | 0 |
33a5ac76b77f1aa14a86896845f1a9bc7a935744 | 5,678 | py | Python | accenv/lib/python3.4/site-packages/IPython/nbformat/v3/tests/test_nbbase.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | accenv/lib/python3.4/site-packages/IPython/nbformat/v3/tests/test_nbbase.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | accenv/lib/python3.4/site-packages/IPython/nbformat/v3/tests/test_nbbase.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
] | null | null | null | from unittest import TestCase
from ..nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_worksheet, new_notebook, new_output,
new_author, new_metadata, new_heading_cell, nbformat
)
| 36.397436 | 80 | 0.646883 | from unittest import TestCase
from ..nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_worksheet, new_notebook, new_output,
new_author, new_metadata, new_heading_cell, nbformat
)
class TestCell(TestCase):
    """Tests for the v3 cell constructors (code, text and heading cells)."""
    def test_empty_code_cell(self):
        # A fresh code cell has no input/prompt keys and safe defaults.
        cc = new_code_cell()
        self.assertEqual(cc.cell_type,'code')
        self.assertEqual('input' not in cc, True)
        self.assertEqual('prompt_number' not in cc, True)
        self.assertEqual(cc.outputs, [])
        self.assertEqual(cc.collapsed, False)
    def test_code_cell(self):
        cc = new_code_cell(input='a=10', prompt_number=0, collapsed=True)
        cc.outputs = [new_output(output_type='pyout',
            output_svg='foo',output_text='10',prompt_number=0)]
        self.assertEqual(cc.input, 'a=10')
        self.assertEqual(cc.prompt_number, 0)
        self.assertEqual(cc.language, 'python')
        self.assertEqual(cc.outputs[0].svg, 'foo')
        self.assertEqual(cc.outputs[0].text, '10')
        self.assertEqual(cc.outputs[0].prompt_number, 0)
        self.assertEqual(cc.collapsed, True)
    def test_pyerr(self):
        # Error outputs carry exception name, value and traceback frames.
        o = new_output(output_type='pyerr', ename='NameError',
            evalue='Name not found', traceback=['frame 0', 'frame 1', 'frame 2']
        )
        self.assertEqual(o.output_type, 'pyerr')
        self.assertEqual(o.ename, 'NameError')
        self.assertEqual(o.evalue, 'Name not found')
        self.assertEqual(o.traceback, ['frame 0', 'frame 1', 'frame 2'])
    def test_empty_html_cell(self):
        tc = new_text_cell('html')
        self.assertEqual(tc.cell_type, 'html')
        self.assertEqual('source' not in tc, True)
        self.assertEqual('rendered' not in tc, True)
    def test_html_cell(self):
        tc = new_text_cell('html', 'hi', 'hi')
        self.assertEqual(tc.source, 'hi')
        self.assertEqual(tc.rendered, 'hi')
    def test_empty_markdown_cell(self):
        tc = new_text_cell('markdown')
        self.assertEqual(tc.cell_type, 'markdown')
        self.assertEqual('source' not in tc, True)
        self.assertEqual('rendered' not in tc, True)
    def test_markdown_cell(self):
        tc = new_text_cell('markdown', 'hi', 'hi')
        self.assertEqual(tc.source, 'hi')
        self.assertEqual(tc.rendered, 'hi')
    def test_empty_raw_cell(self):
        tc = new_text_cell('raw')
        self.assertEqual(tc.cell_type, 'raw')
        self.assertEqual('source' not in tc, True)
        self.assertEqual('rendered' not in tc, True)
    def test_raw_cell(self):
        tc = new_text_cell('raw', 'hi', 'hi')
        self.assertEqual(tc.source, 'hi')
        self.assertEqual(tc.rendered, 'hi')
    def test_empty_heading_cell(self):
        tc = new_heading_cell()
        self.assertEqual(tc.cell_type, 'heading')
        self.assertEqual('source' not in tc, True)
        self.assertEqual('rendered' not in tc, True)
    def test_heading_cell(self):
        tc = new_heading_cell('hi', 'hi', level=2)
        self.assertEqual(tc.source, 'hi')
        self.assertEqual(tc.rendered, 'hi')
        self.assertEqual(tc.level, 2)
class TestWorksheet(TestCase):
    """Tests for the v3 worksheet constructor."""
    def test_empty_worksheet(self):
        ws = new_worksheet()
        self.assertEqual(ws.cells,[])
        self.assertEqual('name' not in ws, True)
    def test_worksheet(self):
        cells = [new_code_cell(), new_text_cell('html')]
        ws = new_worksheet(cells=cells,name='foo')
        self.assertEqual(ws.cells,cells)
        self.assertEqual(ws.name,'foo')
class TestNotebook(TestCase):
    """Tests for the v3 notebook constructor and metadata propagation."""
    def test_empty_notebook(self):
        nb = new_notebook()
        self.assertEqual(nb.worksheets, [])
        self.assertEqual(nb.metadata, NotebookNode())
        self.assertEqual(nb.nbformat,nbformat)
    def test_notebook(self):
        worksheets = [new_worksheet(),new_worksheet()]
        metadata = new_metadata(name='foo')
        nb = new_notebook(metadata=metadata,worksheets=worksheets)
        self.assertEqual(nb.metadata.name,'foo')
        self.assertEqual(nb.worksheets,worksheets)
        self.assertEqual(nb.nbformat,nbformat)
    def test_notebook_name(self):
        # A name keyword is folded into notebook metadata.
        worksheets = [new_worksheet(),new_worksheet()]
        nb = new_notebook(name='foo',worksheets=worksheets)
        self.assertEqual(nb.metadata.name,'foo')
        self.assertEqual(nb.worksheets,worksheets)
        self.assertEqual(nb.nbformat,nbformat)
class TestMetadata(TestCase):
    """Tests for the v3 metadata constructor."""
    def test_empty_metadata(self):
        # Unset fields must be absent, not present with empty values.
        md = new_metadata()
        self.assertEqual('name' not in md, True)
        self.assertEqual('authors' not in md, True)
        self.assertEqual('license' not in md, True)
        self.assertEqual('saved' not in md, True)
        self.assertEqual('modified' not in md, True)
        self.assertEqual('gistid' not in md, True)
    def test_metadata(self):
        authors = [new_author(name='Bart Simpson',email='bsimpson@fox.com')]
        md = new_metadata(name='foo',license='BSD',created='today',
            modified='now',gistid='21341231',authors=authors)
        self.assertEqual(md.name, 'foo')
        self.assertEqual(md.license, 'BSD')
        self.assertEqual(md.created, 'today')
        self.assertEqual(md.modified, 'now')
        self.assertEqual(md.gistid, '21341231')
        self.assertEqual(md.authors, authors)
class TestOutputs(TestCase):
    """Smoke tests: binary and base64 image payloads must be accepted
    by new_output without raising."""
    def test_binary_png(self):
        out = new_output(output_png=b'\x89PNG\r\n\x1a\n')
    def test_b64b6tes_png(self):
        out = new_output(output_png=b'iVBORw0KG')
    def test_binary_jpeg(self):
        out = new_output(output_jpeg=b'\xff\xd8')
    def test_b64b6tes_jpeg(self):
        out = new_output(output_jpeg=b'/9')
| 4,727 | 36 | 708 |
5daf88dd6a1a490f60f07f2c9811da15535f9510 | 1,404 | py | Python | apps/trade/src/market/QuoineConstants.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | 1 | 2020-02-02T13:53:21.000Z | 2020-02-02T13:53:21.000Z | apps/trade/src/market/QuoineConstants.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | apps/trade/src/market/QuoineConstants.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Whether to continue processing if an order is still open after a fixed
# amount of time has passed since it was placed.
ORDER_IGNORE_TIMEOUT = True
# Interval between checks while waiting for order execution
ORDER_EXECUTED_RETRY_INTERVAL = 2
# Number of checks while waiting for order execution
ORDER_EXECUTED_RETRY = 40
# Allowed time error when fetching closed trades [s]
TIME_ERROR_ALLOW = 3
# Number of retries for API calls
RETRY_API_CALL = 10
API_URI = 'https://api.liquid.com'
API_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240'
# Path Info
API_PATH_BOARD = '/products/5/price_levels'
API_PATH_TICK = '/products/5'
API_PATH_BALANCE = '/accounts/balance'
API_PATH_ACCOUNT = '/trading_accounts'
API_PATH_LIST_ORDERS = '/orders?currency_pair_code=BTCJPY&status=live&product_code=CASH'
API_PATH_EXECUTIONS = '/executions/me?product_id=5'
API_PATH_ORDERS = '/orders/'
API_PATH_TRADES = '/trades'
API_PATH_TRADE_CLOSE = '/trades/{id}/close'
PRICE_TICK_SIZE = 2.5
# Keys used when parsing order-book responses
BOARD_SIDE_ASK = 'sell_price_levels'
BOARD_SIDE_BID = 'buy_price_levels'
# Keys used when parsing balance responses
BALANCE_CURRENCY = 'currency'
BALANCE_VALUE = 'balance'
BALANCE_CURRENCY_0 = 'JPY'
BALANCE_CURRENCY_1 = 'BTC'
# Keys used when parsing trading-account responses
ACCOUNT_PRODUCT_ID = 'product_id'
ACCOUNT_EQUITY = 'equity'
ACCOUNT_FREE_MARGIN = 'free_margin'
ACCOUNT_MARGIN = 'margin'
ACCOUNT_KEEPRATE = 'keep_rate'
# Market (buy) order settings
ORDER_TYPE = 'market'
ORDER_PRODUCT_ID = 5
ORDER_FUNDING_CURRENCY = 'JPY'
ORDER_SIDE_BUY = 'buy'
ORDER_SIDE_SELL = 'sell'
ORDER_LEVELAGE_LEVEL = 10
ORDER_MODELS = 'models'
| 24.206897 | 148 | 0.775641 | # -*- coding: utf-8 -*-
# Whether to continue processing if an order is still open after a fixed
# amount of time has passed since it was placed.
ORDER_IGNORE_TIMEOUT = True
# Interval between checks while waiting for order execution
ORDER_EXECUTED_RETRY_INTERVAL = 2
# Number of checks while waiting for order execution
ORDER_EXECUTED_RETRY = 40
# Allowed time error when fetching closed trades [s]
TIME_ERROR_ALLOW = 3
# Number of retries for API calls
RETRY_API_CALL = 10
API_URI = 'https://api.liquid.com'
API_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240'
# Path Info
API_PATH_BOARD = '/products/5/price_levels'
API_PATH_TICK = '/products/5'
API_PATH_BALANCE = '/accounts/balance'
API_PATH_ACCOUNT = '/trading_accounts'
API_PATH_LIST_ORDERS = '/orders?currency_pair_code=BTCJPY&status=live&product_code=CASH'
API_PATH_EXECUTIONS = '/executions/me?product_id=5'
API_PATH_ORDERS = '/orders/'
API_PATH_TRADES = '/trades'
API_PATH_TRADE_CLOSE = '/trades/{id}/close'
PRICE_TICK_SIZE = 2.5
# Keys used when parsing order-book responses
BOARD_SIDE_ASK = 'sell_price_levels'
BOARD_SIDE_BID = 'buy_price_levels'
# Keys used when parsing balance responses
BALANCE_CURRENCY = 'currency'
BALANCE_VALUE = 'balance'
BALANCE_CURRENCY_0 = 'JPY'
BALANCE_CURRENCY_1 = 'BTC'
# Keys used when parsing trading-account responses
ACCOUNT_PRODUCT_ID = 'product_id'
ACCOUNT_EQUITY = 'equity'
ACCOUNT_FREE_MARGIN = 'free_margin'
ACCOUNT_MARGIN = 'margin'
ACCOUNT_KEEPRATE = 'keep_rate'
# Market (buy) order settings
ORDER_TYPE = 'market'
ORDER_PRODUCT_ID = 5
ORDER_FUNDING_CURRENCY = 'JPY'
ORDER_SIDE_BUY = 'buy'
ORDER_SIDE_SELL = 'sell'
ORDER_LEVELAGE_LEVEL = 10
ORDER_MODELS = 'models'
| 0 | 0 | 0 |
035639992fbaa91dbf14c63e7f1d5e1d1b08b6d2 | 7,226 | py | Python | Code/scripts/Worlds/RvizElements.py | JoseAndresMR/jamrepo | b4205d55995a138f5d5e8502015b3620ab2afa10 | [
"MIT"
] | 1 | 2019-11-19T12:31:49.000Z | 2019-11-19T12:31:49.000Z | Code/scripts/Worlds/RvizElements.py | JoseAndresMR/jamrepo | b4205d55995a138f5d5e8502015b3620ab2afa10 | [
"MIT"
] | null | null | null | Code/scripts/Worlds/RvizElements.py | JoseAndresMR/jamrepo | b4205d55995a138f5d5e8502015b3620ab2afa10 | [
"MIT"
] | 2 | 2019-11-19T12:31:45.000Z | 2020-09-27T07:45:23.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------------------------------------------------
# ROS-MAGNA
# ----------------------------------------------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 GRVC University of Seville
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
"""
Created on Mon Feb 21 2018
@author: josmilrom
"""
import sys
import rospy
import std_msgs.msg
import time
import math
import numpy as np
import tf, tf2_ros
import json
import copy
import random
import rospkg
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import *
from sensor_msgs.msg import *
# from xml.dom import minidom
# from gazebo_msgs.srv import DeleteModel,SpawnModel
from visualization_msgs.msg import Marker
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray, TorusArray, PolygonArray
from jsk_recognition_msgs.msg import Torus as jsk_Torus
# from sympy import Point3D, Line3D, Segment3D
# from sympy import Point as Point2D
# from sympy import Polygon as Polygon2D
# import xml.etree.ElementTree
from magna.srv import *
from TFElements import *
| 35.772277 | 165 | 0.640742 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------------------------------------------------
# ROS-MAGNA
# ----------------------------------------------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 GRVC University of Seville
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
"""
Created on Mon Feb 21 2018
@author: josmilrom
"""
import sys
import rospy
import std_msgs.msg
import time
import math
import numpy as np
import tf, tf2_ros
import json
import copy
import random
import rospkg
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import *
from sensor_msgs.msg import *
# from xml.dom import minidom
# from gazebo_msgs.srv import DeleteModel,SpawnModel
from visualization_msgs.msg import Marker
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray, TorusArray, PolygonArray
from jsk_recognition_msgs.msg import Torus as jsk_Torus
# from sympy import Point3D, Line3D, Segment3D
# from sympy import Point as Point2D
# from sympy import Polygon as Polygon2D
# import xml.etree.ElementTree
from magna.srv import *
from TFElements import *
class RvizMarker(object):
    """Publishes a single RViz Marker built from a definition dict.

    The definition dict is expected to carry at least the keys used below:
    "parent_name", "name", "id", "shape", "origin" ([position, euler]),
    "scale" and "color" — TODO confirm against callers.
    """
    def __init__(self,rviz_marker_def):
        self.rviz_marker_def = rviz_marker_def
        self.hyperparameters = rospy.get_param('magna_hyperparameters')
        self.marker_pub = rospy.Publisher('/visualization_marker', Marker, queue_size = 1)
        # Publish immediately on construction.
        self.Spawner()
    def Spawner(self):
        """Build the Marker message and publish it repeatedly so RViz
        (which may subscribe late) is likely to receive it."""
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = self.rviz_marker_def["parent_name"]
        marker.ns = str(self.rviz_marker_def["name"])
        marker.id = self.rviz_marker_def["id"]
        # Map shape names to visualization_msgs/Marker type constants
        # (0=ARROW, 1=CUBE, 2=SPHERE, 3=CYLINDER).
        if self.rviz_marker_def["shape"] == "arrow":
            marker.type = 0
        elif self.rviz_marker_def["shape"] == "cube":
            marker.type = 1
        elif self.rviz_marker_def["shape"] == "sphere":
            marker.type = 2
        elif self.rviz_marker_def["shape"] == "cylinder":
            marker.type = 3
        marker.action = 0
        # origin is [position_xyz, euler_rpy]; euler angles are converted
        # to a quaternion for the pose.
        euler = self.rviz_marker_def["origin"][1]
        quaternion = tf.transformations.quaternion_from_euler(euler[0],euler[1],euler[2])
        marker.pose = Pose(Point(self.rviz_marker_def["origin"][0][0],self.rviz_marker_def["origin"][0][1],self.rviz_marker_def["origin"][0][2]),
                            Quaternion(quaternion[0],quaternion[1],quaternion[2],quaternion[3]))
        marker.scale = Point(self.rviz_marker_def["scale"][0],self.rviz_marker_def["scale"][1],self.rviz_marker_def["scale"][2])
        marker.color = ColorRGBA(self.rviz_marker_def["color"][0],self.rviz_marker_def["color"][1],self.rviz_marker_def["color"][2],self.rviz_marker_def["color"][3])
        # marker.lifetime = 0# rospy.Duration(0)
        self.marker = marker
        # Re-publish 10 times at ~20 Hz to mitigate lost messages.
        t = 0
        while not rospy.is_shutdown() and t < 10:
            self.marker_pub.publish(self.marker)
            t = t+1
            time.sleep(0.05)
    def Erase(self):
        """Delete the marker from RViz (action 2 = DELETE)."""
        self.marker.action = 2
        self.marker_pub.publish(self.marker)
    def Actualize(self,posestamped):
        """Update the marker pose from a PoseStamped and republish."""
        self.marker.header.stamp = rospy.Time.now()
        self.marker.pose = posestamped.pose
        self.marker_pub.publish(self.marker)
class RvizPolygonArray(object):
    """Publishes a jsk PolygonArray built from lists of poses."""
    def __init__(self,rviz_polygon_array_def):
        self.rviz_polygon_array_def = rviz_polygon_array_def
        self.polygon_array_pub = rospy.Publisher('/visualization_polygon_array/{}'.format(rviz_polygon_array_def["name"]), PolygonArray, queue_size = 1)
        # Publish immediately on construction.
        self.Spawner()
    def Spawner(self):
        """Build the PolygonArray message and publish it repeatedly so a
        late-subscribing RViz is likely to receive it."""
        polygon_array = PolygonArray()
        polygon_array.header.stamp = rospy.Time.now()
        polygon_array.header.frame_id = "map"
        for i,polygon_poses in enumerate(self.rviz_polygon_array_def["polygon_array_poses"]):
            polygon = PolygonStamped()
            polygon.header.stamp = rospy.Time.now()
            polygon.header.frame_id = "map"
            # Each pose's position becomes one vertex of the polygon.
            for pose in polygon_poses:
                polygon.polygon.points.append(Point32(pose.position.x,pose.position.y,pose.position.z))
            # Fixed label/likelihood per polygon; definition-provided values
            # were abandoned (see commented lines below).
            polygon_array.labels.append(1)
            polygon_array.likelihood.append(1.0)
            polygon_array.polygons.append(polygon)
        # polygon_array.labels = self.rviz_polygon_array_def["labels"]#[i]
        # polygon_array.likelihood = self.rviz_polygon_array_def["likelihood"]#[i]
        # polygon_array.labels = [1,1,1,1,1,1]
        # polygon_array.likelihood = [1.0,1.0,1.0,1.0,1.0,1.0]
        self.polygon_array = polygon_array
        # Re-publish 10 times at ~20 Hz to mitigate lost messages.
        t = 0
        while not rospy.is_shutdown() and t < 10:
            self.polygon_array_pub.publish(self.polygon_array)
            t = t+1
            time.sleep(0.05)
    def Erase(self):
        # Not implemented for polygon arrays.
        pass
    def Actualize(self,posestamped):
        # Not implemented for polygon arrays.
        pass
class RvizTorusArray(object):
    """Publishes a jsk TorusArray built from per-torus parameter lists."""
    def __init__(self,rviz_torus_array_def):
        self.rviz_torus_array_def = rviz_torus_array_def
        self.torus_array_pub = rospy.Publisher('/visualization_torus_array/{}'.format(rviz_torus_array_def["name"]), TorusArray, queue_size = 1)
        # Publish immediately on construction.
        self.Spawner()
    def Spawner(self):
        """Build the TorusArray message and publish it repeatedly so a
        late-subscribing RViz is likely to receive it."""
        torus_array = TorusArray()
        torus_array.header.stamp = rospy.Time.now()
        torus_array.header.frame_id = "map"
        for i,torus_poses in enumerate(self.rviz_torus_array_def["torus_array_poses"]):
            header = Header()
            header.stamp = rospy.Time.now()
            header.frame_id = "map"
            # Each entry supplies the remaining Torus fields positionally;
            # the header is prepended as the first constructor argument.
            torus_poses = [header] + torus_poses
            torus = jsk_Torus(*torus_poses)
            torus_array.toruses.append(torus)
        self.torus_array = torus_array
        # Re-publish 10 times at ~20 Hz to mitigate lost messages.
        t = 0
        while not rospy.is_shutdown() and t < 10:
            self.torus_array_pub.publish(self.torus_array)
            t = t+1
            time.sleep(0.05)
    def Erase(self):
        # Not implemented for torus arrays.
        pass
    def Actualize(self,posestamped):
        # Not implemented for torus arrays.
        pass
| 4,421 | 22 | 390 |
d609e4179a2fa5c1d3b6e223cb14512352e5b429 | 697 | py | Python | forecast-admin/forecast/opportunities/migrations/0007_auto_20151127_1141.py | 18F/osbu-forecast-api | 26a53dad8da4f3d2de522ac1e488e7135a56b557 | [
"CC0-1.0"
] | 3 | 2016-03-02T17:27:54.000Z | 2016-03-15T16:49:27.000Z | forecast-admin/forecast/opportunities/migrations/0007_auto_20151127_1141.py | 18F/forecast | 26a53dad8da4f3d2de522ac1e488e7135a56b557 | [
"CC0-1.0"
] | 62 | 2015-12-14T14:42:35.000Z | 2016-05-19T16:47:56.000Z | forecast-admin/forecast/opportunities/migrations/0007_auto_20151127_1141.py | 18F/forecast | 26a53dad8da4f3d2de522ac1e488e7135a56b557 | [
"CC0-1.0"
] | 5 | 2016-01-29T17:10:24.000Z | 2021-02-14T12:15:37.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 27.88 | 110 | 0.629842 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('opportunities', '0006_auto_20151127_0336'),
]
operations = [
migrations.AlterField(
model_name='opportunity',
name='price_max',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=16, null=True, max_length=200),
),
migrations.AlterField(
model_name='opportunity',
name='price_min',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=16, null=True, max_length=200),
),
]
| 0 | 567 | 23 |
4e5f3a16117fa6e6f15130ae3b5999c7cf174f54 | 1,457 | py | Python | foreshadow/smart/intent_resolving/core/secondary_featurizers/base_featurizer_via_lambda.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 25 | 2018-07-26T17:30:31.000Z | 2021-02-23T22:54:01.000Z | foreshadow/smart/intent_resolving/core/secondary_featurizers/base_featurizer_via_lambda.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 150 | 2018-11-02T18:09:12.000Z | 2020-05-15T01:01:35.000Z | foreshadow/smart/intent_resolving/core/secondary_featurizers/base_featurizer_via_lambda.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 1 | 2019-02-20T22:24:00.000Z | 2019-02-20T22:24:00.000Z | """Class defintion for BaseFeaturizeViaLambda abstract class."""
from typing import Callable
import pandas as pd
from .base_featurizer import BaseFeaturizer
class BaseFeaturizerViaLambda(BaseFeaturizer):
    """
    Abstract class to create secondary featurization via a custom function.

    Attributes:
        _callable {Callable[[pd.DataFrame], pd.Series]}
            -- User-defined function to extract metafeatures from dataframe.
        sec_feature_names {List[str]}
            -- Names for secondary metafeatures

    Refer to superclass for additional attributes.
    """

    def __init__(
        self,
        method: str,
        callable_: Callable[[pd.DataFrame], pd.Series],
        normalizable: bool,
    ):
        """
        Initialize the featurizer with a user-supplied extraction function.

        Extends superclass method.

        Arguments:
            method {str}
                -- Description of secondary metafeatures. Used in naming
                   `sec_feature_names`.
            callable_ {Callable[[pd.DataFrame], pd.Series]}
                -- User-defined function to extract metafeatures from dataframe.
            normalizable {bool}
                -- Whether the generated feature should be normalized.
        """
        super().__init__(method=method, normalizable=normalizable)
        self._callable = callable_
        # The single feature name is derived from `method`, tagged as
        # non-normalizable by the superclass helper when appropriate.
        self.sec_feature_names = super()._mark_nonnormalizable(
            [self.method], normalizable=self.normalizable
        )
| 30.354167 | 80 | 0.633493 | """Class defintion for BaseFeaturizeViaLambda abstract class."""
from typing import Callable
import pandas as pd
from .base_featurizer import BaseFeaturizer
class BaseFeaturizerViaLambda(BaseFeaturizer):
    """
    Abstract class to create secondary featurization via a custom function.

    Attributes:
        _callable {Callable[[pd.DataFrame], pd.Series]}
            -- User-defined function to extract metafeatures from dataframe.
        sec_feature_names {List[str]}
            -- Names for secondary metafeatures

    Refer to superclass for additional attributes.
    """

    def __init__(
        self,
        method: str,
        callable_: Callable[[pd.DataFrame], pd.Series],
        normalizable: bool,
    ):
        """
        Initialize the featurizer with a user-supplied extraction function.

        Extends superclass method.

        Arguments:
            method {str}
                -- Description of secondary metafeatures. Used in naming
                   `sec_feature_names`.
            callable_ {Callable[[pd.DataFrame], pd.Series]}
                -- User-defined function to extract metafeatures from dataframe.
            normalizable {bool}
                -- Whether the generated feature should be normalized.
        """
        super().__init__(method=method, normalizable=normalizable)
        self._callable = callable_
        # The single feature name is derived from `method`, tagged as
        # non-normalizable by the superclass helper when appropriate.
        self.sec_feature_names = super()._mark_nonnormalizable(
            [self.method], normalizable=self.normalizable
        )
| 0 | 0 | 0 |
5834ad7c7df985e3609b87d3b72c1aec6609e340 | 666 | py | Python | ind2.py | 12W300/Three | 1faf8e5379541d409978c64ca1c8facdef58552b | [
"MIT"
] | null | null | null | ind2.py | 12W300/Three | 1faf8e5379541d409978c64ca1c8facdef58552b | [
"MIT"
] | null | null | null | ind2.py | 12W300/Three | 1faf8e5379541d409978c64ca1c8facdef58552b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
    # Read a whitespace-separated list of integers from stdin.
    A = tuple(map(int, input().split()))
    # Index of the (first) maximum element.
    print('максимальный элемент имеет номер ', A.index(max(A)))
    # Find the indices of the first two zero elements (-1 = not found).
    zero_1 = zero_2 = -1
    for i, item in enumerate(A):
        if (item == 0) and (zero_1 != -1) and (zero_2 == -1):
            zero_2 = i
        if (item == 0) and (zero_1 == -1):
            zero_1 = i
    print('первый нулевой элемент в позиции ', zero_1, ' второй нулевой элемент в позиции ', zero_2)
    # Product of the elements strictly between the two zeros.
    # NOTE(review): if fewer than two zeros exist, the slice below uses
    # -1 as a bound and the result is not meaningful — confirm inputs
    # always contain at least two zeros.
    mult = 1
    for item in A[zero_1 + 1:zero_2]:
        mult *= item
    print('произведение элементов между нулевыми элементами ', mult)
| 30.272727 | 101 | 0.558559 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# NOTE(review): verbatim duplicate of the same script earlier in this
# dump (dataset "original_content" column); kept byte-identical.
import sys
if __name__ == '__main__':
    # Whitespace-separated integers from stdin.
    A = tuple(map(int, input().split()))
    # Index of the (first) maximal element.
    print('максимальный элемент имеет номер ', A.index(max(A)))
    # Find positions of the first two zeros; -1 means "not found".
    zero_1 = zero_2 = -1
    for i, item in enumerate(A):
        # Order matters: the second-zero test runs before zero_1 is set
        # for the current element, so one index is never counted twice.
        if (item == 0) and (zero_1 != -1) and (zero_2 == -1):
            zero_2 = i
        if (item == 0) and (zero_1 == -1):
            zero_1 = i
    print('первый нулевой элемент в позиции ', zero_1, ' второй нулевой элемент в позиции ', zero_2)
    # Product of the elements strictly between the two zeros.
    mult = 1
    for item in A[zero_1 + 1:zero_2]:
        mult *= item
    print('произведение элементов между нулевыми элементами ', mult)
| 0 | 0 | 0 |
a36534259b6d4d6eb89bb926cda2c1dd7feac4ad | 579 | py | Python | aula7/tarefa1.py | davidpvilaca/TEP | decbf61a96863d76e1b84dc097aa37b12038aa75 | [
"MIT"
] | 2 | 2017-08-28T18:24:47.000Z | 2019-08-29T03:34:15.000Z | aula7/tarefa1.py | davidpvilaca/TEP | decbf61a96863d76e1b84dc097aa37b12038aa75 | [
"MIT"
] | null | null | null | aula7/tarefa1.py | davidpvilaca/TEP | decbf61a96863d76e1b84dc097aa37b12038aa75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 08:29:23 2017
@author: davidpvilaca
"""
import matplotlib.pyplot as plt
import cv2
# Load the image (OpenCV gives BGR) and move to HSV; for 8-bit images
# OpenCV's hue channel spans [0, 179].
img1 = cv2.imread('vermelho3.jpg')
img1_hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
# Shift low hues (< 30) up by 30 and high hues (> 150) down by 150 —
# presumably remapping the reddish ends of the hue circle; confirm the
# intended color effect against the course exercise.
i = img1_hsv[:,:, 0] < 30
img1_hsv[i, 0] += 30
i = img1_hsv[:,:, 0] > 150
img1_hsv[i, 0] -= 150
# Back to BGR for saving/display.
img_saida = cv2.cvtColor(img1_hsv, cv2.COLOR_HSV2BGR)
# Side-by-side comparison; matplotlib expects RGB, hence the conversion.
plt.subplot(121), plt.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
plt.title('Original')
plt.subplot(122), plt.imshow(cv2.cvtColor(img_saida, cv2.COLOR_BGR2RGB))
plt.title('Saída')
| 21.444444 | 72 | 0.699482 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# NOTE(review): verbatim duplicate of the same script earlier in this
# dump (dataset "original_content" column); kept byte-identical.
"""
Created on Thu Sep 14 08:29:23 2017
@author: davidpvilaca
"""
import matplotlib.pyplot as plt
import cv2
# BGR -> HSV; hue shifted at both ends, then converted back for display.
img1 = cv2.imread('vermelho3.jpg')
img1_hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
i = img1_hsv[:,:, 0] < 30
img1_hsv[i, 0] += 30
i = img1_hsv[:,:, 0] > 150
img1_hsv[i, 0] -= 150
img_saida = cv2.cvtColor(img1_hsv, cv2.COLOR_HSV2BGR)
plt.subplot(121), plt.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
plt.title('Original')
plt.subplot(122), plt.imshow(cv2.cvtColor(img_saida, cv2.COLOR_BGR2RGB))
plt.title('Saída')
| 0 | 0 | 0 |
46c72939e50816fe6d9dffd8f1c48937d6b5bd57 | 51,920 | py | Python | professors/generative_professor.py | tudor-berariu/training-teachers | 44d0e754a058bc45c3529a906127e09ae30fe670 | [
"MIT"
] | 1 | 2018-10-07T15:52:32.000Z | 2018-10-07T15:52:32.000Z | professors/generative_professor.py | tudor-berariu/training-teachers | 44d0e754a058bc45c3529a906127e09ae30fe670 | [
"MIT"
] | null | null | null | professors/generative_professor.py | tudor-berariu/training-teachers | 44d0e754a058bc45c3529a906127e09ae30fe670 | [
"MIT"
] | null | null | null | from itertools import chain
from collections import OrderedDict
import os.path
from typing import Tuple
from argparse import Namespace
from tabulate import tabulate
from termcolor import colored as clr
import numpy as np
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch import autograd
from torchvision.utils import save_image
from models import Student
from models import get_model
from models import generative, classifiers
from models.classifiers import sample_classifier
from professors.professor import Professor, PostTrainProfessor
from utils import get_optimizer, nparams, grad_info
from loss_utils import cos, mse, l2
def grad_of(outputs, inputs, grad_outputs=None):
    """Call autograd.grad with create & retain graph, and ignore other
    leaf variables.

    Arguments:
        outputs -- tensor(s) to differentiate.
        inputs -- tensor(s) to differentiate with respect to.
        grad_outputs -- optional "vector" for the vector-Jacobian
            product (see torch.autograd.grad).

    Returns the gradients of `outputs` w.r.t. `inputs` only; the graph
    stays differentiable (create_graph=True) so higher-order terms can
    be taken from the result later.
    """
    return autograd.grad(outputs, inputs,
                         grad_outputs=grad_outputs,
                         create_graph=True,
                         retain_graph=True,
                         only_inputs=True)
| 42.452984 | 86 | 0.518394 | from itertools import chain
from collections import OrderedDict
import os.path
from typing import Tuple
from argparse import Namespace
from tabulate import tabulate
from termcolor import colored as clr
import numpy as np
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch import autograd
from torchvision.utils import save_image
from models import Student
from models import get_model
from models import generative, classifiers
from models.classifiers import sample_classifier
from professors.professor import Professor, PostTrainProfessor
from utils import get_optimizer, nparams, grad_info
from loss_utils import cos, mse, l2
def grad_of(outputs, inputs, grad_outputs=None):
    """Differentiate *outputs* w.r.t. *inputs*, keeping the graph alive.

    Thin wrapper around ``torch.autograd.grad`` that always builds a
    differentiable graph (``create_graph=True``), keeps buffers for
    further backward passes (``retain_graph=True``) and restricts the
    result to *inputs* only, ignoring other leaf variables.
    """
    options = dict(
        grad_outputs=grad_outputs,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    return autograd.grad(outputs, inputs, **options)
def what_to_reset(ref_acc: float, nclasses, strategy, accs, ends):
    """Select which students (by index) should be reset.

    Thresholds are spread between the "barely above chance" accuracy
    (150 / nclasses) and *ref_acc*, either linearly ("linspace") or on
    a power-of-e curve ("powspace"), and walked from the highest down.

    Arguments:
        ref_acc {float} -- reference accuracy (percent) capping the
            threshold schedule.
        nclasses -- number of classes; fixes the chance-level floor.
        strategy -- "powspace" or "linspace".
        accs -- per-student accuracies, indexed like the students.
        ends -- (start_idx, end_idx) half-open range of student
            indices to consider.

    Returns a (possibly empty) list of student indices to reset.
    Raises ValueError for an unknown *strategy*.
    Note: uses np.random, so the selection is stochastic.
    """
    to_reset = []
    start_idx, end_idx = ends
    nstudents = len(accs[start_idx:end_idx])
    # Only act once the reference accuracy is clearly above chance.
    if ref_acc > 150. / nclasses:
        if strategy == "powspace":
            thrs = np.power(np.linspace(np.power((150. / nclasses), np.e),
                                        np.power(ref_acc, np.e),
                                        nstudents),
                            1/np.e)
        elif strategy == "linspace":
            thrs = np.linspace(150. / nclasses, ref_acc, nstudents)
        else:
            raise ValueError("Got " + str(strategy) + " for strategy.")
        # Drop the floor threshold: one fewer level than students.
        thrs = thrs[1:]
        balance = 0
        prev_idxs = []
        for thr in thrs[::-1]:
            good_idxs = []
            for sidx in range(start_idx, end_idx):
                if sidx not in prev_idxs and accs[sidx] > thr:
                    good_idxs.append(sidx)
            np.random.shuffle(good_idxs)
            # NOTE(review): one student appears to be allowed per
            # threshold level; any surplus feeds `balance`, which pays
            # for at most one reset per level — confirm intent.
            balance += len(good_idxs) - 1
            if balance > 0:
                to_reset.append(np.random.choice(good_idxs))
                balance -= 1
            while balance > 0:
                good_idxs.pop()
                balance -= 1
            prev_idxs.extend(good_idxs)
    return to_reset
class GenerativeProfessor(Professor):
    def __init__(self, args: Namespace, device,
                 start_params=None, ds_size=None) -> None:
        """Build the generative professor.

        Arguments:
            args {Namespace} -- full experiment configuration; loss
                coefficients (c_*), model specs, optimizers and reset
                schedules are all read from here.
            device -- torch device every component is created on.
            start_params -- optional state_dict used to initialize each
                non-random student.
            ds_size -- dataset size, forwarded to MemGenerator-style
                generators by `_create_components`.
        """
        super(GenerativeProfessor, self).__init__("GENE-PROF", args.verbose)
        self.args = args
        self.nclasses = nclasses = args.nclasses
        self.in_size = args.in_size
        self.nrmlz = args.nrmlz
        self.crt_device = device
        self.start_params = start_params
        self.students_per_batch = args.students_per_batch
        self.ds_size = ds_size
        self._init_students()
        self.nstudents = nstudents = len(self.students)
        self.generator_idx = 0
        self.generator, self.encoder, self.discriminator = None, None, None
        self.siamese = None
        self.d_optimizer, self.prof_optimizer = None, None
        self.old_generator = None
        # One-hot helper matrix, one row per class.
        self.classeye = torch.eye(args.nclasses, device=device)
        self.trained_on_fake = args.trained_on_fake
        if args.trained_on_fake > len(self.students):
            raise ValueError("Trained on fake more than existing.")
        # ----------------------------------------------------------------------
        self.label_to_discriminator = args.label_to_discriminator
        self.permute_before_discriminator = args.permute_before_discriminator
        self.contrast_from_real_data = args.contrast_from_real_data
        self.siamese_detach_other = args.siamese_detach_other
        self.margin = args.siamese_margin
        self.ctrl_loss = args.ctrl_loss
        self._create_components()
        # Loss coefficients; a coefficient of 0 disables its loss term.
        self.coeffs = coeffs = Namespace()
        self.coeffs.c_nll = args.c_nll
        self.coeffs.c_kl = args.c_kl
        self.coeffs.c_contrast_kl = args.c_contrast_kl
        self.coeffs.c_adv = args.c_adv
        self.coeffs.c_siamese = args.c_siamese
        self.coeffs.c_grad_mse = args.c_grad_mse
        self.coeffs.c_grad_cos = args.c_grad_cos
        self.coeffs.c_next_nll = args.c_next_nll
        self.coeffs.c_contrast_next_nll = args.c_contrast_next_nll
        self.coeffs.c_next_nll2 = args.c_next_nll2
        self.coeffs.c_next_kl = args.c_next_kl
        self.coeffs.c_hess = args.c_hess
        self.coeffs.c_d = args.c_d
        self.coeffs.c_recon = args.c_recon
        self.coeffs.c_l2 = args.c_l2
        self.coeffs.c_latent_kl = args.c_latent_kl
        self.coeffs.next_lr = args.next_lr
        self.coeffs.target_dropout = args.target_dropout
        # ---------------------------------------------------------------------
        #
        # Let's check what needs to be computed (i.e. contrast data,
        # gradients)
        def check_need(lst):
            # True if any listed coefficient is active (> 0).
            return any(getattr(coeffs, n) > 0 for n in lst)
        w_contrast = ["c_contrast_kl", "c_contrast_next_nll", "c_siamese"]
        self.need_contrast = check_need(w_contrast)
        if self.need_contrast:
            self.info("Contrast data will be generated.")
        w_real_grad = ["c_grad_mse", "c_grad_cos", "c_next_nll2", "c_hess"]
        self.need_real_grad = check_need(w_real_grad)
        w_fake_grad = ["c_grad_mse", "c_grad_cos", "c_next_nll", "c_next_kl",
                       "c_hess"]
        self.need_fake_grad = check_need(w_fake_grad)
        w_contrast_grad = ["c_contrast_next_nll"]
        self.need_contrast_grad = check_need(w_contrast_grad)
        self.need_some_grad = self.need_fake_grad or self.need_real_grad or \
            self.need_contrast_grad
        # ---------------------------------------------------------------------
        self.grad_type = args.grad_type
        assert self.grad_type in ["batch", "example", "class"]
        if self.grad_type == "example":
            self.grad_samples = args.grad_samples
        else:
            self.grad_samples = None
        self.eval_samples = args.eval_samples
        self.info(args.eval_samples, "samples will be used during teaching.")
        # ---------------------------------------------------------------------
        # Reset schedule: explicit per-student list, a shared int, or a
        # named strategy handled elsewhere.
        if isinstance(args.student_reset, list):
            if len(args.student_reset) == nstudents:
                self.student_reset = args.student_reset
            else:
                raise ValueError("Reset times must match no. of students.")
        elif isinstance(args.student_reset, int):
            self.student_reset = [args.student_reset] * nstudents
        elif args.student_reset in ["linspace", "powspace", "everystep"]:
            self.student_reset = args.student_reset
        else:
            raise ValueError("Expected int or list of ints or string. Got" +
                             str(args.student_reset))
        # ---------------------------------------------------------------------
        # Running accuracies start at chance level (in percent).
        self.avg_fake_acc = [100.0 / nclasses] * len(self.students)
        self.avg_real_acc = [100.0 / nclasses] * len(self.students)
        self.max_known_real_acc = 150 / nclasses
        self.last_perf = None # used during evaluation
        self.info_trace = OrderedDict({})
        self.nseen = 0
        self.epoch = 1
        self.report_freq = args.report_freq
        self.last_report = 0
        self.global_trace = OrderedDict({})
    def _init_students(self):
        """Create `args.nstudents` student models and their optimizers.

        Students are either instances of the configured architecture
        (optionally seeded from `self.start_params`) or, with
        `args.random_students`, sampled architectures.
        """
        in_size, nclasses = self.in_size, self.nclasses
        self.students, self.student_optimizers = [], []
        for _idx in range(self.args.nstudents):
            if not self.args.random_students:
                student = get_model(classifiers, self.args.student,
                                    in_size=in_size,
                                    nclasses=nclasses).to(self.crt_device)
                if self.start_params:
                    student.load_state_dict(self.start_params)
            else:
                # Randomly sampled classifier architecture.
                student = sample_classifier(
                    in_size, nclasses).to(self.crt_device)
            student_optimizer = get_optimizer(student.parameters(),
                                              self.args.student_optimizer)
            self.students.append(student)
            self.student_optimizers.append(student_optimizer)
            self.info(nparams(student, name="Student model"))
def _create_components(self):
args = self.args
if args.generator.name == 'MemGenerator':
self.generator = get_model(generative,
args.generator,
ds_size=self.ds_size,
in_size=self.in_size,
nclasses=self.nclasses)
else:
self.generator = get_model(generative,
args.generator,
in_size=self.in_size,
nclasses=self.nclasses,
nz=args.nz,
nperf=args.nperf)
self.generator.to(self.crt_device)
self.info(nparams(self.generator,
name=f"Generator:{self.generator_idx:d}"))
all_param_sets = [self.generator.parameters()]
if hasattr(args, "encoder") and args.c_latent_kl > 0:
self.encoder = get_model(generative, args.encoder,
in_size=args.in_size, nz=args.nz)
self.encoder.to(self.crt_device)
self.info(nparams(self.encoder, name=f"Encoder"))
all_param_sets.append(self.encoder.parameters())
if hasattr(args, "siamese") and args.c_siamese > 0:
self.siamese = get_model(classifiers,
args.siamese,
in_size=args.in_size,
nclasses=32)
self.siamese.to(self.crt_device)
self.info(nparams(self.siamese, name="Siamese"))
all_param_sets.append(self.siamese.parameters())
if self.ctrl_loss:
self.ctrl_classifier = nn.Linear(32, self.nclasses)
self.ctrl_classifier.to(self.crt_device)
self.ctrl_optimizer = optim.Adam(self.ctrl_classifier.parameters(),
lr=0.001)
all_params = chain(*all_param_sets)
self.prof_optimizer = get_optimizer(all_params, args.optimizer)
if hasattr(args, "discriminator") and args.c_d > 0:
discriminator = get_model(generative, args.discriminator,
nclasses=args.nclasses,
use_labels=self.label_to_discriminator)
discriminator.to(self.crt_device)
self.discriminator = discriminator
self.bce_loss = nn.BCELoss()
self.d_optimizer = optim.Adam(discriminator.parameters(), lr=.001)
self.info(nparams(self.encoder, name=f"Discriminator"))
def to(self, device): # pylint: disable=invalid-name
self.crt_device = device
self.generator.to(device)
if self.discriminator is not None:
self.discriminator.to(device)
self.bce_loss.to(device)
self.classeye = self.classeye.to(device)
if self.encoder is not None:
self.encoder.to(device)
if self.old_generator is not None:
self.old_generator.to(device)
    def eval_student(self, student: Student,
                     step: int,
                     nsamples: int = None) -> Tuple[Tensor, float]:
        """Evaluate *student* on freshly generated synthetic data.

        Returns (cross-entropy loss, accuracy in percent) and caches
        the accuracy in `self.last_perf` to condition the next batch of
        generated samples.
        """
        if step == 0:
            # NOTE(review): the initial perf is a fraction (1/nclasses)
            # while later values are percentages (correct/len * 100) —
            # scale mismatch; confirm what the generator's `perf`
            # argument expects.
            self.last_perf = last_perf = 1 / self.nclasses
        else:
            last_perf = self.last_perf
        if nsamples is None:
            nsamples = self.eval_samples
        with torch.no_grad():
            data, target = self.generator(nsamples=nsamples, perf=last_perf)
        output = student(data)
        with torch.no_grad():
            pred = output.max(1, keepdim=True)[1]
            correct = pred.eq(target.view_as(pred)).sum().item()
            self.last_perf = last_perf = (correct / len(data) * 100)
        return F.cross_entropy(output, target), last_perf
    def save_state(self, out_path: str, epoch_no: int,
                   data=None, target=None, data_idx=None) -> None:
        """Dump parameters, the training trace and sample images.

        Writes `params_XXXX.th`, `training_trace_XXXX.th`,
        `samples_XXXX.png` (free samples) and `recons_XXXX.png`
        (real batch stacked over its reconstructions) into *out_path*.

        NOTE(review): `data`/`target` default to None but part 3 uses
        them unconditionally (`encoder(data)`, `torch.cat((data, ...))`)
        — confirm callers always pass a batch.
        """
        generator = self.generator
        encoder = self.encoder
        siamese = self.siamese
        discriminator = self.discriminator
        # 1. save parameters
        torch.save((generator.state_dict(),
                    None if encoder is None else encoder.state_dict(),
                    None if discriminator is None else discriminator.state_dict(),
                    None if siamese is None else siamese.state_dict()),
                   os.path.join(out_path, f"params_{epoch_no:04d}.th"))
        torch.save(self.global_trace,
                   os.path.join(out_path, f"training_trace_{epoch_no:04d}.th"))
        # 2. generate_some_images
        with torch.no_grad():
            if self.args.generator.name == 'MemGenerator':
                fake_data, _ = self.generator(nsamples=64)
            else:
                fake_data, _ = self.generator(nsamples=64,
                                              perf=torch.linspace(10, 90, 64))
        save_image(fake_data.cpu(),
                   os.path.join(out_path, f"samples_{epoch_no:04d}.png"))
        # 3. save some comparisons
        with torch.no_grad():
            mean, log_var = (None, None) if encoder is None else encoder(data)
            if self.args.generator.name == 'MemGenerator':
                fake_data, _target = generator(target, idx=data_idx)
            else:
                fake_data, _target = generator(target, mean=mean, log_var=log_var,
                                               perf=torch.linspace(10, 90, len(data)))
        all_data = torch.cat((data, fake_data), dim=0).cpu()
        save_image(all_data,
                   os.path.join(out_path, f"recons_{epoch_no:04d}.png"))
    def process(self, data, target, data_idx=None):
        """Run one teaching step on a real batch (data, target).

        Draws a random subset of students; for each one the generator
        produces synthetic (and optionally contrast) data, every active
        professor loss (coefficients in ``self.coeffs``) is accumulated
        and backpropagated through generator/encoder/siamese, and the
        student itself takes one optimizer step on its own NLL.

        Returns True when the professor loss turned NaN (caller should
        abort), False otherwise.
        """
        student_losses = []
        info = OrderedDict({})
        info_max = OrderedDict({})
        coeffs = self.coeffs
        encoder = self.encoder
        generator = self.generator
        siamese = self.siamese
        orig_data, orig_target = data, target
        # ---------------------------------------------------------------------
        #
        # If gradients are computed per example, we'll take a fixed
        # number of samples for each student.
        if self.grad_type == "example" and self.need_some_grad:
            ngrad_samples = min(len(data), self.grad_samples)
        nstudents = len(self.students) # type: int
        nused = 0
        for sidx in torch.randperm(nstudents)[:self.students_per_batch]:
            nused += 1
            student = self.students[sidx]
            perf = self.avg_fake_acc[sidx]
            # -----------------------------------------------------------------
            #
            # If there is an old generator, we just add some synthetic
            # data for previous tasks. The batch size gets doubled.
            if self.old_generator is not None:
                with torch.no_grad():
                    prev_data, prev_target = self.old_generator(
                        nsamples=len(data), perf=perf)
                data = torch.cat((orig_data, prev_data.detach()), dim=0)
                target = torch.cat((orig_target, prev_target.detach()), dim=0)
                del prev_data, prev_target
            if coeffs.c_adv > 0:
                self.debug("Need gradients on real data")
                data.requires_grad_(True)
            # -----------------------------------------------------------------
            #
            # Generate data for current task
            #
            # - fake_data: data generated by the professor
            if coeffs.target_dropout > 0:
                tmask = torch.bernoulli(torch.full(target.size(),
                                                   coeffs.target_dropout))
                tmask = tmask.to(target.device)
            else:
                tmask = None
            fake_kwargs = {"tmask": tmask, "perf": perf}
            if encoder is not None:
                mean, log_var = encoder(data)
                fake_kwargs["mean"] = mean
                fake_kwargs["log_var"] = log_var
            if self.args.generator.name == 'MemGenerator':
                fake_data, _target = generator(target, idx=data_idx)
            else:
                fake_data, _target = generator(target, **fake_kwargs)
            if self.need_contrast:
                contrast_kwargs = {"tmask": tmask, "perf": perf}
                if self.contrast_from_real_data and encoder is not None:
                    # Rotate each example's latent code to another
                    # example of the same class.
                    pointers = torch.zeros_like(target)
                    for i in range(self.nclasses):
                        idxs = (target == i).nonzero()
                        if idxs.nelement() > 0:
                            pointers[idxs[:-1]] = idxs[1:]
                            pointers[idxs[-1]] = idxs[0]
                    contrast_mean = mean.index_select(0, pointers)
                    contrast_log_var = log_var.index_select(0, pointers)
                    contrast_kwargs["mean"] = contrast_mean
                    contrast_kwargs["log_var"] = contrast_log_var
                if coeffs.c_siamese == 0 or self.siamese_detach_other:
                    with torch.no_grad():
                        contrast_data, _ = generator(target, **contrast_kwargs)
                else:
                    contrast_data, _ = generator(target, **contrast_kwargs)
            else:
                contrast_data = None
            if coeffs.c_siamese > 0:
                similar_data, _target = generator(target, **fake_kwargs)
            # -----------------------------------------------------------------
            #
            # We erase gradients in both generator and encoder. If any
            # loss function is used, optimize_generator will be True
            # after finishing processing students.
            self.prof_optimizer.zero_grad()
            optimize_generator = False
            # -----------------------------------------------------------------
            #
            # Before doing stuff with the fake data, normalize the
            # posterior. This will put some gradients in the encoder.
            if encoder is not None:
                code = (mean, log_var)
                kld, recon_loss = self._do_vae(code, data, fake_data)
                if torch.is_tensor(recon_loss):
                    info["Reconstruction"] = recon_loss.item()
                info["KL encoder"] = kld.item()
                (recon_loss + kld).backward(retain_graph=True)
                optimize_generator = True
                del code, kld, recon_loss
            # -----------------------------------------------------------------
            #
            # First compute the student's predictions for both real
            # and fake data. Also, compute negative log likelihood
            # with target.
            real_output = student(data)
            fake_output = student(fake_data)
            real_nlls = F.cross_entropy(real_output, target, reduction="none")
            fake_nlls = F.cross_entropy(fake_output, target, reduction="none")
            _, real_pred = real_output.max(1)
            real_acc = real_pred.eq(target).sum().item() / len(data) * 100
            self.max_known_real_acc = max(real_acc, self.max_known_real_acc)
            info_max["Max. Known Real Acc"] = self.max_known_real_acc
            _, fake_pred = fake_output.max(1)
            fake_acc = fake_pred.eq(target).sum().item() / len(data) * 100
            if self.need_contrast:
                if self.need_contrast_grad:
                    contrast_output = student(contrast_data)
                    contrast_nlls = F.cross_entropy(contrast_output, target,
                                                    reduction="none")
                else:
                    with torch.no_grad():
                        contrast_output = student(contrast_data)
                    contrast_nlls = None
            # -----------------------------------------------------------------
            #
            # Compute negative log likelihood for student. It will be
            # backpropagated later as the professor's loss will put
            # some values into student's gradient. So, we will first
            # backpropagate the professor loss, clean the student's
            # gradients and then backpropagate this nll.
            if sidx < nstudents - self.trained_on_fake:
                student_loss = real_nlls.mean()
            else:
                student_loss = fake_nlls.mean()
            if coeffs.c_l2 > 0:
                l2_loss = l2(student.parameters()) * coeffs.c_l2
                student_loss = student_loss + l2_loss
                del l2_loss
            student_losses.append(student_loss.item())
            # -----------------------------------------------------------------
            #
            # Compute gradients w.r.t. student's parameters for both
            # fake, and real examples.
            if self.need_some_grad:
                if self.grad_type == "example":
                    idxs = [i for i in range(ngrad_samples)
                            if i % nstudents == sidx]
                else:
                    idxs = None
                aligned_grads = self._get_aligned_grads(student,
                                                        real_nlls, fake_nlls,
                                                        contrast_nlls,
                                                        target, idxs=idxs)
            # -----------------------------------------------------------------
            #
            # Start computing losses for the professor. Accumulate them in
            # professor_loss. At the end we just perform backward once for
            # professor_loss.
            professor_loss = 0
            # -----------------------------------------------------------------
            #
            # Siamese network :)
            if self.siamese and coeffs.c_siamese > 0:
                v_fake = siamese(fake_data)
                if self.siamese_detach_other:
                    v_similar = siamese(similar_data.detach())
                    v_contrast = siamese(contrast_data.detach())
                else:
                    v_similar = siamese(similar_data)
                    v_contrast = siamese(contrast_data)
                similar_dist = (v_fake - v_similar).pow(2).sum(dim=1)
                sq_dist = (v_fake - v_contrast).pow(2).sum(dim=1).sqrt()
                contrast_dist = torch.clamp(
                    self.margin - sq_dist, min=0).pow(2)
                similar_loss = similar_dist.mean() * coeffs.c_siamese
                contrast_loss = contrast_dist.mean() * coeffs.c_siamese
                siamese_loss = similar_loss + contrast_loss
                professor_loss += siamese_loss
                if self.ctrl_loss:
                    self.ctrl_optimizer.zero_grad()
                    ctrl_loss = F.cross_entropy(
                        self.ctrl_classifier(v_fake.detach()), target)
                    ctrl_loss.backward()
                    self.ctrl_optimizer.step()
                    info["Control Classifier"] = info.get("Control Classifier", 0) +\
                        ctrl_loss.item()
                info["Siamese"] = info.get("Siamese", 0) + siamese_loss.item()
                info["Siamese - similar"] = info.get(
                    "Siamese - similar", 0) + similar_loss.item()
                info["Siamese - contrast"] = info.get(
                    "Siamese - contrast", 0) + contrast_loss.item()
                del v_similar, v_contrast, v_fake, similar_dist, contrast_dist
                del similar_loss, contrast_loss, siamese_loss
            # -----------------------------------------------------------------
            #
            # Mean squared errors between NLLs on real and fake
            # examples.
            if coeffs.c_nll > 0:
                nll_mse = F.mse_loss(fake_nlls, real_nlls.detach())
                nll_mse *= coeffs.c_nll
                professor_loss += nll_mse
                info["NLL"] = info.get("NLL", 0) + nll_mse.item()
                del nll_mse
            # -----------------------------------------------------------------
            #
            # KL divergence between fake and real outputs.
            if coeffs.c_kl > 0:
                fake_logp = F.log_softmax(fake_output, dim=1)
                real_p = F.softmax(real_output, dim=1).detach()
                kldiv = F.kl_div(fake_logp, real_p) * coeffs.c_kl
                professor_loss += kldiv
                info["KL div"] = info.get("KL div", 0) + kldiv.item()
                if coeffs.c_contrast_kl > 0:
                    with torch.no_grad():
                        contrast_p = F.softmax(contrast_output, dim=1)
                    contrast_kldiv = F.kl_div(fake_logp, contrast_p)
                    contrast_kldiv *= coeffs.c_kl * coeffs.c_contrast_kl
                    professor_loss -= contrast_kldiv
                    info["KL div - contr"] = info.get("KL div - contr", 0) +\
                        contrast_kldiv.item()
                del fake_logp, real_p, kldiv
            # -----------------------------------------------------------------
            #
            # Match input-space gradients of the NLLs (adversarial-ish).
            if coeffs.c_adv > 0:
                adv_loss = F.mse_loss(
                    grad_of(fake_nlls, fake_data,
                            grad_outputs=torch.ones(len(data),
                                                    device=data.device))[0],
                    grad_of(real_nlls, data,
                            grad_outputs=torch.ones(len(data),
                                                    device=data.device))[0],
                    reduction="sum")
                adv_loss *= coeffs.c_adv
                professor_loss += adv_loss
                info["Adversarial"] = info.get(
                    "Adversarial", 0) + adv_loss.item()
                del adv_loss
            # -----------------------------------------------------------------
            #
            # Mean squared error between fake and real gradients.
            if coeffs.c_grad_mse > 0:
                grad_mse = 0
                for real_g, fake_g, _contrast_g, _mask in aligned_grads:
                    grad_mse += mse(fake_g, real_g)
                grad_mse *= coeffs.c_grad_mse / len(aligned_grads)
                professor_loss += grad_mse
                info["Grad MSE"] = info.get("Grad MSE", 0) + grad_mse.item()
                del grad_mse
            # -----------------------------------------------------------------
            #
            # Cosine distance between fake and real gradients.
            if coeffs.c_grad_cos > 0:
                cos_loss = 0
                for real_grads, fake_grads, _contrast_g, _mask in aligned_grads:
                    cos_loss += cos(fake_grads, real_grads)
                cos_loss *= coeffs.c_grad_cos / len(aligned_grads)
                professor_loss += cos_loss
                info["Grad Cos"] = info.get("Grad Cos", 0) + cos_loss.item()
                del cos_loss
            # -----------------------------------------------------------------
            #
            # Cross entropy for the student optimized with proposed
            # gradients.
            if coeffs.c_next_nll > 0:
                next_l = self._next_nll(student, data, target, aligned_grads)
                next_nll, contrast_next_nll = next_l
                professor_loss += next_nll
                info["Next NLL"] = info.get("Next NLL", 0) + next_nll.item()
                if contrast_next_nll is not None:
                    professor_loss -= contrast_next_nll
                    info["Next NLL - contr"] = info.get("Next NLL - contr", 0) +\
                        contrast_next_nll.item()
                del next_nll
            # -----------------------------------------------------------------
            #
            # Cross entropy for the student optimized with proposed
            # gradients. (symmetric: gradients from real data applied
            # on synthetic data)
            if coeffs.c_next_nll2 > 0:
                next_nll2 = self._next_nll2(
                    student, fake_data, target, aligned_grads)
                professor_loss += next_nll2
                info["Next NLL (2)"] = info.get(
                    "Next NLL (2)", 0) + next_nll2.item()
                del next_nll2
            # -----------------------------------------------------------------
            #
            # KL divergence between outputs of displaced parameters
            # and current outputs (both on real data). (Stefan's idea,
            # no clue why it's important.)
            if coeffs.c_next_kl > 0:
                next_kl = self._next_kldiv(student, real_output, data,
                                           aligned_grads)
                professor_loss += next_kl
                info["Next KLdiv"] = info.get("Next KLdiv", 0) + next_kl.item()
                del next_kl
            # -----------------------------------------------------------------
            #
            # Mean Squared Error between the products of the Hessians
            # with some random vector.
            if coeffs.c_hess > 0:
                hess_loss = self._hess_vec(student, aligned_grads)
                professor_loss += hess_loss
                info["Hv MSE"] = info.get("Hv MSE", 0) + hess_loss.item()
                del hess_loss # Avoid bugs
            # -----------------------------------------------------------------
            #
            # How bad is our current generator at fooling the
            # discriminator that the outputs of the student given fake
            # inputs result from real data.
            if coeffs.c_d > 0:
                d_loss, d_info = self._do_gan(real_output, fake_output, target)
                professor_loss += d_loss
                for key, value in d_info.items():
                    info[key] = info.get(key, 0) + value
                del d_loss, d_info # Avoid bugs
            # -----------------------------------------------------------------
            #
            # Backward now so we won't keep the computational graph
            # through current student. First backpropagate from
            # professor's loss. This will accumulate gradients in both
            # generator's and encoder's parameters, but also in
            # current student's parameters. Therefore we need to erase
            # the student's gradients before backpropagation from its
            # loss.
            if torch.is_tensor(professor_loss):
                if torch.isnan(professor_loss).any().item():
                    return True
                optimize_generator = True
                # professor_loss /= nstudents
                professor_loss.backward(retain_graph=True)
            # -- If there is some loss, improve teacher
            if optimize_generator:
                if self.verbose > 1:
                    generator_info = grad_info(generator)
                    print(tabulate(generator_info))
                    if encoder is not None:
                        encoder_info = grad_info(encoder)
                        print(tabulate(encoder_info))
                self.prof_optimizer.step()
            student.zero_grad()
            if sidx < nstudents - self.trained_on_fake:
                student_loss.backward()
            else:
                student_loss.backward(retain_graph=True)
            self.student_optimizers[sidx].step()
            del professor_loss, student_loss
            # Backpropagation ended for current student.
            #
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            #
            # -- Post computation
            # Exponential moving average of per-student accuracies.
            self.avg_fake_acc[sidx] *= .75
            self.avg_fake_acc[sidx] += .25 * fake_acc
            self.avg_real_acc[sidx] *= .75
            self.avg_real_acc[sidx] += .25 * real_acc
        self.reset_students(len(orig_data))
        for key, value in info.items():
            self.info_trace.setdefault(key, []).append(value / nused)
        for key, value in info_max.items():
            [old_value] = self.info_trace.get(key, [value])
            self.info_trace[key] = [max(old_value, value)]
        self.nseen = self.nseen + len(orig_data)
        self.report()
        return False
    def report(self):
        """Periodically print and log accumulated training metrics.

        Every `report_freq` seen examples: tabulates means/stds of
        `info_trace`, pushes them to `global_trace` (and TensorBoard
        when `args.writer` is set), then prints color-coded per-student
        fake/real accuracies (cyan = best real-trained student,
        green = best fake-trained student).
        """
        if self.nseen - self.last_report >= self.report_freq:
            info = OrderedDict({"epoch": self.epoch, "Step": self.nseen})
            info.update(self.info_trace)
            summary = []
            for (key, vals) in info.items():
                if isinstance(vals, list):
                    mean, std = np.mean(vals), np.std(vals)
                    summary.append((key, mean, std))
                    self.global_trace.setdefault(key, []).append((mean, std))
                    if self.args.writer is not None:
                        writer = self.args.writer
                        writer.add_scalar(f"prof/{key:s}", mean, self.nseen)
                else:
                    summary.append((key, vals))
                    self.global_trace.setdefault(key, []).append(vals)
            print(tabulate(summary, ["Metric", "Mean", "Std"]))
            self.info_trace.clear()
            self.last_report += self.report_freq
        avg_fake_acc, avg_real_acc = self.avg_fake_acc, self.avg_real_acc
        fake_accs = [f"{acc:5.2f}" for acc in avg_fake_acc]
        real_accs = [clr(f"{acc:5.2f}", "yellow") for acc in avg_real_acc]
        # First `nreal` students are trained on real data, the rest on
        # fake data; highlight the best of each group.
        nreal = self.nstudents - self.trained_on_fake
        if nreal > 0:
            max_r_on_f_idx = np.argmax(avg_fake_acc[:nreal])
            max_r_on_f = avg_fake_acc[max_r_on_f_idx]
            fake_accs[max_r_on_f_idx] = clr(f"{max_r_on_f:5.2f}",
                                            "white", "on_cyan")
            max_r_on_r_idx = np.argmax(avg_real_acc[:nreal])
            max_r_on_r = avg_real_acc[max_r_on_r_idx]
            real_accs[max_r_on_r_idx] = clr(f"{max_r_on_r:5.2f}",
                                            "yellow", "on_cyan")
        if self.trained_on_fake > 0:
            max_f_on_f_idx = np.argmax(avg_fake_acc[nreal:])
            max_f_on_f = avg_fake_acc[max_f_on_f_idx + nreal]
            fake_accs[max_f_on_f_idx + nreal] = clr(f"{max_f_on_f:5.2f}",
                                                    "white", "on_green")
            max_f_on_r_idx = np.argmax(avg_real_acc[nreal:])
            max_f_on_r = avg_real_acc[max_f_on_r_idx + nreal]
            real_accs[max_f_on_r_idx + nreal] = clr(f"{max_f_on_r:5.2f}",
                                                    "yellow", "on_green")
        self.info(" | ".join(fake_accs[:nreal]),
                  clr("|||", "yellow"),
                  " | ".join(fake_accs[nreal:]),
                  tags=["@FAKE"])
        self.info(" | ".join(real_accs[:nreal]),
                  clr("|||", "yellow"),
                  " | ".join(real_accs[nreal:]),
                  tags=["@REAL"])
        self.info("----")
    def _next_kldiv(self, student, real_output, data, aligned_grads):
        """KL divergence between the student's output after a simulated
        SGD step on the fake gradients and its current (detached)
        output on the real data.
        """
        next_kldiv = 0
        coeffs = self.coeffs
        for _real_grads, fake_grads, _contrast_grads, mask in aligned_grads:
            # One SGD step along the fake gradients, without touching
            # the student's actual parameters.
            next_params = OrderedDict({})
            pg_pairs = zip(student.named_parameters(), fake_grads)
            for (name, param), grad in pg_pairs:
                next_params[name] = param.detach() - coeffs.next_lr * grad
            if mask is None:
                next_output = student(data, params=next_params)
                real_p = F.softmax(real_output, dim=1).detach()
            else:
                next_output = student(data[mask], params=next_params)
                real_p = F.softmax(real_output[mask], dim=1).detach()
            next_logp = F.log_softmax(next_output, dim=1)
            next_kldiv += F.kl_div(next_logp, real_p)
        # NOTE(review): unlike _next_nll/_next_nll2, the sum is not
        # divided by len(aligned_grads) — confirm this is intentional.
        next_kldiv *= coeffs.c_next_kl
        return next_kldiv
    def _hess_vec(self, student, aligned_grads):
        """MSE between real and fake Hessian-vector products.

        For each aligned pair, draws a random 0/1 vector and compares
        the Hessian-vector products of the real and fake losses w.r.t.
        the student's parameters (second-order gradient matching).
        """
        hess_loss = 0
        for real_grads, fake_grads, _contrast_grads, _mask in aligned_grads:
            # Random binary probe vector, one entry per gradient tensor.
            rand_v = [torch.bernoulli(torch.rand_like(g)) for g in real_grads]
            real_hv = grad_of(real_grads, student.parameters(), rand_v)
            fake_hv = grad_of(fake_grads, student.parameters(), rand_v)
            hess_loss += mse(fake_hv, real_hv)
        hess_loss *= self.coeffs.c_hess / len(aligned_grads)
        return hess_loss
def _next_nll(self, student, data, target, aligned_grads):
next_nll = 0
coeffs = self.coeffs
do_contrast = coeffs.c_contrast_next_nll > 0
contrast_nll = 0 if do_contrast else None
for _real_grads, fake_grads, contrast_grads, mask in aligned_grads:
new_params, contrast_params = OrderedDict({}), OrderedDict({})
pg_pairs = zip(student.named_parameters(),
fake_grads, contrast_grads)
for (name, param), grad, contrast_grad in pg_pairs:
new_params[name] = param.detach() - coeffs.next_lr * grad
if do_contrast:
contrast_params[name] = param.detach() -\
coeffs.next_lr * contrast_grad
if mask is None:
next_output = student(data, params=new_params)
next_nll += F.cross_entropy(next_output, target)
if do_contrast:
with torch.no_grad():
contrast_output = student(data, params=contrast_params)
contrast_p = F.softmax(contrast_output, dim=1)
next_logp = F.log_softmax(next_output, dim=1)
contrast_nll += F.kl_div(next_logp, contrast_p)
else:
next_output = student(data[mask], params=new_params)
next_nll += F.cross_entropy(next_output, target[mask])
if do_contrast:
with torch.no_grad():
contrast_output = student(data[mask],
params=contrast_params)
contrast_p = F.softmax(contrast_output, dim=1)
next_logp = F.log_softmax(next_output, dim=1)
contrast_nll += F.kl_div(next_logp, contrast_p)
next_nll *= coeffs.c_next_nll / len(aligned_grads)
if do_contrast:
contrast_nll *= coeffs.c_next_nll * coeffs.c_contrast_next_nll
contrast_nll /= len(aligned_grads)
return next_nll, contrast_nll
    def _next_nll2(self, student, fake_data, target, aligned_grads):
        """Lookahead NLL on *fake* data after a step along detached *real* grads.

        Mirror of ``_next_nll``: here the virtual SGD step follows the real
        gradients (fully detached, so this term trains the generator through
        ``fake_data`` only) and the loss is measured on the fake examples.

        :return: scalar loss scaled by ``coeffs.c_next_nll2`` and averaged
            over the gradient groups
        """
        next_nll2 = 0
        coeffs = self.coeffs
        for real_grads, _fake_grads, _contrast_grads, mask in aligned_grads:
            new_params = OrderedDict({})
            pg_pairs = zip(student.named_parameters(), real_grads)
            for (name, param), grad in pg_pairs:
                # Both param and grad are detached: the virtual step itself
                # carries no gradient; only the fake data below does.
                new_params[name] = param.detach() - coeffs.next_lr * \
                    grad.detach()
            if mask is None:
                next_output = student(fake_data, params=new_params)
                next_nll2 += F.cross_entropy(next_output, target)
            else:
                next_output = student(fake_data[mask], params=new_params)
                next_nll2 += F.cross_entropy(next_output, target[mask])
        next_nll2 *= coeffs.c_next_nll2 / len(aligned_grads)
        return next_nll2
    def _get_aligned_grads(self, student,
                           real_nlls, fake_nlls, contrast_nlls,
                           target, idxs=None):
        """Group per-example losses and differentiate them w.r.t. the student.

        Depending on ``self.grad_type`` the per-example NLLs are grouped into
        a single batch group (``"batch"``), one group per class (``"class"``),
        or one group per example (anything else). For each group, gradients
        of the real / fake / contrast losses are computed (only those enabled
        via the ``need_*_grad`` flags); contrast gradients are always detached.

        :param real_nlls: per-example losses on real data (1-D tensor)
        :param fake_nlls: per-example losses on fake data (1-D tensor)
        :param contrast_nlls: per-example contrastive losses (1-D tensor)
        :param target: class labels, used for class grouping
        :param idxs: optional subset of example indices (per-example mode only)
        :return: list of ``(real_g, fake_g, contrast_g, mask)`` tuples; mask
            is ``None`` for batch mode, a boolean mask for class mode, and a
            ``slice`` for per-example mode
        """
        aligned_grads = []
        if self.grad_type == "batch":
            real_g, fake_g, contrast_g = None, None, None
            if self.need_real_grad:
                real_g = grad_of(real_nlls.mean(), student.parameters())
            if self.need_fake_grad:
                fake_g = grad_of(fake_nlls.mean(), student.parameters())
            if self.need_contrast_grad:
                contrast_g = grad_of(contrast_nlls.mean(),
                                     student.parameters())
                # Contrast grads act as fixed targets: detach in place.
                for cgrad in contrast_g:
                    cgrad.detach_()
            aligned_grads.append((real_g, fake_g, contrast_g, None))
        elif self.grad_type == "class":
            for class_idx in range(self.nclasses):
                mask = (target == class_idx)
                # Skip classes absent from the batch.
                if mask.any().item():
                    real_g, fake_g, contrast_g = None, None, None
                    if self.need_real_grad:
                        real_nll_i = real_nlls[mask].mean()
                        real_g = grad_of(real_nll_i, student.parameters())
                    if self.need_fake_grad:
                        fake_nll_i = fake_nlls[mask].mean()
                        fake_g = grad_of(fake_nll_i, student.parameters())
                    if self.need_contrast_grad:
                        contrast_nll_i = contrast_nlls[mask].mean()
                        contrast_g = grad_of(
                            contrast_nll_i, student.parameters())
                        contrast_g = tuple([cg.detach() for cg in contrast_g])
                    aligned_grads.append((real_g, fake_g, contrast_g, mask))
        else:
            # Per-example gradients; optionally restricted to idxs.
            if idxs is None:
                idxs = range(len(target))
            for idx in idxs:
                real_g, fake_g, contrast_g = None, None, None
                if self.need_real_grad:
                    real_g = grad_of(
                        real_nlls[idx:idx + 1], student.parameters())
                if self.need_fake_grad:
                    fake_g = grad_of(
                        fake_nlls[idx:idx + 1], student.parameters())
                if self.need_contrast_grad:
                    contrast_g = grad_of(
                        contrast_nlls[idx:idx + 1], student.parameters())
                    contrast_g = [cg.detach() for cg in contrast_g]
                aligned_grads.append(
                    (real_g, fake_g, contrast_g, slice(idx, idx+1)))
        return aligned_grads
    def _do_gan(self, real_output, fake_output, target):
        """One adversarial round on student outputs: train D, score G.

        The discriminator is updated in place (its optimizer steps here);
        the generator's adversarial loss is returned for the caller to
        back-propagate.

        :param real_output: student logits on real data (batch, nclasses)
        :param fake_output: student logits on generated data
        :param target: class labels; turned into one-hot labels via classeye
        :return: tuple of (generator adversarial loss * coeffs.c_d, info dict)
        """
        label = self.classeye[target]
        # Real/fake BCE targets, shaped (batch, 1).
        ones = torch.ones(target.size(), device=target.device)
        ones.unsqueeze_(1)
        zeros = torch.zeros(target.size(), device=target.device)
        zeros.unsqueeze_(1)
        # --- Improve discriminator ---
        self.d_optimizer.zero_grad()
        if self.permute_before_discriminator:
            # Randomly permute the class dimension so D cannot key on
            # class identity; done without grad for the D update.
            with torch.no_grad():
                perm = torch.randperm(self.nclasses).long().to(
                    real_output.device)
                perm_real = real_output.index_select(1, perm)
                perm_fake = fake_output.index_select(1, perm)
        else:
            perm_real = real_output.detach()
            perm_fake = fake_output.detach()
        if self.label_to_discriminator:
            # Condition D on the one-hot label.
            with torch.no_grad():
                d_real_in = torch.cat((perm_real, label), dim=1)
                d_fake_in = torch.cat((perm_fake, label), dim=1)
        else:
            d_real_in = perm_real
            d_fake_in = perm_fake
        d_real_out = self.discriminator(d_real_in)
        loss_real = self.bce_loss(d_real_out, ones)
        d_fake_out = self.discriminator(d_fake_in)
        loss_fake = self.bce_loss(d_fake_out, zeros)
        (loss_real + loss_fake).backward()
        self.d_optimizer.step()
        # --- Improve generator ---
        # Note: fake_output is NOT detached here, so this loss reaches the
        # generator through the student outputs.
        if self.label_to_discriminator:
            d_gen_in = torch.cat((fake_output, label), dim=1)
        else:
            d_gen_in = fake_output
        d_gen_out = self.discriminator(d_gen_in)
        d_gen_loss = self.bce_loss(d_gen_out, ones)
        # --- Return useful info ---
        info = OrderedDict({})
        info["Discriminator BCE - gen "] = d_gen_loss.item()
        info["Discriminator BCE - real"] = loss_real.item()
        info["Discriminator BCE - fake"] = loss_fake.item()
        info["Discriminator avg - gen "] = d_gen_out.mean().item()
        info["Discriminator avg - real"] = d_real_out.mean().item()
        info["Discriminator avg - fake"] = d_fake_out.mean().item()
        return d_gen_loss * self.coeffs.c_d, info
def _do_vae(self, code, data, fake_data):
mean, log_var = code
if self.coeffs.c_recon > 0:
data_mean, data_std = self.nrmlz
batch_size = len(data)
with torch.no_grad():
scaled_data = data_mean + data * data_std
scaled_data.clamp_(min=0, max=1)
recon_loss = F.binary_cross_entropy(
(fake_data.view(batch_size, -1) + 1) / 2,
scaled_data.view(batch_size, -1).detach(),
reduction='sum') * self.coeffs.c_recon
else:
recon_loss = 0
kld = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
kld *= self.coeffs.c_latent_kl
return kld, recon_loss
    def reset_students(self, seen_samples: int):
        """Decide which students to re-initialize and reset them.

        Selection policy depends on ``self.student_reset``:
          * ``"everystep"`` — reset every student;
          * any other string — delegate to ``what_to_reset`` separately for
            students trained on real data and those trained on fake data;
          * otherwise (a sequence of frequencies) — reset each student with
            probability ``seen_samples / freq``.

        Selected students are restored from ``start_params``, resampled
        (``args.random_students``) or weight-reset, their optimizers are
        rebuilt, and their running accuracies are reset to chance level.

        :param seen_samples: number of samples seen since the last reset,
            used for the probabilistic policy
        """
        student_reset = self.student_reset
        nstudents = len(self.students)
        nreal = nstudents - self.trained_on_fake
        nfake = self.trained_on_fake
        in_size, nclasses = self.in_size, self.nclasses
        if student_reset == "everystep":
            to_reset = list(range(nstudents))
        elif isinstance(student_reset, str):
            ref_acc = self.max_known_real_acc
            to_reset_1, to_reset_2 = [], []
            if nreal > 0:
                to_reset_1 = what_to_reset(ref_acc, nclasses,
                                           student_reset,
                                           self.avg_real_acc,
                                           (0, nreal))
            if nfake > 0:
                to_reset_2 = what_to_reset(ref_acc, nclasses,
                                           student_reset,
                                           self.avg_fake_acc,
                                           (nreal,
                                            nstudents))
            to_reset = to_reset_1 + to_reset_2
        else:
            # Probabilistic reset: one frequency per student.
            to_reset = []
            for sidx, freq in zip(range(0, nstudents), student_reset):
                p_reset = seen_samples / freq
                if np.random.sample() < p_reset:
                    to_reset.append(sidx)
        nreal = self.nstudents - self.trained_on_fake
        # Locate the best fake-accuracy student in each partition, for display.
        max_f_on_f_idx, max_f_on_r_idx = None, None
        if nreal > 0:
            max_f_on_r_idx = np.argmax(self.avg_fake_acc[:nreal])
        if self.trained_on_fake > 0:
            max_f_on_f_idx = nreal + np.argmax(self.avg_fake_acc[nreal:])
        if to_reset and student_reset != "everystep":
            # Color-coded accuracy report: magenta = being reset,
            # cyan/green = best on real/fake partition.
            colored = []
            for sidx, acc in enumerate(self.avg_fake_acc):
                if sidx in to_reset:
                    clrs = ("white", "on_magenta")
                elif sidx == max_f_on_r_idx:
                    clrs = ("white", "on_cyan")
                elif sidx == max_f_on_f_idx:
                    clrs = ("white", "on_green")
                else:
                    clrs = ("white",)
                colored.append(clr(f"{acc:5.2f}", *clrs))
            self.info(" | ".join(colored[:nreal]),
                      clr("|||", "yellow"),
                      " | ".join(colored[nreal:]),
                      tags=["RESET"])
        for sidx in to_reset:
            if self.start_params:
                self.students[sidx].load_state_dict(self.start_params)
            elif self.args.random_students:
                self.students[sidx] = sample_classifier(in_size, nclasses)
                self.students[sidx].to(self.crt_device)
            else:
                self.students[sidx].reset_weights()
            # Fresh optimizer for the (possibly new) parameters.
            self.student_optimizers[sidx] = get_optimizer(
                self.students[sidx].parameters(),
                self.args.student_optimizer)
            # Reset running accuracies to chance level (percent).
            self.avg_fake_acc[sidx] = 100. / self.nclasses
            self.avg_real_acc[sidx] = 100. / self.nclasses
def end_epoch(self):
self.epoch += 1
    def post_train_professor(self, old_model=None):
        """Export a frozen copy of the generator for post-training evaluation.

        When ``old_model`` is ``None`` a new generator of the configured
        architecture is built, moved to the current device and loaded with the
        live generator's weights, then wrapped in a fresh
        ``PostTrainGenerativeProfessor``. Otherwise the existing wrapper is
        refreshed in place with the current weights.

        :param old_model: previously returned wrapper to refresh, or None
        :return: a ``PostTrainGenerativeProfessor`` holding the snapshot
        """
        args = self.args
        in_size = self.in_size  # type: Tuple[int]
        nclasses = self.nclasses
        eval_samples = self.eval_samples
        if old_model is None:
            # MemGenerator needs the dataset size instead of nz/nperf.
            if self.args.generator.name == 'MemGenerator':
                new_generator = get_model(generative,
                                          args.generator,
                                          ds_size=self.ds_size,
                                          in_size=self.in_size,
                                          nclasses=self.nclasses)
            else:
                new_generator = get_model(generative,
                                          args.generator,
                                          in_size=in_size,
                                          nclasses=nclasses,
                                          nz=args.nz,
                                          nperf=args.nperf)
            new_generator.to(self.crt_device)
            new_generator.load_state_dict(self.generator.state_dict())
            if self.args.generator.name == 'MemGenerator':
                # ``part`` is extra state not covered by state_dict.
                new_generator.part = self.generator.part
            return PostTrainGenerativeProfessor(new_generator, nclasses,
                                                eval_samples, self.args)
        assert isinstance(old_model, PostTrainGenerativeProfessor)
        old_model.generator.load_state_dict(self.generator.state_dict())
        if self.args.generator.name == 'MemGenerator':
            old_model.generator.part = self.generator.part
        # Reset the tracked student performance to chance level (percent).
        old_model.last_perf = 100 / nclasses
        return old_model
class PostTrainGenerativeProfessor(PostTrainProfessor):
    """Frozen-generator professor used to evaluate/teach students post-training.

    Wraps a snapshot of a trained generator; ``eval_student`` samples a
    synthetic batch from it and returns the student's cross-entropy on that
    batch together with the student's accuracy (percent), which is fed back
    to performance-conditioned generators on the next call.
    """
    def __init__(self, generator, nclasses, eval_samples, args):
        self.generator = generator
        self.generator.eval()  # snapshot is used for inference only
        self.nclasses = nclasses
        self.eval_samples = eval_samples
        # Running student accuracy (percent), starts at chance level.
        self.last_perf = 100 / nclasses
        self.args = args
    def eval_student(self, student: Student,
                     step: int,
                     nsamples: int = None) -> Tensor:
        """Sample a fake batch and score ``student`` on it.

        :param student: classifier to evaluate
        :param step: training step; at step 0 the tracked performance resets
        :param nsamples: batch size, defaults to ``self.eval_samples``
        :return: (cross-entropy loss, accuracy percent on this batch)
        """
        if step == 0:
            # NOTE(review): this resets to a fraction (1/nclasses) while
            # every other assignment of last_perf is a percentage
            # (100/nclasses or correct/len*100) — confirm the intended units.
            self.last_perf = last_perf = 1 / self.nclasses
        else:
            last_perf = self.last_perf
        if nsamples is None:
            nsamples = self.eval_samples
        with torch.no_grad():
            if self.args.generator.name == 'MemGenerator':
                data, target = self.generator(nsamples=nsamples)
            else:
                # Performance-conditioned generators take the student's
                # last accuracy as input.
                data, target = self.generator(
                    nsamples=nsamples, perf=last_perf)
        output = student(data)
        with torch.no_grad():
            pred = output.max(1, keepdim=True)[1]
            correct = pred.eq(target.view_as(pred)).sum().item()
            self.last_perf = last_perf = (correct / len(data) * 100)
        return F.cross_entropy(output, target), last_perf
| 50,174 | 50 | 609 |
74dcaf3b4fc5c6a6b348a1d53ba3fa3b1a0c2513 | 1,450 | py | Python | example-data/3d-reconstruct.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 4 | 2016-08-23T12:13:21.000Z | 2018-08-22T12:55:55.000Z | example-data/3d-reconstruct.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | null | null | null | example-data/3d-reconstruct.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 2 | 2016-09-15T19:17:30.000Z | 2018-03-06T06:34:13.000Z | import numpy as np
import h5py
import scisoftpy as dnp
from time import sleep  # NOTE(review): unused in this script
from math import cos, sin
# Appears to implement a simple (unfiltered) back-projection reconstruction:
# a padded coordinate grid is rotated by each projection angle and the
# matching detector stripes are accumulated into `result`, one slice per
# frame — TODO confirm against the beamline processing pipeline.
print("Starting")
# Projection data and per-projection angles from the processed scan file.
data = h5py.File('/dls/tmp/ssg37927/31_processed_160905_141219.nxs','r')['entry/result/data']
angles = h5py.File('/dls/tmp/ssg37927/31_processed_160905_141219.nxs','r')['entry/result/Angle']
frame = 300
dnp.plot.image(data[:,frame,:])
cor = 140  # centre of rotation (pixels); presumably calibrated by hand
pad = 50   # padding so rotated coordinates stay inside the grid
# Coordinate grids centred on the (padded) centre of rotation.
xs, ys = np.meshgrid(np.arange(data.shape[0]+(2*pad))-(cor+pad), np.arange(data.shape[0]+(2*pad))-(cor+pad))
dnp.plot.image(xs, name='xs')
dnp.plot.image(ys, name='ys')
result = np.zeros([data.shape[1]]+list(xs.shape))
#dnp.plot.image(result, name='result')
angles = np.deg2rad(angles)
for f in range(100,data.shape[1]):
    print("F is ", f)
    dnp.plot.image(data[:,f,:])
    for i in range(angles.shape[0]):
        angle = angles[i]
        #print("Angle : ", angle)
        # Rotate the x coordinates, then map back onto detector indices.
        xx = xs*cos(angle) - ys*sin(angle)
        xx = xx.astype(np.int16) + (cor+pad)
        # Clamp only the upper bound. NOTE(review): negative indices are not
        # clamped and will wrap via Python negative indexing — confirm intended.
        xx[xx>data.shape[0]-1] = data.shape[0]-1
        #yy = ys*cos(angle) + xs*sin(angle)
        #dnp.plot.image(xx, name='xx')
        #dnp.plot.image(yy, name='yy')
        stripe = data[i,f,:][xx]
        #dnp.plot.image(stripe, name='stripe')
        result[f,:,:] = result[f,:,:] + stripe
    dnp.plot.image(result[f,:,:], name='result')
print("Opening file")
# Persist the reconstructed volume.
output = h5py.File('/dls/tmp/ssg37927/mb1.h5','w')
output.create_dataset("data", data=result)
output.close()
print("Done")
| 23.387097 | 108 | 0.622759 | import numpy as np
import h5py
import scisoftpy as dnp
from time import sleep  # NOTE(review): unused in this script
from math import cos, sin
# Appears to implement a simple (unfiltered) back-projection reconstruction:
# a padded coordinate grid is rotated by each projection angle and the
# matching detector stripes are accumulated into `result`, one slice per
# frame — TODO confirm against the beamline processing pipeline.
print("Starting")
# Projection data and per-projection angles from the processed scan file.
data = h5py.File('/dls/tmp/ssg37927/31_processed_160905_141219.nxs','r')['entry/result/data']
angles = h5py.File('/dls/tmp/ssg37927/31_processed_160905_141219.nxs','r')['entry/result/Angle']
frame = 300
dnp.plot.image(data[:,frame,:])
cor = 140  # centre of rotation (pixels); presumably calibrated by hand
pad = 50   # padding so rotated coordinates stay inside the grid
# Coordinate grids centred on the (padded) centre of rotation.
xs, ys = np.meshgrid(np.arange(data.shape[0]+(2*pad))-(cor+pad), np.arange(data.shape[0]+(2*pad))-(cor+pad))
dnp.plot.image(xs, name='xs')
dnp.plot.image(ys, name='ys')
result = np.zeros([data.shape[1]]+list(xs.shape))
#dnp.plot.image(result, name='result')
angles = np.deg2rad(angles)
for f in range(100,data.shape[1]):
    print("F is ", f)
    dnp.plot.image(data[:,f,:])
    for i in range(angles.shape[0]):
        angle = angles[i]
        #print("Angle : ", angle)
        # Rotate the x coordinates, then map back onto detector indices.
        xx = xs*cos(angle) - ys*sin(angle)
        xx = xx.astype(np.int16) + (cor+pad)
        # Clamp only the upper bound. NOTE(review): negative indices are not
        # clamped and will wrap via Python negative indexing — confirm intended.
        xx[xx>data.shape[0]-1] = data.shape[0]-1
        #yy = ys*cos(angle) + xs*sin(angle)
        #dnp.plot.image(xx, name='xx')
        #dnp.plot.image(yy, name='yy')
        stripe = data[i,f,:][xx]
        #dnp.plot.image(stripe, name='stripe')
        result[f,:,:] = result[f,:,:] + stripe
    dnp.plot.image(result[f,:,:], name='result')
print("Opening file")
# Persist the reconstructed volume.
output = h5py.File('/dls/tmp/ssg37927/mb1.h5','w')
output.create_dataset("data", data=result)
output.close()
print("Done")
| 0 | 0 | 0 |
16f438ea264cd3a6cbbb277a018a017480157550 | 3,757 | py | Python | PaperwithCode/1.Co-Interactive-Transformer/load_data.py | techthiyanes/nlp-notebook | 0e5f4b75e635128d4056c89a6c65bea60c15e836 | [
"MIT"
] | 136 | 2021-04-18T12:03:55.000Z | 2022-03-31T14:58:46.000Z | PaperwithCode/1.Co-Interactive-Transformer/load_data.py | techthiyanes/nlp-notebook | 0e5f4b75e635128d4056c89a6c65bea60c15e836 | [
"MIT"
] | 3 | 2021-08-08T08:38:06.000Z | 2022-03-26T17:17:40.000Z | PaperwithCode/1.Co-Interactive-Transformer/load_data.py | techthiyanes/nlp-notebook | 0e5f4b75e635128d4056c89a6c65bea60c15e836 | [
"MIT"
] | 40 | 2021-05-18T06:55:37.000Z | 2022-03-30T00:47:12.000Z | # -*- coding: utf-8 -*-
import csv
import torch
import torch.utils.data as tud
from torch.nn.utils.rnn import pad_sequence
TRAIN_DATA_PATH = './data/data_with_slots_intent_train.csv'
DEV_DATA_PATH = './data/data_with_slots_intent_dev.csv'
SLOT_PATH = './data/slot_maping.csv'
INTENT_PATH = './data/intent_maping.csv'
BATCH_SIZE = 64
MIN_FREQ = 1  # minimum character frequency to keep a char in the vocabulary
#Make char dict: ids 0/1 reserved for <pad>/<unk>, real chars start at 2.
char2id = {'<pad>':0, '<unk>':1}
char2freq = {}
with open(TRAIN_DATA_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for row in r:
        # Rows look like "char:slot ... <sep> intent"; the last two tokens
        # are not char:slot pairs — presumably separator + intent label.
        data = row[0].split()[:-2]
        for each in data:
            char = each.split(':')[0]
            char2freq[char] = char2freq.get(char, 0) + 1
filtered_chars = [char for char, freq in char2freq.items() if freq >= MIN_FREQ]
for ind, char in enumerate(filtered_chars, 2):
    char2id[char] = ind
#Make slot dict: id 0 reserved for <pad>, slots start at 1.
slot2id = {'<pad>':0}
with open(SLOT_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for ind, row in enumerate(r, 1):
        slot2id[row[1]] = ind
print(slot2id)
# Inverse slot mapping for decoding predictions.
id2slot = {}
for k, v in slot2id.items():
    id2slot[v] = k
#Make intent dict (no padding id needed for a per-sequence label).
intent2id = {}
with open(INTENT_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for ind, row in enumerate(r, 0):
        intent2id[row[1]] = ind
id2intent = {}
for k, v in intent2id.items():
    id2intent[v] = k
def collate_fn(batch_data):
    """Collate a list of instances into padded, batch-first tensors.

    Args:
        batch_data: list of dicts with "input_ids", "slot_ids", "mask"
            (variable-length int lists) and "intent_id" (int).

    Returns:
        Dict with "input_ids", "slot_ids", "mask" padded to the longest
        sequence in the batch with 0, plus a 1-D "intent_ids" tensor.
    """
    input_ids = [torch.tensor(inst["input_ids"], dtype=torch.long)
                 for inst in batch_data]
    slot_ids = [torch.tensor(inst["slot_ids"], dtype=torch.long)
                for inst in batch_data]
    masks = [torch.tensor(inst["mask"], dtype=torch.long)
             for inst in batch_data]
    intent_ids = [inst["intent_id"] for inst in batch_data]
    # pad_sequence right-pads every sequence to the batch maximum length.
    return {"input_ids": pad_sequence(input_ids, batch_first=True, padding_value=0),
            "slot_ids": pad_sequence(slot_ids, batch_first=True, padding_value=0),
            "intent_ids": torch.tensor(intent_ids, dtype=torch.long),
            "mask": pad_sequence(masks, batch_first=True, padding_value=0)}
# Build the train/dev datasets and loaders at module import time.
# NOTE(review): ``IntentDataset`` is not defined in this chunk — confirm it
# is provided elsewhere in the module.
traindataset = IntentDataset(TRAIN_DATA_PATH)
traindataloader = tud.DataLoader(traindataset, BATCH_SIZE, shuffle=True, collate_fn=collate_fn)
valdataset = IntentDataset(DEV_DATA_PATH)
valdataloader = tud.DataLoader(valdataset, BATCH_SIZE, shuffle=False, collate_fn=collate_fn)
| 36.475728 | 120 | 0.637211 | # -*- coding: utf-8 -*-
import csv
import torch
import torch.utils.data as tud
from torch.nn.utils.rnn import pad_sequence
TRAIN_DATA_PATH = './data/data_with_slots_intent_train.csv'
DEV_DATA_PATH = './data/data_with_slots_intent_dev.csv'
SLOT_PATH = './data/slot_maping.csv'
INTENT_PATH = './data/intent_maping.csv'
BATCH_SIZE = 64
MIN_FREQ = 1  # minimum character frequency to keep a char in the vocabulary
#Make char dict: ids 0/1 reserved for <pad>/<unk>, real chars start at 2.
char2id = {'<pad>':0, '<unk>':1}
char2freq = {}
with open(TRAIN_DATA_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for row in r:
        # Rows look like "char:slot ... <sep> intent"; the last two tokens
        # are not char:slot pairs — presumably separator + intent label.
        data = row[0].split()[:-2]
        for each in data:
            char = each.split(':')[0]
            char2freq[char] = char2freq.get(char, 0) + 1
filtered_chars = [char for char, freq in char2freq.items() if freq >= MIN_FREQ]
for ind, char in enumerate(filtered_chars, 2):
    char2id[char] = ind
#Make slot dict: id 0 reserved for <pad>, slots start at 1.
slot2id = {'<pad>':0}
with open(SLOT_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for ind, row in enumerate(r, 1):
        slot2id[row[1]] = ind
print(slot2id)
# Inverse slot mapping for decoding predictions.
id2slot = {}
for k, v in slot2id.items():
    id2slot[v] = k
#Make intent dict (no padding id needed for a per-sequence label).
intent2id = {}
with open(INTENT_PATH, 'r', encoding='utf8') as rf:
    r = csv.reader(rf)
    for ind, row in enumerate(r, 0):
        intent2id[row[1]] = ind
id2intent = {}
for k, v in intent2id.items():
    id2intent[v] = k
def collate_fn(batch_data):
    """Collate a list of instances into padded, batch-first tensors.

    Args:
        batch_data: list of dicts with "input_ids", "slot_ids", "mask"
            (variable-length int lists) and "intent_id" (int).

    Returns:
        Dict with "input_ids", "slot_ids", "mask" padded to the longest
        sequence in the batch with 0, plus a 1-D "intent_ids" tensor.
    """
    input_ids = [torch.tensor(inst["input_ids"], dtype=torch.long)
                 for inst in batch_data]
    slot_ids = [torch.tensor(inst["slot_ids"], dtype=torch.long)
                for inst in batch_data]
    masks = [torch.tensor(inst["mask"], dtype=torch.long)
             for inst in batch_data]
    intent_ids = [inst["intent_id"] for inst in batch_data]
    # pad_sequence right-pads every sequence to the batch maximum length.
    return {"input_ids": pad_sequence(input_ids, batch_first=True, padding_value=0),
            "slot_ids": pad_sequence(slot_ids, batch_first=True, padding_value=0),
            "intent_ids": torch.tensor(intent_ids, dtype=torch.long),
            "mask": pad_sequence(masks, batch_first=True, padding_value=0)}
class IntentDataset(tud.Dataset):
    """Dataset of "char:slot ... intent" rows for joint slot/intent tagging.

    Each CSV row is tokenized into per-character input ids and slot ids
    (using the module-level ``char2id``/``slot2id`` maps), a sequence-level
    intent id and an all-ones attention mask.
    """
    def __init__(self, data_path):
        super(IntentDataset, self).__init__()
        self.data_set = []  # list of per-example feature dicts
        with open (data_path, 'r', encoding='utf8') as rf:
            r = csv.reader(rf)
            for row in r:
                data = row[0].split()
                input_ids = []
                slot_ids = []
                # Last token is the intent label; the last two tokens are
                # excluded from the char:slot pairs below.
                intent_id = intent2id[data[-1]]
                mask = [1] * (len(data) - 2)
                for combo in data[:-2]:
                    char, slot = combo.split(':')
                    # Unknown characters map to the <unk> id (1).
                    input_ids.append(char2id.get(char, 1))
                    slot_ids.append(slot2id[slot])
                assert len(input_ids) == len(slot_ids)
                self.data_set.append({"input_ids": input_ids, "slot_ids": slot_ids, "intent_id":intent_id, "mask":mask})
    def __len__(self):
        return len(self.data_set)
    def __getitem__(self, idx):
        return self.data_set[idx]
# Build the train/dev datasets and loaders at module import time.
traindataset = IntentDataset(TRAIN_DATA_PATH)
traindataloader = tud.DataLoader(traindataset, BATCH_SIZE, shuffle=True, collate_fn=collate_fn)
valdataset = IntentDataset(DEV_DATA_PATH)
valdataloader = tud.DataLoader(valdataset, BATCH_SIZE, shuffle=False, collate_fn=collate_fn)
| 856 | 12 | 126 |
4dc63e70df28d10f5316b2903975a3cb406d4d5c | 1,222 | py | Python | data/render_data.py | ai-se/ResourcesDataDrivenSBSE | a061313f108f416035dcc6d03edc212aa4c7563e | [
"CC0-1.0"
] | 3 | 2018-09-11T07:28:18.000Z | 2020-07-07T17:25:11.000Z | data/render_data.py | ai-se/ResourcesDataDrivenSBSE | a061313f108f416035dcc6d03edc212aa4c7563e | [
"CC0-1.0"
] | 50 | 2018-01-20T20:48:24.000Z | 2018-03-05T00:29:47.000Z | data/render_data.py | ai-se/ResourcesDataDrivenSBSE | a061313f108f416035dcc6d03edc212aa4c7563e | [
"CC0-1.0"
] | null | null | null | from __future__ import print_function
from glob2 import glob
import pandas as pd
import os
from pdb import set_trace
head = """
[home](http://tiny.cc/sbse) |
[models](xx) |
[data](xx) |
[discuss](https://github.com/ai-se/ResourcesDataDrivenSBSE/issues) |
[citation](https://github.com/ai-se/ResourcesDataDrivenSBSE/blob/master/CITATION.md) |
[copyright](https://github.com/ai-se/ResourcesDataDrivenSBSE/blob/master/LICENSE.md) ©2018
<br>
[<img width=900 src="https://github.com/ai-se/ResourcesDataDrivenSBSE/raw/master/img/banner.png">](http://tiny.cc/sbse)<br>
[](https://zenodo.org/badge/latestdoi/116411075)
"""
print(head)
file = os.path.abspath("../var/data.csv")
csv = pd.read_csv(file)
columns = csv.columns
row_sep = pd.DataFrame([["---" for col in columns]], columns=columns)
csv = pd.concat([row_sep, csv])
print(csv.to_csv(path_or_buf=None, sep="|", index=False))
tail = """
## License
[](https://creativecommons.org/publicdomain/zero/1.0/)
To the extent possible under law, we waive all copyright and related or neighboring rights to this work.
"""
print(tail)
| 26.565217 | 135 | 0.728314 | from __future__ import print_function
from glob2 import glob
import pandas as pd
import os
from pdb import set_trace
head = """
[home](http://tiny.cc/sbse) |
[models](xx) |
[data](xx) |
[discuss](https://github.com/ai-se/ResourcesDataDrivenSBSE/issues) |
[citation](https://github.com/ai-se/ResourcesDataDrivenSBSE/blob/master/CITATION.md) |
[copyright](https://github.com/ai-se/ResourcesDataDrivenSBSE/blob/master/LICENSE.md) ©2018
<br>
[<img width=900 src="https://github.com/ai-se/ResourcesDataDrivenSBSE/raw/master/img/banner.png">](http://tiny.cc/sbse)<br>
[](https://zenodo.org/badge/latestdoi/116411075)
"""
print(head)
file = os.path.abspath("../var/data.csv")
csv = pd.read_csv(file)
columns = csv.columns
row_sep = pd.DataFrame([["---" for col in columns]], columns=columns)
csv = pd.concat([row_sep, csv])
print(csv.to_csv(path_or_buf=None, sep="|", index=False))
tail = """
## License
[](https://creativecommons.org/publicdomain/zero/1.0/)
To the extent possible under law, we waive all copyright and related or neighboring rights to this work.
"""
print(tail)
| 0 | 0 | 0 |
13eea81b3a271a532aa1b861edb5ff30a4588a1b | 712 | py | Python | monster_scrapper.py | Mounika1705/job_analysis | f90b7ca6dfb023ef06f894f120fde6673a1f3d8f | [
"Apache-2.0"
] | null | null | null | monster_scrapper.py | Mounika1705/job_analysis | f90b7ca6dfb023ef06f894f120fde6673a1f3d8f | [
"Apache-2.0"
] | null | null | null | monster_scrapper.py | Mounika1705/job_analysis | f90b7ca6dfb023ef06f894f120fde6673a1f3d8f | [
"Apache-2.0"
] | null | null | null | import feedparser
import csv
import json
#import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup
query="engineer"
extract(query) | 20.342857 | 60 | 0.651685 | import feedparser
import csv
import json
#import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup
def extract(query):
    """Fetch the Monster RSS feed for *query* and print each job item.

    Args:
        query: search term to insert into the RSS query URL.
    """
    url = "http://rss.jobsearch.monster.com/rssquery.ashx?q="
    # BUG FIX: the parameter used to be overwritten with the hard-coded
    # string "engineer", silently ignoring the caller's query.
    final_url = url + query
    url_xml = urlopen(final_url)
    try:
        xml = url_xml.read()
    finally:
        # Close the response even if read() fails (previously leaked).
        url_xml.close()
    print(xml)
    soup_page = BeautifulSoup(xml, features="lxml")
    # Renamed local from ``list`` to avoid shadowing the builtin.
    items = soup_page.findAll("item")
    create(items)
def create(list):
    """Print a blank-line-delimited summary of each RSS job item."""
    # NOTE: the parameter name shadows the builtin ``list``; kept for
    # call compatibility.
    for entry in list:
        print("\n")
        for field in (entry.title, entry.link, entry.description, entry.pubDate):
            print(field.text)
        print("\n")
query="engineer"
extract(query) | 498 | 0 | 46 |
066097e133b01ab3ecc03ebcc6052eaff7e3fa82 | 9,550 | py | Python | bleak/backends/dotnet/scanner.py | soldag/bleak | 7ef5d61c93cf92806ac16eca38ed565d08d8ac59 | [
"MIT"
] | null | null | null | bleak/backends/dotnet/scanner.py | soldag/bleak | 7ef5d61c93cf92806ac16eca38ed565d08d8ac59 | [
"MIT"
] | 2 | 2019-07-09T19:37:39.000Z | 2020-07-25T12:23:27.000Z | bleak/backends/dotnet/scanner.py | soldag/bleak | 7ef5d61c93cf92806ac16eca38ed565d08d8ac59 | [
"MIT"
] | 2 | 2019-07-03T20:26:30.000Z | 2020-09-05T13:01:53.000Z | import logging
import asyncio
import pathlib
import uuid
from functools import wraps
from typing import Callable, Any, Union, List
from bleak.backends.device import BLEDevice
from bleak.backends.dotnet.utils import BleakDataReader
from bleak.exc import BleakError, BleakDotNetTaskError
from bleak.backends.scanner import BaseBleakScanner
# Import of Bleak CLR->UWP Bridge. It is not needed here, but it enables loading of Windows.Devices
from BleakBridge import Bridge
from Windows.Devices.Bluetooth.Advertisement import (
BluetoothLEAdvertisementWatcher,
BluetoothLEScanningMode,
BluetoothLEAdvertisementType,
)
from Windows.Foundation import TypedEventHandler
logger = logging.getLogger(__name__)
_here = pathlib.Path(__file__).parent
class BleakScannerDotNet(BaseBleakScanner):
    """The native Windows Bleak BLE Scanner.

    Implemented using `pythonnet <https://pythonnet.github.io/>`_, a package that provides an integration to
    the .NET Common Language Runtime (CLR). Therefore, much of the code below has a distinct C# feel.

    Keyword Args:
        scanning mode (str): Set to ``Passive`` to avoid the ``Active`` scanning mode.
        SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
            BluetoothSignalStrengthFilter object used for configuration of Bluetooth LE advertisement
            filtering that uses signal strength-based filtering.
        AdvertisementFilter (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter``): A
            BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE advertisement
            filtering that uses payload section-based filtering.
    """
    async def set_scanning_filter(self, **kwargs):
        """Set a scanning filter for the BleakScanner.

        Keyword Args:
            SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
                BluetoothSignalStrengthFilter object used for configuration of Bluetooth
                LE advertisement filtering that uses signal strength-based filtering.
            AdvertisementFilter (Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter): A
                BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE
                advertisement filtering that uses payload section-based filtering.
        """
        if "SignalStrengthFilter" in kwargs:
            # TODO: Handle SignalStrengthFilter parameters
            self._signal_strength_filter = kwargs["SignalStrengthFilter"]
        if "AdvertisementFilter" in kwargs:
            # TODO: Handle AdvertisementFilter parameters
            self._advertisement_filter = kwargs["AdvertisementFilter"]
    # NOTE(review): ``@staticmethod`` on a method that takes ``self`` and
    # assigns ``self._callback`` is contradictory — the decorator most likely
    # belongs to a different (absent) method; confirm against upstream bleak.
    @staticmethod
    def register_detection_callback(self, callback: Callable):
        """Set a function to act as Received Event Handler.

        Documentation for the Event Handler:
        https://docs.microsoft.com/en-us/uwp/api/windows.devices.bluetooth.advertisement.bluetoothleadvertisementwatcher.received

        Args:
            callback: Function accepting two arguments:
                sender (``Windows.Devices.Bluetooth.AdvertisementBluetoothLEAdvertisementWatcher``) and
                eventargs (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementReceivedEventArgs``)
        """
        self._callback = callback
    # Windows specific
    @property
    def status(self) -> int:
        """Get status of the Watcher.

        Returns:
            Aborted 4
            An error occurred during transition or scanning that stopped the watcher due to an error.
            Created 0
            The initial status of the watcher.
            Started 1
            The watcher is started.
            Stopped 3
            The watcher is stopped.
            Stopping 2
            The watcher stop command was issued.
        """
        return self.watcher.Status if self.watcher else None
    @classmethod
    async def find_device_by_address(
        cls, device_identifier: str, timeout: float = 10.0, **kwargs
    ) -> Union[BLEDevice, None]:
        """A convenience method for obtaining a ``BLEDevice`` object specified by Bluetooth address.

        Args:
            device_identifier (str): The Bluetooth address of the Bluetooth peripheral.
            timeout (float): Optional timeout to wait for detection of specified peripheral
                before giving up. Defaults to 10.0 seconds.

        Keyword Args:
            scanning mode (str): Set to ``Passive`` to avoid the ``Active`` scanning mode.
            SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
                BluetoothSignalStrengthFilter object used for configuration of Bluetooth LE advertisement
                filtering that uses signal strength-based filtering.
            AdvertisementFilter (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter``): A
                BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE
                advertisement filtering that uses payload section-based filtering.

        Returns:
            The ``BLEDevice`` sought or ``None`` if not detected.
        """
        # Parse "AA:BB:CC:DD:EE:FF" into a 48-bit integer address.
        ulong_id = int(device_identifier.replace(":", ""), 16)
        # NOTE(review): ``loop`` is assigned but unused, and
        # ``stop_if_detected`` / ``_find_device_by_address`` are not defined
        # in this chunk — confirm they exist elsewhere in the module.
        loop = asyncio.get_event_loop()
        stop_scanning_event = asyncio.Event()
        scanner = cls(timeout=timeout)
        return await scanner._find_device_by_address(
            device_identifier, stop_scanning_event, stop_if_detected, timeout
        )
| 37.747036 | 129 | 0.673927 | import logging
import asyncio
import pathlib
import uuid
from functools import wraps
from typing import Callable, Any, Union, List
from bleak.backends.device import BLEDevice
from bleak.backends.dotnet.utils import BleakDataReader
from bleak.exc import BleakError, BleakDotNetTaskError
from bleak.backends.scanner import BaseBleakScanner
# Import of Bleak CLR->UWP Bridge. It is not needed here, but it enables loading of Windows.Devices
from BleakBridge import Bridge
from Windows.Devices.Bluetooth.Advertisement import (
BluetoothLEAdvertisementWatcher,
BluetoothLEScanningMode,
BluetoothLEAdvertisementType,
)
from Windows.Foundation import TypedEventHandler
logger = logging.getLogger(__name__)
_here = pathlib.Path(__file__).parent
def _format_bdaddr(a):
return ":".join("{:02X}".format(x) for x in a.to_bytes(6, byteorder="big"))
def _format_event_args(e):
    """Human-readable ``<bdaddr>: <name>`` summary of an advertisement event."""
    try:
        name = e.Advertisement.LocalName or "Unknown"
        return "{0}: {1}".format(_format_bdaddr(e.BluetoothAddress), name)
    except Exception:
        # Fall back to the raw address when the event lacks the usual fields.
        return e.BluetoothAddress
class BleakScannerDotNet(BaseBleakScanner):
"""The native Windows Bleak BLE Scanner.
Implemented using `pythonnet <https://pythonnet.github.io/>`_, a package that provides an integration to
the .NET Common Language Runtime (CLR). Therefore, much of the code below has a distinct C# feel.
Keyword Args:
scanning mode (str): Set to ``Passive`` to avoid the ``Active`` scanning mode.
SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
BluetoothSignalStrengthFilter object used for configuration of Bluetooth LE advertisement
filtering that uses signal strength-based filtering.
AdvertisementFilter (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter``): A
BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE advertisement
filtering that uses payload section-based filtering.
"""
    def __init__(self, **kwargs):
        """Initialize the scanner; the .NET watcher is created lazily in start().

        Keyword Args:
            scanning_mode (str): "passive" selects passive scanning,
                anything else (or absence) selects active scanning.
            SignalStrengthFilter / AdvertisementFilter: optional .NET filter
                objects applied to the watcher on start().
        """
        super(BleakScannerDotNet, self).__init__()
        self.watcher = None
        # Advertisements keyed by Bluetooth address (int).
        self._devices = {}
        # Scan responses keyed by Bluetooth address (int).
        self._scan_responses = {}
        # Optional user callback invoked on every received advertisement.
        self._callback = None
        if "scanning_mode" in kwargs and kwargs["scanning_mode"].lower() == "passive":
            self._scanning_mode = BluetoothLEScanningMode.Passive
        else:
            self._scanning_mode = BluetoothLEScanningMode.Active
        self._signal_strength_filter = kwargs.get("SignalStrengthFilter", None)
        self._advertisement_filter = kwargs.get("AdvertisementFilter", None)
    def AdvertisementWatcher_Received(self, sender, e):
        """.NET event handler: record a received advertisement or scan response.

        Only the first event per Bluetooth address is kept in each map; the
        optional user callback is invoked for every event from our watcher.
        """
        if sender == self.watcher:
            logger.debug("Received {0}.".format(_format_event_args(e)))
            if e.AdvertisementType == BluetoothLEAdvertisementType.ScanResponse:
                if e.BluetoothAddress not in self._scan_responses:
                    self._scan_responses[e.BluetoothAddress] = e
            else:
                if e.BluetoothAddress not in self._devices:
                    self._devices[e.BluetoothAddress] = e
            if self._callback is not None:
                self._callback(sender, e)
    def AdvertisementWatcher_Stopped(self, sender, e):
        """.NET event handler: log a summary when our watcher stops."""
        if sender == self.watcher:
            logger.debug(
                "{0} devices found. Watcher status: {1}.".format(
                    len(self._devices), self.watcher.Status
                )
            )
    async def start(self):
        """Create and start the .NET advertisement watcher with the configured
        scanning mode and optional filters."""
        self.watcher = BluetoothLEAdvertisementWatcher()
        self.watcher.ScanningMode = self._scanning_mode
        # ``+=`` subscribes the handlers to the .NET events.
        self.watcher.Received += self.AdvertisementWatcher_Received
        self.watcher.Stopped += self.AdvertisementWatcher_Stopped
        if self._signal_strength_filter is not None:
            self.watcher.SignalStrengthFilter = self._signal_strength_filter
        if self._advertisement_filter is not None:
            self.watcher.AdvertisementFilter = self._advertisement_filter
        self.watcher.Start()
    async def stop(self):
        """Stop the watcher, unsubscribe the event handlers and drop it."""
        self.watcher.Stop()
        try:
            # ``-=`` unsubscribes the handlers; best-effort only.
            self.watcher.Received -= self.AdvertisementWatcher_Received
            self.watcher.Stopped -= self.AdvertisementWatcher_Stopped
        except Exception as e:
            logger.debug("Could not remove event handlers: {0}...".format(e))
        self.watcher = None
    async def set_scanning_filter(self, **kwargs):
        """Set a scanning filter for the BleakScanner.

        Filters take effect the next time start() configures the watcher.

        Keyword Args:
            SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
                BluetoothSignalStrengthFilter object used for configuration of Bluetooth
                LE advertisement filtering that uses signal strength-based filtering.
            AdvertisementFilter (Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter): A
                BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE
                advertisement filtering that uses payload section-based filtering.
        """
        if "SignalStrengthFilter" in kwargs:
            # TODO: Handle SignalStrengthFilter parameters
            self._signal_strength_filter = kwargs["SignalStrengthFilter"]
        if "AdvertisementFilter" in kwargs:
            # TODO: Handle AdvertisementFilter parameters
            self._advertisement_filter = kwargs["AdvertisementFilter"]
    async def get_discovered_devices(self) -> List[BLEDevice]:
        """Return a ``BLEDevice`` for every advertisement seen so far.

        A device whose advertisement carried no local name has its name filled
        in from a cached scan response for the same address, when one exists.
        """
        found = []
        # Snapshot the values: the Received handler may mutate _devices
        # concurrently while we iterate.
        for event_args in list(self._devices.values()):
            new_device = self.parse_eventargs(event_args)
            if (
                not new_device.name
                and event_args.BluetoothAddress in self._scan_responses
            ):
                new_device.name = self._scan_responses[
                    event_args.BluetoothAddress
                ].Advertisement.LocalName
            found.append(new_device)
        return found
    @staticmethod
    def parse_eventargs(event_args):
        """Convert a Received event's arguments into a ``BLEDevice``.

        Extracts the formatted address, advertised service UUIDs, and
        per-company manufacturer data payloads; the raw ``event_args`` object
        is kept as the device's backing ``details``.
        """
        bdaddr = _format_bdaddr(event_args.BluetoothAddress)
        uuids = []
        for u in event_args.Advertisement.ServiceUuids:
            uuids.append(u.ToString())
        data = {}
        for m in event_args.Advertisement.ManufacturerData:
            with BleakDataReader(m.Data) as reader:
                data[m.CompanyId] = reader.read()
        local_name = event_args.Advertisement.LocalName
        return BLEDevice(
            bdaddr, local_name, event_args, uuids=uuids, manufacturer_data=data
        )
    def register_detection_callback(self, callback: Callable):
        """Set a function to act as Received Event Handler.
        Documentation for the Event Handler:
        https://docs.microsoft.com/en-us/uwp/api/windows.devices.bluetooth.advertisement.bluetoothleadvertisementwatcher.received
        Args:
            callback: Function accepting two arguments:
                sender (``Windows.Devices.Bluetooth.AdvertisementBluetoothLEAdvertisementWatcher``) and
                eventargs (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementReceivedEventArgs``)
        """
        # Stored here and invoked from the Received event handler for each
        # non-scan-response advertisement.
        self._callback = callback
# Windows specific
@property
def status(self) -> int:
"""Get status of the Watcher.
Returns:
Aborted 4
An error occurred during transition or scanning that stopped the watcher due to an error.
Created 0
The initial status of the watcher.
Started 1
The watcher is started.
Stopped 3
The watcher is stopped.
Stopping 2
The watcher stop command was issued.
"""
return self.watcher.Status if self.watcher else None
    @classmethod
    async def find_device_by_address(
        cls, device_identifier: str, timeout: float = 10.0, **kwargs
    ) -> Union[BLEDevice, None]:
        """A convenience method for obtaining a ``BLEDevice`` object specified by Bluetooth address.
        Args:
            device_identifier (str): The Bluetooth address of the Bluetooth peripheral.
            timeout (float): Optional timeout to wait for detection of specified peripheral
                before giving up. Defaults to 10.0 seconds.
        Keyword Args:
            scanning mode (str): Set to ``Passive`` to avoid the ``Active`` scanning mode.
            SignalStrengthFilter (``Windows.Devices.Bluetooth.BluetoothSignalStrengthFilter``): A
                BluetoothSignalStrengthFilter object used for configuration of Bluetooth LE advertisement
                filtering that uses signal strength-based filtering.
            AdvertisementFilter (``Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementFilter``): A
                BluetoothLEAdvertisementFilter object used for configuration of Bluetooth LE
                advertisement filtering that uses payload section-based filtering.
        Returns:
            The ``BLEDevice`` sought or ``None`` if not detected.
        """
        # NOTE(review): **kwargs is documented above but not forwarded to
        # cls(...) below -- confirm whether the filter kwargs should be passed on.
        # The watcher reports addresses as a 64-bit integer; normalize "AA:BB:..".
        ulong_id = int(device_identifier.replace(":", ""), 16)
        loop = asyncio.get_event_loop()
        stop_scanning_event = asyncio.Event()
        scanner = cls(timeout=timeout)
        def stop_if_detected(sender, event_args):
            # May be invoked off the event-loop thread, hence the thread-safe
            # hand-off when signalling the event.
            if event_args.BluetoothAddress == ulong_id:
                loop.call_soon_threadsafe(stop_scanning_event.set)
        return await scanner._find_device_by_address(
            device_identifier, stop_scanning_event, stop_if_detected, timeout
        )
| 3,716 | 0 | 265 |
b4c3955dd508cc1e9a27ca02d24fb2d7945bcf45 | 467 | py | Python | src/super_gradients/training/datasets/detection_datasets/coco_detection.py | Deci-AI/super-gradients | 658f638389654668a085e23c3b19622241fd9267 | [
"Apache-2.0"
] | 308 | 2021-12-30T10:14:30.000Z | 2022-03-30T19:05:31.000Z | src/super_gradients/training/datasets/detection_datasets/coco_detection.py | karndeepsingh/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 | [
"Apache-2.0"
] | 24 | 2022-01-10T08:05:37.000Z | 2022-03-30T18:49:06.000Z | src/super_gradients/training/datasets/detection_datasets/coco_detection.py | karndeepsingh/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 | [
"Apache-2.0"
] | 26 | 2021-12-31T06:04:07.000Z | 2022-03-21T09:51:44.000Z | from super_gradients.training.datasets.detection_datasets.detection_dataset import DetectionDataSet
from super_gradients.training.datasets.datasets_conf import COCO_DETECTION_CLASSES_LIST
class COCODetectionDataSet(DetectionDataSet):
    """
    COCODetectionDataSet - DetectionDataSet specialized for the COCO
    object-detection benchmark (uses the COCO class list).
    """
| 35.923077 | 99 | 0.777302 | from super_gradients.training.datasets.detection_datasets.detection_dataset import DetectionDataSet
from super_gradients.training.datasets.datasets_conf import COCO_DETECTION_CLASSES_LIST
class COCODetectionDataSet(DetectionDataSet):
    """DetectionDataSet pre-configured with the COCO detection class list."""
    def __init__(self, *args, **kwargs):
        """Forward all arguments to DetectionDataSet, forcing the COCO classes."""
        # Always use the COCO label set, overriding any caller-supplied value.
        kwargs.update(all_classes_list=COCO_DETECTION_CLASSES_LIST)
        super().__init__(*args, **kwargs)
| 122 | 0 | 27 |
f00bbaa6bf9d748d229a232f4dfbb02cf2de0cca | 2,904 | py | Python | contract_tests.py | LukasForst/SWA-TP-user-service-contract-tests | 7f08be64b23ff63f2b647c811226097619a363db | [
"MIT"
] | null | null | null | contract_tests.py | LukasForst/SWA-TP-user-service-contract-tests | 7f08be64b23ff63f2b647c811226097619a363db | [
"MIT"
] | null | null | null | contract_tests.py | LukasForst/SWA-TP-user-service-contract-tests | 7f08be64b23ff63f2b647c811226097619a363db | [
"MIT"
] | null | null | null | import unittest
import uuid
import requests
SERVICE_URL = 'http://localhost:8080'
if __name__ == '__main__':
unittest.main()
| 38.210526 | 113 | 0.625689 | import unittest
import uuid
import requests
SERVICE_URL = 'http://localhost:8080'
class MyTestCase(unittest.TestCase):
    """Contract tests for the user service.

    Requires a live server at SERVICE_URL; registrations use a random UUID in
    the e-mail so each run creates fresh, unique users.
    """
    def test_status_returns_ok(self):
        r = requests.get(f'{SERVICE_URL}/status')
        self.assertEqual('OK', r.json()['status'])
    def test_version_returns_some_version(self):
        r = requests.get(f'{SERVICE_URL}/version')
        self.assertEqual(200, r.status_code)
        json = r.json()
        self.assertIsNotNone(json['version'])
    def test_get_without_bearer_returns_401(self):
        r = requests.get(f'{SERVICE_URL}/user')
        self.assertEqual(401, r.status_code)
    def test_login_without_correct_user_returns_401(self):
        auth = {'email': 'none', 'password': 'nope'}
        r = requests.post(f'{SERVICE_URL}/user/login', json=auth)
        self.assertEqual(401, r.status_code)
    def test_registration_unique_user(self):
        user = {'first_name': 'John', 'last_name': 'Snow', 'email': f'JohnSnow-{str(uuid.uuid4())}@castle.black',
                'password': 'Wind1'}
        r = requests.put(f'{SERVICE_URL}/user/register', json=user)
        self.assertEqual(200, r.status_code)
        j = r.json()
        self.assertIsNotNone(j['auth_token'])
        self.assertEqual('Successfully registered.', j['message'])
        self.assertEqual('success', j['status'])
    def test_registration_duplicated_user(self):
        user = {'first_name': 'John', 'last_name': 'Snow', 'email': f'JohnSnow-{str(uuid.uuid4())}@castle.black',
                'password': 'Wind1'}
        r = requests.put(f'{SERVICE_URL}/user/register', json=user)
        self.assertEqual(200, r.status_code)
        # Re-registering the exact same e-mail must be rejected as a conflict.
        r = requests.put(f'{SERVICE_URL}/user/register', json=user)
        self.assertEqual(409, r.status_code)
    def test_login_self(self):
        user = {'first_name': 'John', 'last_name': 'Snow', 'email': f'JohnSnow-{str(uuid.uuid4())}@castle.black',
                'password': 'Wind1'}
        r = requests.put(f'{SERVICE_URL}/user/register', json=user)
        self.assertEqual(200, r.status_code)
        auth = {'email': user['email'], 'password': user['password']}
        r = requests.post(f'{SERVICE_URL}/user/login', json=auth)
        self.assertEqual(200, r.status_code)
        self.assertIsNotNone(r.json()['auth_token'])
    def test_get_self(self):
        user = {'first_name': 'John', 'last_name': 'Snow', 'email': f'JohnSnow-{str(uuid.uuid4())}@castle.black',
                'password': 'Wind1'}
        r = requests.put(f'{SERVICE_URL}/user/register', json=user)
        self.assertEqual(200, r.status_code)
        # Use the registration token to fetch the authenticated user's profile.
        token = r.json()['auth_token']
        r = requests.get(f'{SERVICE_URL}/user', headers={"Authorization": f"Bearer {token}"})
        self.assertEqual(200, r.status_code)
        j = r.json()
        self.assertEqual(user['email'], j['data']['email'])
if __name__ == '__main__':
unittest.main()
| 2,516 | 15 | 239 |
e59843bb2a662d5fa0cc55c5911cde4a95adf697 | 674 | py | Python | src/user/user_controller.py | andresposada/mongoconnections-dependency-injections | 77b19229e6a867584051592632b7624d1dd3c7ad | [
"Apache-2.0"
] | null | null | null | src/user/user_controller.py | andresposada/mongoconnections-dependency-injections | 77b19229e6a867584051592632b7624d1dd3c7ad | [
"Apache-2.0"
] | null | null | null | src/user/user_controller.py | andresposada/mongoconnections-dependency-injections | 77b19229e6a867584051592632b7624d1dd3c7ad | [
"Apache-2.0"
] | null | null | null | import json
from src.user.use_cases import UserUseCase
from src.user.schemas import UserSchema
from abc import ABC, abstractmethod
from src.containers import Services
| 22.466667 | 47 | 0.718101 | import json
from src.user.use_cases import UserUseCase
from src.user.schemas import UserSchema
from abc import ABC, abstractmethod
from src.containers import Services
class UserControllerBase(ABC):
    """Abstract base class for controller actions; subclasses implement execute()."""
    @abstractmethod
    def execute(self, **kwargs):
        """Perform the controller action. Must be overridden by subclasses."""
        pass
class UserControllerPost(UserControllerBase):
    """Controller action that creates a user via the injected user service."""
    def execute(self, **kwargs):
        # NOTE(review): kwargs is accepted but ignored, and create_user() is
        # called with no payload -- confirm whether request data should be
        # forwarded here.
        user_service = Services.user_service
        user_service.create_user()
class UserController:
    """Entry points for user HTTP handlers (Lambda-style event/context)."""
    @staticmethod
    def post(event, context):
        """Create a user from the serialized payload carried in the event."""
        # NOTE(review): reads event['json']; many API gateways deliver the body
        # as event['body'] -- confirm against the actual caller.
        user_json = json.loads(event['json'])
        user_schema = UserSchema()
        # Validate/deserialize via the marshmallow-style schema before use.
        user_data = user_schema.load(user_json)
        UserUseCase.create_user(user_data)
| 283 | 125 | 96 |
46cb05635ad337e6167d8763bcaa6919b36bc980 | 2,539 | py | Python | server/app/service/motion.py | raspihats/EvePnP | 2961d759554c38609f0f431c012ea8d38016f347 | [
"MIT"
] | null | null | null | server/app/service/motion.py | raspihats/EvePnP | 2961d759554c38609f0f431c012ea8d38016f347 | [
"MIT"
] | 5 | 2020-07-07T19:41:23.000Z | 2022-02-12T05:42:50.000Z | server/app/service/motion.py | raspihats/EvePnP | 2961d759554c38609f0f431c012ea8d38016f347 | [
"MIT"
] | null | null | null | from ..dao import axis_dao
from .controllers import controllers_service
motion_service = MotionService()
| 36.271429 | 89 | 0.579756 | from ..dao import axis_dao
from .controllers import controllers_service
class MotionService(object):
    """High-level motion operations combining per-axis configuration
    (axis_dao) with the hardware motion controller (controllers_service)."""
    def home(self):
        """Home all axes, then park z first, then x/y, then a/b.

        z is parked before the gantry axes (same z-first ordering as
        move_safe), presumably to keep the head clear during travel.
        """
        controllers_service.motion_controller.home()
        self.park([{'id': 'z'}])
        self.park([{'id': 'x'}, {'id': 'y'}])
        self.park([{'id': 'a'}, {'id': 'b'}])
    def park(self, axis_list, speed_factor=1):
        """Move the listed axes to their configured park positions.

        The feed rate is the minimum configured feed_rate among the matched
        axes, scaled by speed_factor.
        NOTE(review): if no axis id matches any configuration, min_feed_rate
        stays None and the multiplication below raises TypeError.
        """
        # add park position to axis in axis_list and determine minimum feed_rate
        min_feed_rate = None
        positions = {}
        for axis_config in axis_dao.get_list():
            for axis in axis_list:
                if axis['id'] == axis_config['id']:
                    positions[axis['id']] = axis_config['park']
                    if min_feed_rate is None or min_feed_rate > axis_config['feed_rate']:
                        min_feed_rate = axis_config['feed_rate']
        controllers_service.motion_controller.move(
            positions, min_feed_rate * speed_factor)
    def jog(self, axis, speed_factor=1):
        """Jog one axis by axis['step'] at its configured feed rate.

        NOTE(review): if axis['id'] matches no configuration, feed_rate stays
        None and the multiplication raises TypeError.
        """
        feed_rate = None
        for axis_config in axis_dao.get_list():
            if axis['id'] == axis_config['id']:
                feed_rate = axis_config['feed_rate']
        controllers_service.motion_controller.jog(
            axis['id'], axis['step'], feed_rate * speed_factor)
    def move(self, axis_list, speed_factor=1):
        """Move the listed axes to caller-supplied axis['position'] targets.

        Same feed-rate selection as park(): minimum configured feed_rate among
        the matched axes, scaled by speed_factor.
        """
        min_feed_rate = None
        positions = {}
        for axis_config in axis_dao.get_list():
            for axis in axis_list:
                if axis['id'] == axis_config['id']:
                    positions[axis['id']] = axis['position']
                    if min_feed_rate is None or min_feed_rate > axis_config['feed_rate']:
                        min_feed_rate = axis_config['feed_rate']
        controllers_service.motion_controller.move(
            positions, min_feed_rate * speed_factor)
    def move_safe(self, axis_list, speed_factor=1):
        """Move with the head raised: park z, move the other axes, then move z.

        NOTE(review): mutates the caller's axis_list (remove) and raises
        ValueError if no entry with id 'z' is present.
        """
        self.park([{'id': 'z'}])
        z_axis = None
        for axis in axis_list:
            if axis['id'] == 'z':
                z_axis = axis
                break
        axis_list.remove(z_axis)
        self.move(axis_list, speed_factor)
        self.move([z_axis], speed_factor)
    def get_position(self):
        """Return [{'id', 'position'}, ...] for every configured axis the
        controller currently reports a position for."""
        position = controllers_service.motion_controller.position
        response = []
        for axis_config in axis_dao.get_list():
            _id = axis_config['id']
            if _id in position:
                response.append({'id': _id, 'position': position[_id]})
        return response
# Module-level singleton used by the service/API layer.
motion_service = MotionService()
| 2,239 | 7 | 185 |
dc2728c07d84b61179c743daba2813a4ab10792d | 2,029 | py | Python | tests/int_return_menu_test.py | IXIXIXIXIXIXIXIXIX/pyMenus | 843abe73e3c19492cfdc62365bc2a4e6c5066050 | [
"MIT"
] | null | null | null | tests/int_return_menu_test.py | IXIXIXIXIXIXIXIXIX/pyMenus | 843abe73e3c19492cfdc62365bc2a4e6c5066050 | [
"MIT"
] | null | null | null | tests/int_return_menu_test.py | IXIXIXIXIXIXIXIXIX/pyMenus | 843abe73e3c19492cfdc62365bc2a4e6c5066050 | [
"MIT"
] | null | null | null | import unittest
from py_CLI_menus.int_return_menu import IntReturnMenu
| 32.725806 | 120 | 0.631345 | import unittest
from py_CLI_menus.int_return_menu import IntReturnMenu
class TestIntReturnMenu(unittest.TestCase):
    """Interactive tests for IntReturnMenu.

    choose() reads from stdin, so these tests expect a human operator to pick
    the option named "choose this" (or Q) when prompted.
    """
    def setUp(self):
        # Each list marks which entry the operator should select.
        self.list_of_options_1 = [
            "First Option - choose this",
            "Second Option",
            "Third Option",
            "Fourth Option"
        ]
        self.list_of_options_2 = [
            "First Option",
            "Second Option - choose this",
            "Third Option",
            "Fourth Option"
        ]
        self.list_of_options_3 = [
            "First Option",
            "Second Option",
            "Third Option",
            "Fourth Option - choose this one"
        ]
        self.list_of_options_4 = [
            "CHOOSE Q",
            "CHOOSE Q",
            "CHOOSE Q",
            "CHOOSE Q"
        ]
        self.menu_header = "Select one of the following options: "
        self.menu_header_2 = "Choose Q this time: "
    def test_choose_first_option(self):
        string_menu = IntReturnMenu(self.list_of_options_1, self.menu_header)
        self.assertEqual(0, string_menu.choose())
    def test_choose_middle_option(self):
        string_menu = IntReturnMenu(self.list_of_options_2, self.menu_header)
        self.assertEqual(1, string_menu.choose())
    def test_choose_last_option(self):
        string_menu = IntReturnMenu(self.list_of_options_3, self.menu_header)
        self.assertEqual(3, string_menu.choose())
    def test_choose_q(self):
        # Quitting with Q returns None instead of an index.
        string_menu = IntReturnMenu(self.list_of_options_4, self.menu_header_2)
        self.assertEqual(None, string_menu.choose())
    def test_append_option(self):
        # The menu keeps a live reference to the list: appending after
        # construction is reflected in object_list.
        string_menu = IntReturnMenu(self.list_of_options_1, self.menu_header)
        self.list_of_options_1.append("New fifth option")
        self.assertEqual(5, len(string_menu.object_list))
    def test_confirm_loop(self):
        string_menu = IntReturnMenu(self.list_of_options_1, "Check y, n and other inputs to confirm work as expected: ")
        self.assertEqual(0, string_menu.choose(True))
| 1,724 | 22 | 212 |
37a912fcaa51bf417936b1e79aa45ed24ecd9252 | 563 | py | Python | backend/smartMoney_project/smartMoney_app/migrations/0013_expense_category.py | uca-pid/f-al-cuadrado-2021 | 2cd203f5a998f3b4f54811a382458ce5b6762a2a | [
"MIT"
] | null | null | null | backend/smartMoney_project/smartMoney_app/migrations/0013_expense_category.py | uca-pid/f-al-cuadrado-2021 | 2cd203f5a998f3b4f54811a382458ce5b6762a2a | [
"MIT"
] | 13 | 2021-09-08T21:05:49.000Z | 2021-12-16T04:58:51.000Z | backend/smartMoney_project/smartMoney_app/migrations/0013_expense_category.py | uca-pid/f-al-cuadrado-2021 | 2cd203f5a998f3b4f54811a382458ce5b6762a2a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-09-19 22:33
from django.db import migrations, models
import django.db.models.deletion
import smartMoney_app.models
| 26.809524 | 157 | 0.68206 | # Generated by Django 3.2.4 on 2021-09-19 22:33
from django.db import migrations, models
import django.db.models.deletion
import smartMoney_app.models
class Migration(migrations.Migration):
    """Auto-generated: adds Expense.category FK (default Category.other)."""
    dependencies = [
        ('smartMoney_app', '0012_auto_20210919_2119'),
    ]
    operations = [
        migrations.AddField(
            model_name='expense',
            name='category',
            # CASCADE: deleting a category also deletes its expenses.
            field=models.ForeignKey(default=smartMoney_app.models.Category.other, on_delete=django.db.models.deletion.CASCADE, to='smartMoney_app.category'),
        ),
    ]
| 0 | 387 | 23 |
94e1a55855eb3fba74bcfbc988c3961ba5b60fc7 | 2,321 | py | Python | udapi/mongodb/databases.py | FOSS-X/udapi-core-v2 | 0e0cceb7130389d01bc4e674ad9a1f38fb556837 | [
"MIT"
] | null | null | null | udapi/mongodb/databases.py | FOSS-X/udapi-core-v2 | 0e0cceb7130389d01bc4e674ad9a1f38fb556837 | [
"MIT"
] | null | null | null | udapi/mongodb/databases.py | FOSS-X/udapi-core-v2 | 0e0cceb7130389d01bc4e674ad9a1f38fb556837 | [
"MIT"
] | null | null | null | #
# databases.py
# Start of dataabases.py
#
# Created by FOSS-X UDAPI Desgin Team on 7/05/20.
# Copyright © 2020 FOSS-X. All rights reserved.
#
from flask import Flask, jsonify, request,Blueprint
from ..util_mongodb import *
from ..util import *
from ..util_mysql import *
import pymongo
mod = Blueprint('databasesMongodb', __name__)
client = pymongo.MongoClient()
@mod.route('/databases', methods=['GET'])
@token_required
def get_mysql_db(username):
    """ List all the databases of databaseType = mongodb for *username*.

    Returns {"success": 1, "mongodb": [names...]} on success, or
    {"success": 0, "error_code", "message"} on a MySQL error.
    """
    databaseType = 'mongodb'
    try:
        cnx = connectSQLServerDB('root', 'password', 'udapiDB')
        try:
            mycursor = cnx.cursor()
            # Parameterized query: never interpolate request/token-derived
            # values (username) into SQL text.
            sql = ("SELECT * FROM udapiDB.configs "
                   "WHERE (username = %s) AND (databaseType = %s);")
            mycursor.execute(sql, (username, databaseType))
            entities = mycursor.fetchall()
            attributes = [desc[0] for desc in mycursor.description]
            fieldType = [FieldType.get_info(desc[1]) for desc in mycursor.description]  # Debug code
            # Column 1 of each config row is the user-visible database name.
            results = [entity[1] for entity in entities]
        finally:
            # Close the connection even when the query raises.
            cnx.close()
        return jsonify(success=1, mongodb=results)
    except mysql.connector.Error as err:
        return jsonify(success=0, error_code=err.errno, message=err.msg)
@mod.route('/databases', methods=['POST'])
@token_required
@mod.route('/databases/<databaseName>', methods=['DELETE'])
@token_required
| 35.707692 | 124 | 0.691081 | #
# databases.py
# Start of dataabases.py
#
# Created by FOSS-X UDAPI Desgin Team on 7/05/20.
# Copyright © 2020 FOSS-X. All rights reserved.
#
from flask import Flask, jsonify, request,Blueprint
from ..util_mongodb import *
from ..util import *
from ..util_mysql import *
import pymongo
mod = Blueprint('databasesMongodb', __name__)
client = pymongo.MongoClient()
@mod.route('/databases', methods=['GET'])
@token_required
def get_mysql_db(username):
    """ List all the databases of databaseType = mongodb for *username*.

    Returns {"success": 1, "mongodb": [names...]} on success, or
    {"success": 0, "error_code", "message"} on a MySQL error.
    """
    databaseType = 'mongodb'
    try:
        cnx = connectSQLServerDB('root', 'password', 'udapiDB')
        try:
            mycursor = cnx.cursor()
            # Parameterized query: never interpolate request/token-derived
            # values (username) into SQL text.
            sql = ("SELECT * FROM udapiDB.configs "
                   "WHERE (username = %s) AND (databaseType = %s);")
            mycursor.execute(sql, (username, databaseType))
            entities = mycursor.fetchall()
            attributes = [desc[0] for desc in mycursor.description]
            fieldType = [FieldType.get_info(desc[1]) for desc in mycursor.description]  # Debug code
            # Column 1 of each config row is the user-visible database name.
            results = [entity[1] for entity in entities]
        finally:
            # Close the connection even when the query raises.
            cnx.close()
        return jsonify(success=1, mongodb=results)
    except mysql.connector.Error as err:
        return jsonify(success=0, error_code=err.errno, message=err.msg)
@mod.route('/databases', methods=['POST'])
@token_required
def createDatabase(username):
    """Create a Mongo database for *username* named by the JSON 'databaseName'."""
    configData = request.get_json()
    databaseName = configData['databaseName']
    # Per-user internal name, so different users can reuse the same name.
    processedDBName = getDBName(username, databaseName)
    if dbExists(databaseName, processedDBName):
        raise duplicateResource(
            f"Database '{databaseName}' already exists.")
    # NOTE(review): pymongo creates databases lazily; this access alone does
    # not materialize the DB until data is written -- the config row below is
    # what records its existence.
    client[processedDBName]
    addToConfig(username,databaseName,processedDBName)
    return jsonify({'code': 200, 'message': f"Database '{databaseName}' created successfully", "success": 1})
@mod.route('/databases/<databaseName>', methods=['DELETE'])
@token_required
def deleteDB(username,databaseName):
    """Delete *username*'s database *databaseName* and its config entry."""
    storedDB=getDBName(username,databaseName)
    if not dbExists(databaseName, storedDB):
        raise notFound(f"Unknown database '{databaseName}'.")
    # Remove the config record first, then drop the actual Mongo database.
    removeFromConfig(username,databaseName, storedDB)
    client.drop_database(storedDB)
    return jsonify({'code': 200, 'message': f"Database {databaseName} deleted successfully", "success": 1})
| 843 | 0 | 44 |
56483d92d184dffd6f7b6f11cd36ff2344b63545 | 550 | py | Python | relaax/server/common/saver/fs_saver.py | j0k/relaax | dff865facc2932e4f8317d6ab4ad32a1f218e7b6 | [
"MIT"
] | 4 | 2018-07-31T06:32:30.000Z | 2021-05-02T20:21:37.000Z | relaax/server/common/saver/fs_saver.py | bohblue2/relaax | 0a7ed8f2a21e37ca047e16d216d164527c1fffdd | [
"MIT"
] | null | null | null | relaax/server/common/saver/fs_saver.py | bohblue2/relaax | 0a7ed8f2a21e37ca047e16d216d164527c1fffdd | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import tensorflow
import saver
| 22.916667 | 52 | 0.667273 | from __future__ import print_function
import os
import tensorflow
import saver
class FsSaver(saver.Saver):
    """Saver that keeps checkpoints in a local filesystem directory."""

    def __init__(self, dir):
        """Remember the target directory; it is created lazily on first save."""
        super(FsSaver, self).__init__()
        self._dir = dir

    def restore_latest_checkpoint(self, session):
        """Restore the most recent checkpoint found in the directory."""
        return self._restore(self._dir, session)

    def save_checkpoint(self, session, global_step):
        """Write a checkpoint for *global_step*, creating the directory if needed."""
        directory_missing = not os.path.exists(self._dir)
        if directory_missing:
            os.makedirs(self._dir)
        self._save(self._dir, session, global_step)

    def location(self):
        """Human-readable description of where checkpoints are stored."""
        return "'%s' dir" % self._dir
| 332 | 6 | 130 |
062502d51ba25c33ec0f0bba62d7be2e338d4bb9 | 1,762 | py | Python | syn/tagmathon/b/tests/test_interpreter.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 1 | 2021-07-15T08:55:12.000Z | 2021-07-15T08:55:12.000Z | syn/tagmathon/b/tests/test_interpreter.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 7 | 2021-01-07T23:51:57.000Z | 2021-12-13T19:50:57.000Z | syn/tagmathon/b/tests/test_interpreter.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 2 | 2016-07-11T08:46:31.000Z | 2017-12-13T13:30:51.000Z | from nose.tools import assert_raises
from syn.tagmathon.b import Frame, Env, eval
#-------------------------------------------------------------------------------
# Frame
#-------------------------------------------------------------------------------
# Env
#-------------------------------------------------------------------------------
# eval
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
from syn.base_utils import run_all_tests
run_all_tests(globals(), verbose=True, print_errors=False)
| 21.753086 | 80 | 0.420545 | from nose.tools import assert_raises
from syn.tagmathon.b import Frame, Env, eval
#-------------------------------------------------------------------------------
# Frame
def test_frame():
    """Exercise Frame: local/global precedence, delete fallback, and gensym."""
    f = Frame()
    f['a'] = 1
    f['b'] = 2
    assert f.globals == {}
    assert f.locals == dict(a=1, b=2)
    f.set_global('a', 3)
    f.set_global('c', 4)
    # Locals shadow globals; unshadowed globals remain visible.
    assert f['a'] == 1
    assert f['b'] == 2
    assert f['c'] == 4
    assert len(f) == 3
    assert list(f) == ['a', 'b', 'c']
    del f['a']
    # Deleting the local exposes the global value underneath.
    assert f['a'] == 3
    del f['a']
    assert_raises(KeyError, f.__getitem__, 'a')
    assert_raises(KeyError, f.__delitem__, 'a')
    # update() writes into locals only.
    f.update(dict(a=1))
    assert f.locals == dict(a=1, b=2)
    f.gensym()
#-------------------------------------------------------------------------------
# Env
def test_env():
    """Exercise Env: scope push/pop, update, globals fallback, and gensym."""
    e = Env()
    e['a'] = 1
    e['b'] = 2
    assert dict(e.items()) == dict(a=1, b=2)
    # A pushed scope shadows outer bindings and holds new ones.
    e.push({})
    e['a'] = 3
    e['c'] = 4
    assert dict(e.items()) == dict(a=3, b=2, c=4)
    # Popping restores the outer scope's view.
    e.pop()
    assert dict(e.items()) == dict(a=1, b=2)
    e.update(dict(a=3, c=4))
    assert dict(e.items()) == dict(a=3, b=2, c=4)
    del e['c']
    assert dict(e.items()) == dict(a=3, b=2)
    assert len(e) == 2
    assert list(e) == ['a', 'b']
    # Globals are shadowed by the current binding and exposed after delete.
    e.set_global('a', 4)
    assert e['a'] == 3
    del e['a']
    assert e['a'] == 4
    e.gensym()
#-------------------------------------------------------------------------------
# eval
def test_eval():
    """tagmathon eval: integers pass through; an empty tuple evaluates to []."""
    assert eval(1) == 1
    assert eval(()) == []
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
from syn.base_utils import run_all_tests
run_all_tests(globals(), verbose=True, print_errors=False)
| 1,107 | 0 | 69 |
23022abdf1b502146d3781d4836f78943a7ec6e6 | 2,186 | py | Python | kvmagent/kvmagent/test/test_vm_plugin_update_hostIommu_status.py | qianfei11/zstack-utility | e791bc6b6ae3a74e202f6fce84bde498c715aee8 | [
"Apache-2.0"
] | 55 | 2017-02-10T07:55:21.000Z | 2021-09-01T00:59:36.000Z | kvmagent/kvmagent/test/test_vm_plugin_update_hostIommu_status.py | qianfei11/zstack-utility | e791bc6b6ae3a74e202f6fce84bde498c715aee8 | [
"Apache-2.0"
] | 106 | 2017-02-13T09:58:27.000Z | 2022-02-15T09:51:48.000Z | kvmagent/kvmagent/test/test_vm_plugin_update_hostIommu_status.py | qianfei11/zstack-utility | e791bc6b6ae3a74e202f6fce84bde498c715aee8 | [
"Apache-2.0"
] | 68 | 2017-02-13T11:02:01.000Z | 2021-12-16T11:02:01.000Z | '''
@author: kaicai.hu
'''
import unittest
import tempfile
from kvmagent import kvmagent
from kvmagent.plugins import vm_plugin
from zstacklib.utils import bash
if __name__ == "__main__":
unittest.main() | 46.510638 | 237 | 0.639067 | '''
@author: kaicai.hu
'''
import unittest
import tempfile
from kvmagent import kvmagent
from kvmagent.plugins import vm_plugin
from zstacklib.utils import bash
class Test(unittest.TestCase):
    """Round-trip test for vm_plugin.UpdateConfigration.updateHostIommu.

    Writes a sample GRUB_CMDLINE_LINUX line (intel_iommu=on plus a GPU
    modprobe blacklist) to a temp file, then verifies that disabling IOMMU
    strips both settings and re-enabling restores them, using grep via bash_roe
    (whose first tuple element is the grep return code: 0 == match found).
    """
    def testName(self):
        # NOTE(review): mode='w+b' with a str payload only works on Python 2 --
        # confirm the target interpreter.
        temp = tempfile.NamedTemporaryFile(prefix='grub', suffix='', dir='/tmp', mode='w+b', delete=True)
        try:
            temp.write('GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=zstack/root rd.lvm.lv=zstack/swap rhgb quiet d=d intel_iommu=on modprobe.blacklist=snd_hda_intel,amd76x_edac,vga16fb,nouveau,rivafb,nvidiafb,rivatv,amdgpu,radeon"\n')
            temp.seek(0)
            path = temp.name
            # Disable IOMMU: both intel_iommu and the blacklist must be gone.
            updateConfigration = vm_plugin.UpdateConfigration()
            updateConfigration.path = path
            updateConfigration.enableIommu = False
            success, error = updateConfigration.updateHostIommu()
            self.assertTrue(success)
            r_on, o_on, e_on = bash.bash_roe("grep -E 'intel_iommu(\ )*=(\ )*on' %s" % path)
            r_off, o_off, e_off = bash.bash_roe("grep -E 'intel_iommu(\ )*=(\ )*off' %s" % path)
            r_modprobe_blacklist, o_modprobe_blacklist, e_modprobe_blacklist = bash.bash_roe("grep -E 'modprobe.blacklist(\ )*=' %s" % path)
            self.assertNotEqual(r_on, 0)
            self.assertNotEqual(r_off, 0)
            self.assertNotEqual(r_modprobe_blacklist, 0)
            # Re-enable IOMMU: intel_iommu=on and the blacklist must be back.
            updateConfigration = vm_plugin.UpdateConfigration()
            updateConfigration.path = path
            updateConfigration.enableIommu = True
            success, error = updateConfigration.updateHostIommu()
            self.assertTrue(success)
            r_on, o_on, e_on = bash.bash_roe("grep -E 'intel_iommu(\ )*=(\ )*on' %s" % path)
            r_off, o_off, e_off = bash.bash_roe("grep -E 'intel_iommu(\ )*=(\ )*off' %s" % path)
            r_modprobe_blacklist, o_modprobe_blacklist, e_modprobe_blacklist = bash.bash_roe("grep -E 'modprobe.blacklist(\ )*=' %s" % path)
            self.assertEqual(r_on, 0)
            self.assertNotEqual(r_off, 0)
            self.assertEqual(r_modprobe_blacklist, 0)
        finally:
            temp.close()
if __name__ == "__main__":
unittest.main() | 1,911 | 9 | 56 |
ea8f8a167839e9e37d789b4d71ccdca1c2f23656 | 2,816 | py | Python | math/functions/contrast/contrast.py | timi-liuliang/auto | 2562fd6c41b05e6db453bc0346ddcb4a0a9281a1 | [
"MIT"
] | 3 | 2022-02-07T09:45:59.000Z | 2022-02-08T08:48:43.000Z | math/functions/contrast/contrast.py | timi-liuliang/auto | 2562fd6c41b05e6db453bc0346ddcb4a0a9281a1 | [
"MIT"
] | 13 | 2021-08-06T02:20:30.000Z | 2021-09-09T02:20:43.000Z | math/functions/contrast/contrast.py | timi-liuliang/auto | 2562fd6c41b05e6db453bc0346ddcb4a0a9281a1 | [
"MIT"
] | null | null | null | import os
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import FuncFormatter, MultipleLocator
from scipy import integrate
# total time
base_time = 4.0
pause_time = 1.5
total_time = base_time + pause_time
# fig
fig, ax = plt.subplots()
# plots
plot_0 = None
plot_25 = None
plot_50 = None
plot_75 = None
plot_100 = None
plot_125 = None
plot_150 = None
plot_175 = None
plot_200 = None
# delta time
play_speed = 0.5
dt = 0.02 * play_speed
# figure size (pixels->inches)
# https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/figure_size_units.html
px = 1/plt.rcParams["figure.dpi"]
fig_width = float(960) * px
fig_height = float(960) * px
# clear
fig, ax = plt.subplots(figsize=(fig_width, fig_height))
# show grid
ax.grid()
# animation
anim = animation.FuncAnimation(fig, init_func=init_figure, func=animation_frame, frames=np.arange(0, total_time, dt), interval=dt * 1000 / play_speed)
# save to gif
anim.save("contrast.gif", writer='pillow')
# save last frame to png
fig.savefig("contrast.png") | 22.173228 | 150 | 0.675426 | import os
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import FuncFormatter, MultipleLocator
from scipy import integrate
# Animation timeline: base_time seconds of drawing plus a trailing hold.
base_time = 4.0
pause_time = 1.5
total_time = base_time + pause_time
# Placeholder figure/axes; replaced with the final sized figure further below.
fig, ax = plt.subplots()
# Line handles, one per contrast value c of f(x, c); filled in by init_figure().
# NOTE(review): plot_200 is declared but never initialized or animated.
plot_0 = None
plot_25 = None
plot_50 = None
plot_75 = None
plot_100 = None
plot_125 = None
plot_150 = None
plot_175 = None
plot_200 = None
def set_title(title):
    """Set the global axes title (stringified via str.format)."""
    ax.set_title("{0}".format(title))
def set_lim(x_lim_begin, x_lim_end, y_lim_begin, y_lim_end):
    """Set the x and y limits of the global axes."""
    ax.set_xlim(x_lim_begin, x_lim_end)
    ax.set_ylim(y_lim_begin, y_lim_end)
def init_plot(name):
    """Create an empty line on the global axes labeled *name* and return it."""
    line = ax.plot([], [], label=name)[0]
    # Start from empty list data so points can be appended incrementally.
    line.set_data([], [])
    return line
def plot_append_value(p, x_value, y_value):
    """Append one (x, y) point to line *p* and push the data back to it."""
    # Fetch the current data, extend it, then re-apply via set_data.
    x_data = p.get_xdata()
    y_data = p.get_ydata()
    x_data.append(x_value)
    y_data.append(y_value)
    p.set_data(x_data, y_data)
def animation_plot(plot_data_table, contrast, i):
    """Append the point (i, f(i, c)) where f(x, c) = pow(x/0.18, c) * 0.18.

    0.18 is middle grey, the fixed point of the contrast curve for every c.
    """
    x_value = i
    y_value = math.pow(x_value / 0.18, contrast) * 0.18
    plot_append_value(plot_data_table, x_value, y_value)
def init_figure():
    """FuncAnimation init hook: title, limits, one empty line per contrast value.

    NOTE(review): the module-level plot_200 handle is neither declared global
    nor initialized here -- it stays None throughout.
    """
    # title
    set_title("f(x, c) = pow(x/0.18, c) * 0.18")
    # x in [0, 1], y clipped to [0, 4]
    set_lim(0.0, 1.0, 0.0, 4.0)
    # rebind the module-level line handles
    global plot_0
    global plot_25
    global plot_50
    global plot_75
    global plot_100
    global plot_125
    global plot_150
    global plot_175
    # one empty labeled line per contrast value
    plot_0 = init_plot("0.0")
    plot_25 = init_plot("0.25")
    plot_50 = init_plot("0.5")
    plot_75 = init_plot("0.75")
    plot_100 = init_plot("1.0")
    plot_125 = init_plot("1.25")
    plot_150 = init_plot("1.5")
    plot_175 = init_plot("1.75")
    # show legend
    ax.legend()
def animation_frame(i):
    """FuncAnimation frame hook: extend every curve at time *i*.

    Frames past base_time draw nothing, producing the trailing pause.
    """
    if i > base_time:
        return
    # Map the elapsed time onto x in [0, 1] (4.0 mirrors base_time).
    t = i / 4.0
    animation_plot(plot_0, 0.0, t)
    animation_plot(plot_25, 0.25, t)
    animation_plot(plot_50, 0.5, t)
    animation_plot(plot_75, 0.75, t)
    animation_plot(plot_100, 1.0, t)
    animation_plot(plot_125, 1.25, t)
    animation_plot(plot_150, 1.5, t)
    animation_plot(plot_175, 1.75, t)
# Frame spacing; play_speed < 1 slows playback without changing frame count.
play_speed = 0.5
dt = 0.02 * play_speed
# figure size (pixels->inches)
# https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/figure_size_units.html
px = 1/plt.rcParams["figure.dpi"]
fig_width = float(960) * px
fig_height = float(960) * px
# Replace the placeholder figure with the final 960x960 one.
fig, ax = plt.subplots(figsize=(fig_width, fig_height))
# show grid
ax.grid()
# Build the animation; interval is in milliseconds per frame.
anim = animation.FuncAnimation(fig, init_func=init_figure, func=animation_frame, frames=np.arange(0, total_time, dt), interval=dt * 1000 / play_speed)
# Render the animation to a GIF (pillow writer).
anim.save("contrast.gif", writer='pillow')
# Save the last rendered frame as a static image.
fig.savefig("contrast.png")
096631f832909cdbfb5d9cd339d4547385821674 | 75 | py | Python | simple_test.py | RaymondKlass/raspberry_pi_tuner | 0f2bc1d4a9e483f293575fc5b4765d7bf3489b49 | [
"MIT"
] | null | null | null | simple_test.py | RaymondKlass/raspberry_pi_tuner | 0f2bc1d4a9e483f293575fc5b4765d7bf3489b49 | [
"MIT"
] | null | null | null | simple_test.py | RaymondKlass/raspberry_pi_tuner | 0f2bc1d4a9e483f293575fc5b4765d7bf3489b49 | [
"MIT"
] | null | null | null | import spidev
import os
spi = spidev.SpiDev()
spi.open(0,0)
print("Yay")
| 9.375 | 21 | 0.693333 | import spidev
import os
# Smoke test: open SPI bus 0, chip-select 0; reaching the print means success.
spi = spidev.SpiDev()
spi.open(0,0)
print("Yay")
| 0 | 0 | 0 |
e5c02850e19cee76b29d8f84b88d8484cee6661b | 5,377 | py | Python | arith.py | versey-sherry/arith | e110fe050bd04ca00fd9d9c0d4261e4bff49366e | [
"MIT"
] | null | null | null | arith.py | versey-sherry/arith | e110fe050bd04ca00fd9d9c0d4261e4bff49366e | [
"MIT"
] | 1 | 2020-01-14T04:52:52.000Z | 2020-01-14T04:52:52.000Z | test/arith.py | versey-sherry/arith | e110fe050bd04ca00fd9d9c0d4261e4bff49366e | [
"MIT"
] | 1 | 2020-04-03T21:43:08.000Z | 2020-04-03T21:43:08.000Z |
#!/usr/bin/env python3
#I mainly followed this post https://ruslanspivak.com/lsbasi-part7/
#Lexer
#Tokenize the inputs
#Token type PLUS MUL MINUS INTEGER
import sys
sys.tracebacklimit=0
#String representation for debugging just in case
#lexer
#advance to the next character
#skipping white spaces
#multiple digits
#return lexical token one at a time
#Parser
#Parse the tokens into an AST
#some value from term
#some value form expression
#some value from term
#some value form expression
#some factor
#some value from term
#A node for all the integers
#Only evaluate integers and create num node
#Only evaluate multiplication and create mul node
#Evaluate plus and minus and create nodes
#Interpreter
#Evaluate the programming with AST
#for tree checking
'''
text = "45-3-7-2"
lex = Lexer(text)
par = Parser(lex)
tree = par.parse()
inter = Interpreter(tree)
#inter.load_tree()
print(inter.visit())
'''
if __name__ == '__main__':
main() | 23.378261 | 87 | 0.679375 |
#!/usr/bin/env python3
#I mainly followed this post https://ruslanspivak.com/lsbasi-part7/
#Lexer
#Tokenize the inputs
#Token type PLUS MUL MINUS INTEGER
import sys
sys.tracebacklimit=0
class Token():
    """A single lexical token: a type tag plus the literal value it carries.

    Examples: Token("INTEGER", 42), Token("PLUS", "+"), Token("EOF", None).
    """

    def __init__(self, type, value):
        self.type = type
        self.value = value

    def __repr__(self):
        # Debug-friendly rendering, e.g. Token(INTEGER,42)
        return f'Token({self.type},{self.value!r})'
#lexer
class Lexer():
    """Turns an arithmetic expression string into a stream of Tokens.

    Recognised tokens: INTEGER (multi-digit), PLUS, MINUS, MUL, and EOF
    once the input is exhausted. Raises Exception on any other character.
    """

    def __init__(self, text):
        self.text = text
        # Start from the first character
        self.pos = 0
        self.current_char = self.text[self.pos]

    def error(self):
        raise Exception("Invalid inputs")

    # advance to the next character (current_char becomes None at end of input)
    def next(self):
        self.pos += 1
        if self.pos > len(self.text) - 1:
            self.current_char = None
        else:
            self.current_char = self.text[self.pos]

    # skipping white spaces
    def skipwhitespace(self):
        while self.current_char is not None and self.current_char.isspace():
            self.next()

    # accumulate consecutive digits into one integer literal
    def num(self):
        result = ''
        while self.current_char is not None and self.current_char.isdigit():
            result = result + self.current_char
            self.next()
        return int(result)

    # return one lexical token per call
    def tokenize(self):
        while self.current_char is not None:
            if self.current_char.isspace():
                # BUG FIX: consume ALL whitespace, then restart the loop.
                # Previously a single next() fell through to the .isdigit()
                # test, which crashed on trailing whitespace (current_char
                # became None) and raised "Invalid inputs" on two or more
                # consecutive spaces.
                self.skipwhitespace()
                continue
            if self.current_char.isdigit():
                return Token("INTEGER", self.num())
            if self.current_char == "+":
                self.next()
                return Token("PLUS", "+")
            if self.current_char == "-":
                self.next()
                return Token("MINUS", "-")
            if self.current_char == "*":
                self.next()
                return Token("MUL", "*")
            self.error()
        return Token("EOF", None)
#Parser
#Parse the tokens into an AST
class PlusNode():
    """Binary AST node for addition: left + right."""

    def __init__(self, left, right):
        # The op tag drives dispatch in the evaluator.
        self.op = "PLUS"
        self.left = left
        self.right = right
class MinusNode():
    """Binary AST node for subtraction: left - right."""

    def __init__(self, left, right):
        # The op tag drives dispatch in the evaluator.
        self.op = "MINUS"
        self.left = left
        self.right = right
class MulNode():
    """Binary AST node for multiplication: left * right."""

    def __init__(self, left, right):
        # The op tag drives dispatch in the evaluator.
        self.op = "MUL"
        self.left = left
        self.right = right
#A node for all the integers
class IntNode():
    """Leaf AST node wrapping an INTEGER token; exposes its numeric value."""

    def __init__(self, token):
        self.op = "INTEGER"
        self.token = token
        self.value = self.token.value
class Parser():
    """Recursive-descent parser building an AST from a Lexer's token stream.

    Grammar:  expr   : term ((PLUS | MINUS) term)*
              term   : factor (MUL factor)*
              factor : (MINUS)? INTEGER
    """

    def __init__(self, lexer):
        # The lexer starts at pos 0; tokenize() returns the current token and
        # advances the lexer's character cursor past it.
        self.lexer = lexer
        self.current_token = self.lexer.tokenize()

    def error(self):
        # BUG FIX: was `def error():` (missing self) and `raise error(...)`,
        # an undefined lowercase name -- any syntax error surfaced as a
        # TypeError/NameError instead of this message.
        raise Exception("Invalid syntax.")

    # factor : (MINUS)? INTEGER  -- produces an IntNode leaf
    def factor(self):
        token = self.current_token
        if token.type == "MINUS":
            # Unary minus: negate the INTEGER token that follows.
            self.current_token = self.lexer.tokenize()
            token = self.current_token
            token.value = -token.value
            node = IntNode(token)
        elif token.type == "INTEGER":
            node = IntNode(token)
        else:
            self.error()
        # Consume the INTEGER we just turned into a node.
        self.current_token = self.lexer.tokenize()
        return node

    # term : factor (MUL factor)*  -- left-associative multiplication
    def term(self):
        node = self.factor()
        while self.current_token.type == "MUL":
            token = self.current_token
            if token.type == "MUL":
                self.current_token = self.lexer.tokenize()
                node = MulNode(left = node, right = self.factor())
        return node

    # expr : term ((PLUS | MINUS) term)*  -- left-associative +/-
    def expr(self):
        node = self.term()
        while self.current_token.type in ("PLUS", "MINUS"):
            token = self.current_token
            if token.type == "PLUS":
                self.current_token = self.lexer.tokenize()
                node = PlusNode(left = node, right = self.term())
            elif token.type == "MINUS":
                self.current_token = self.lexer.tokenize()
                node = MinusNode(left = node, right = self.term())
        return node

    def parse(self):
        """Return the root AST node for the whole expression."""
        return self.expr()
#Interpreter
#Evaluate the programming with AST
def evaluate(node):
    """Recursively fold an AST rooted at `node` down to its integer value."""
    op = node.op
    if op == "INTEGER":
        # Leaf: the literal itself.
        return node.value
    if op == "MUL":
        return evaluate(node.left) * evaluate(node.right)
    if op == "PLUS":
        return evaluate(node.left) + evaluate(node.right)
    if op == "MINUS":
        return evaluate(node.left) - evaluate(node.right)
class Interpreter():
    """Evaluates a parsed expression tree by walking it with `evaluate`."""

    def __init__(self, tree):
        # `tree` is the root node of the AST produced by Parser.parse().
        self.tree = tree

    # Debug helper: print the op tags of the root and its two children.
    def load_tree(self):
        print(self.tree.op)
        print(self.tree.left.op)
        print(self.tree.right.op)

    def error(self):
        # BUG FIX: `Error` was an undefined name, so calling this raised a
        # confusing NameError instead of the intended message.
        raise Exception("This feature is not supported")

    def visit(self):
        """Return the integer value of the whole expression tree."""
        tree = self.tree
        return evaluate(tree)
'''
text = "45-3-7-2"
lex = Lexer(text)
par = Parser(lex)
tree = par.parse()
inter = Interpreter(tree)
#inter.load_tree()
print(inter.visit())
'''
def main():
    """Simple REPL: read one arithmetic expression per stdin line, print its value."""
    while True:
        try:
            #Taking raw inputs
            text = input("")
        except EOFError:
            # End of stdin (e.g. piped input exhausted) terminates the loop.
            break
        # Lex -> parse -> walk the resulting AST.
        lexer = Lexer(text)
        parser = Parser(lexer)
        tree = parser.parse()
        interpreter = Interpreter(tree)
        print(interpreter.visit())
if __name__ == '__main__':
main() | 3,710 | -38 | 737 |
9664835c042e90d1230dac038e374368cbb3ceb1 | 1,284 | py | Python | auto_pose/meshrenderer/gl_utils/glfw_offscreen_context.py | juwangvsu/AugmentedAutoencoder_new | f29d657f369332a6284cf39e615af24758add624 | [
"MIT"
] | 299 | 2018-11-05T20:29:13.000Z | 2022-03-31T11:03:45.000Z | auto_pose/meshrenderer/gl_utils/glfw_offscreen_context.py | juwangvsu/AugmentedAutoencoder_new | f29d657f369332a6284cf39e615af24758add624 | [
"MIT"
] | 103 | 2018-11-21T20:00:24.000Z | 2022-03-30T21:03:56.000Z | auto_pose/meshrenderer/gl_utils/glfw_offscreen_context.py | juwangvsu/AugmentedAutoencoder_new | f29d657f369332a6284cf39e615af24758add624 | [
"MIT"
] | 96 | 2018-11-30T12:03:21.000Z | 2022-02-18T07:15:24.000Z | # -*- coding: utf-8 -*-
import logging as log
import os
from OpenGL.GL import *
import cyglfw3 as glfw
| 25.68 | 77 | 0.634735 | # -*- coding: utf-8 -*-
import logging as log
import os
from OpenGL.GL import *
import cyglfw3 as glfw
class OffscreenContext(object):
    """Hidden 1x1 GLFW window serving as an offscreen OpenGL context.

    Also maintains a frames-per-second estimate that is refreshed once per
    elapsed second of wall-clock time. Usable as a context manager.
    """

    def __init__(self):
        assert glfw.Init(), 'Glfw Init failed!'
        # The window is never shown; it exists only to own a GL context.
        glfw.WindowHint(glfw.VISIBLE, False)
        self._offscreen_context = glfw.CreateWindow(1, 1, "", None)
        assert self._offscreen_context, 'Could not create Offscreen Context!'
        glfw.MakeContextCurrent(self._offscreen_context)
        # FPS bookkeeping: count frames since the last once-per-second reset.
        self.previous_second = glfw.GetTime()
        self.frame_count = 0.0
        self._fps = 0.0

    def update(self):
        # Per-frame tick: pump the event queue, then refresh the FPS estimate.
        self.poll_events()
        self.update_fps_counter()

    def poll_events(self):
        glfw.PollEvents()

    def update_fps_counter(self):
        now = glfw.GetTime()
        elapsed = now - self.previous_second
        if elapsed > 1.0:
            # A full second has passed: publish the rate, restart the window.
            self._fps = float(self.frame_count) / float(elapsed)
            self.previous_second = now
            self.frame_count = 0.0
        self.frame_count += 1.0

    @property
    def fps(self):
        """Most recently published once-per-second FPS estimate."""
        return self._fps

    def close(self):
        glfw.Terminate()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
| 916 | 240 | 23 |
71ad7eeb0397df8745079fc70911f19d55534c40 | 12,461 | py | Python | legacy/dicebox_multi_client.py | shapeandshare/dicebox.sdk | 4cf80f5034e4c3f2f859bc5cbd6607c8af278819 | [
"MIT"
] | 1 | 2018-09-24T21:17:32.000Z | 2018-09-24T21:17:32.000Z | legacy/dicebox_multi_client.py | shapeandshare/dicebox.sdk | 4cf80f5034e4c3f2f859bc5cbd6607c8af278819 | [
"MIT"
] | 6 | 2017-09-25T00:28:18.000Z | 2022-01-13T03:01:32.000Z | legacy/dicebox_multi_client.py | shapeandshare/dicebox.sdk | 4cf80f5034e4c3f2f859bc5cbd6607c8af278819 | [
"MIT"
] | null | null | null | ###############################################################################
# dice box
###############################################################################
import cv
import cv2
from datetime import datetime
import json
import requests
import os
import numpy
import math
from lib import dicebox_config as config # import our high level configuration
# from PIL import Image
# import sys
import os
import errno
# https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
###############################################################################
# configure our camera, and begin our capture and prediction loop
###############################################################################
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 3
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)
camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
font = cv.CV_FONT_HERSHEY_SIMPLEX
# Ramp the camera - these frames will be discarded and are only used to allow v4l2
# to adjust light levels, if necessary
for i in xrange(ramp_frames):
temp = get_image()
# Get our classification categories
server_category_map = get_category_map()
# Setup our default state
global CURRENT_EXPECTED_CATEGORY_INDEX
CURRENT_EXPECTED_CATEGORY_INDEX = 11
MAX_EXPECTED_CATEGORY_INDEX = len(server_category_map)
global MISCLASSIFIED_CATEGORY_INDEX
MISCLASSIFIED_CATEGORY_INDEX = True
global KEEP_INPUT
KEEP_INPUT = False
global ONLY_KEEP_MISCLASSIFIED_INPUT
ONLY_KEEP_MISCLASSIFIED_INPUT = True
global SERVER_ERROR
SERVER_ERROR = False
###############################################################################
# main loop
###############################################################################
while True:
# Take the actual image we want to keep
# camera_capture, resized_image = get_image()
camera_capture = get_image()
cropped_images, marked_capture = crop_image(camera_capture)
left_filename = datetime.now().strftime('capture_left_%Y-%m-%d_%H_%M_%S_%f.png')
middle_filename = datetime.now().strftime('capture_middle_%Y-%m-%d_%H_%M_%S_%f.png')
right_filename = datetime.now().strftime('capture_right_%Y-%m-%d_%H_%M_%S_%f.png')
left_tmp_file_path = "%s/%s" % (config.TMP_DIR, left_filename)
middle_tmp_file_path = "%s/%s" % (config.TMP_DIR, middle_filename)
right_tmp_file_path = "%s/%s" % (config.TMP_DIR, right_filename)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide. Convenient!
left_cropped_image = cropped_images[0]
cropped_image = cropped_images[1]
right_cropped_image = cropped_images[2]
cv2.imwrite(left_tmp_file_path, left_cropped_image)
with open(left_tmp_file_path, 'rb') as tmp_file:
left_content = tmp_file.read()
cv2.imwrite(middle_tmp_file_path, cropped_image)
with open(middle_tmp_file_path, 'rb') as tmp_file:
middle_content = tmp_file.read()
cv2.imwrite(right_tmp_file_path, right_cropped_image)
with open(right_tmp_file_path, 'rb') as tmp_file:
right_content = tmp_file.read()
if KEEP_INPUT:
if not MISCLASSIFIED_CATEGORY_INDEX and ONLY_KEEP_MISCLASSIFIED_INPUT:
os.remove(left_tmp_file_path)
os.remove(middle_tmp_file_path)
os.remove(right_tmp_file_path)
else:
new_path = "%s/%s" % (config.TMP_DIR, server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX-1)])
make_sure_path_exists(new_path)
new_full_path = "%s/%s" % (new_path, middle_filename)
os.rename(middle_tmp_file_path, new_full_path)
os.remove(left_tmp_file_path)
os.remove(right_tmp_file_path)
else:
os.remove(left_tmp_file_path)
os.remove(middle_tmp_file_path)
os.remove(right_tmp_file_path)
base64_encoded_left_content = left_content.encode('base64')
base64_encoded_middle_content = middle_content.encode('base64')
base64_encoded_right_content = right_content.encode('base64')
outbound_content = [base64_encoded_left_content, base64_encoded_middle_content, base64_encoded_right_content]
categories = []
category_result = []
for content in outbound_content:
outjson = {}
outjson['data'] = content
json_data = json.dumps(outjson)
prediction = {}
category = {}
SERVER_ERROR = False
response = make_api_call('api/classify', json_data, 'POST')
if 'classification' in response:
prediction = response['classification']
if prediction != -1:
category = server_category_map[str(prediction)]
categories.append(category)
else:
SERVER_ERROR = True
if category == server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX-1)]:
# MISCLASSIFIED_CATEGORY_INDEX = False
category_result.append(False)
else:
# MISCLASSIFIED_CATEGORY_INDEX = True
category_result.append(True)
MISCLASSIFIED_CATEGORY_INDEX = category_result[1]
cv2.namedWindow('dice box', cv2.WINDOW_NORMAL)
output_display = camera_capture
#resized_display = cv2.resize(output_display, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT))
resized_display = cropped_image
height, width = output_display.shape[:2]
output_display[height - config.IMAGE_HEIGHT:height, 0:config.IMAGE_WIDTH] = resized_display # cv2.cvtColor(resized_display, cv2.COLOR_BGR2GRAY)
output_display = cv2.cvtColor(output_display, cv2.COLOR_GRAY2RGB)
output_label_1 = "[expecting %s]" % server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX - 1)]
cv2.putText(output_display, output_label_1, (5, 20), font, 0.7, (255, 255, 255), 2)
if len(categories) == 3:
output_label_2 = "[left][classified %s][match? %r]" % (categories[0], not category_result[0])
output_label_3 = "[middle][classified %s][match? %r]" % (categories[1], not category_result[1])
output_label_4 = "[right][classified %s][match? %r]" % (categories[2], not category_result[2])
cv2.putText(output_display, output_label_2, (5, 50), font, 0.7, (255, 255, 255), 2)
cv2.putText(output_display, output_label_3, (5, 80), font, 0.7, (255, 255, 255), 2)
cv2.putText(output_display, output_label_4, (5, 110), font, 0.7, (255, 255, 255), 2)
output_label_5 = "[record? %r][only keep misclassified? %r]" % (KEEP_INPUT, ONLY_KEEP_MISCLASSIFIED_INPUT)
output_label_6 = "[server error? %r]" % SERVER_ERROR
cv2.putText(output_display, output_label_5, (5, 140), font, 0.5, (255, 0, 0), 2)
cv2.putText(output_display, output_label_6, (5, 170), font, 0.5, (0, 255, 255), 0)
try:
cv2.imshow('dice box', output_display)
except:
print("Unable to display output!")
input_key = cv2.waitKey(1)
if input_key & 0xFF == ord('q'):
break
if input_key & 0xFF == ord('c'):
KEEP_INPUT = False
if CURRENT_EXPECTED_CATEGORY_INDEX >= MAX_EXPECTED_CATEGORY_INDEX:
CURRENT_EXPECTED_CATEGORY_INDEX = 1
else:
CURRENT_EXPECTED_CATEGORY_INDEX += 1
if input_key & 0xFF == ord('z'):
if KEEP_INPUT is True:
KEEP_INPUT = False
else:
KEEP_INPUT = True
if input_key & 0xFF == ord('b'):
if ONLY_KEEP_MISCLASSIFIED_INPUT is True:
ONLY_KEEP_MISCLASSIFIED_INPUT = False
else:
ONLY_KEEP_MISCLASSIFIED_INPUT = True
###############################################################################
# cleanup
###############################################################################
# You'll want to release the camera, otherwise you won't be able to create a new
# capture object until your script exits
camera.release()
cv2.destroyAllWindows()
| 35.300283 | 148 | 0.644892 | ###############################################################################
# dice box
###############################################################################
import cv
import cv2
from datetime import datetime
import json
import requests
import os
import numpy
import math
from lib import dicebox_config as config # import our high level configuration
# from PIL import Image
# import sys
import os
import errno
# https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
def make_sure_path_exists(path):
    """Create `path` (including parents) unless it already exists."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Already there -- nothing to do.
            pass
        else:
            # Any other failure (permissions, bad path, ...) is a real error.
            raise
###############################################################################
# configure our camera, and begin our capture and prediction loop
###############################################################################
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 3
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)
camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
font = cv.CV_FONT_HERSHEY_SIMPLEX
def get_image():
    """Grab a single frame from the module-level `camera` and return it in grayscale."""
    im = None
    try:
        retval, im = camera.read()
    except:
        # NOTE(review): bare except hides every failure mode; if the read
        # raised (or the capture flag `retval` is False), `im` may be None
        # and cvtColor below will fail -- confirm whether a retry or a clean
        # abort is wanted here.
        print('Unable to read from camera!')
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    return im
def crop_image(image):
    """Cut three side-by-side IMAGE_WIDTH x IMAGE_HEIGHT crops from the frame centre.

    Returns ([left_crop, centre_crop, right_crop], marked_capture), where
    marked_capture is the input frame with the three crop boxes outlined.
    """
    # Crop Image If Required
    # Now ensure we are the same dimensions as when we started
    cropped_image = None
    #marked_capture = None
    #original_width, original_height = im.size
    original_height, original_width = image.shape[:2]
    new_width = config.IMAGE_WIDTH
    new_height = config.IMAGE_HEIGHT
    # Centre crop box (left, upper)-(right, lower) around the frame midpoint.
    new_middle_x = float(new_width) / 2
    new_middle_y = float(new_height) / 2
    left = int((float(original_width) / 2) - new_middle_x)
    upper = int((float(original_height) / 2)- new_middle_y)
    right = int(new_middle_x + float(original_width) / 2)
    lower = int(new_middle_y + float(original_height) / 2)
    # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
    width_offset = int(config.IMAGE_WIDTH)
    # Neighbouring crops sit one full crop-width to either side of the centre.
    left_cropped_image = numpy.copy(image[upper:lower, (left - width_offset):(right - width_offset)])
    cropped_image = numpy.copy(image[upper:lower, left:right])
    right_cropped_image = numpy.copy(image[upper:lower, (left + width_offset):(right + width_offset)])
    # Outline all three boxes on the frame for display (legacy cv API).
    marked_capture = cv.fromarray(image)
    cv.Rectangle(marked_capture, ((left - width_offset)-1, upper-1), ((right-width_offset)+1,lower+1), (255,0,0), thickness=1, lineType=8, shift=0)
    cv.Rectangle(marked_capture, (left-1, upper-1), (right+1,lower+1), (255,0,0), thickness=1, lineType=8, shift=0)
    cv.Rectangle(marked_capture, ((left+width_offset)-1, upper-1), ((right+width_offset)+1,lower+1), (255,0,0), thickness=1, lineType=8, shift=0)
    return [left_cropped_image, cropped_image, right_cropped_image], marked_capture
def resize_keep_aspect_ratio(input_image, desired_size):
    """Letterbox `input_image` into a desired_size x desired_size uint8 canvas.

    The image is scaled so its longest side equals desired_size, then centred
    on a black square, preserving the aspect ratio.
    """
    height, width = input_image.shape[:2]
    height = float(height)
    width = float(width)
    # Scale factor is driven by the longest side.
    if width >= height:
        max_dim = width
    else:
        max_dim = height
    scale = float(desired_size) / max_dim
    # (x, y) is the top-left offset that centres the scaled image on the canvas.
    if width >= height:
        new_width = desired_size
        x = 0
        new_height = height * scale
        y = (desired_size - new_height) / 2
    else:
        y = 0
        new_height = desired_size
        new_width = width * scale
        x = (desired_size - new_width) / 2
    new_height = int(math.floor(new_height))
    new_width = int((math.floor(new_width)))
    resized_input = cv2.resize(input_image, (new_width, new_height))
    # Zero-filled (black) canvas; assumes a single-channel input -- TODO confirm.
    output = numpy.zeros((desired_size, desired_size), numpy.uint8)
    x_offset = int(math.floor(x+new_width))
    y_offset = int(math.floor(y+new_height))
    # now lets drop the resized input onto the output
    output[int(y):int(y_offset), int(x):int(x_offset)] = resized_input
    return output
def get_category_map():
    """Return the classification index -> label map as a dict of strings.

    Reads ./category_map.json (stored label -> index and inverted here); if
    that yields nothing, falls back to asking the classification server.
    """
    jdata = {}
    if len(jdata) == 0:
        # NOTE(review): a missing category_map.json raises IOError here rather
        # than falling through to the server branch -- confirm the file is
        # guaranteed to exist in deployments.
        with open('./category_map.json') as data_file:
            raw_cat_data = json.load(data_file)
        for d in raw_cat_data:
            # Invert: file maps label -> index; we want index -> label.
            jdata[str(raw_cat_data[d])] = str(d)
        print('loaded category map from file.')
    if len(jdata) == 0:
        response = make_api_call('api/categories', None, 'GET')
        if 'category_map' in response:
            jdata = response['category_map']
            print('loaded category map from server.')
    # print(jdata)
    return jdata
def make_api_call(end_point, json_data, call_type):
    """GET or POST `json_data` to the classification server; {} on any failure.

    Returns the decoded JSON body, or an empty dict when the request errors,
    the server answers 500, or `call_type` is neither 'GET' nor 'POST'.
    """
    headers = {
        'Content-type': 'application/json',
        'API-ACCESS-KEY': config.API_ACCESS_KEY,
        'API-VERSION': config.API_VERSION
    }
    try:
        url = "%s%s:%i/%s" % (config.SERVER_URI, config.CLASSIFICATION_SERVER, config.SERVER_PORT, end_point)
        response = None
        if call_type == 'GET':
            response = requests.get(url, data=json_data, headers=headers)
        elif call_type == 'POST':
            response = requests.post(url, data=json_data, headers=headers)
        if response is not None:
            # 500s are treated as "no result"; any other status is decoded as JSON.
            if response.status_code != 500:
                return response.json()
    except:
        # NOTE(review): bare except swallows everything (DNS failure, timeout,
        # bad JSON) and reports it as an empty result -- consider narrowing.
        return {}
    return {}
# Ramp the camera - these frames will be discarded and are only used to allow v4l2
# to adjust light levels, if necessary
for i in xrange(ramp_frames):
temp = get_image()
# Get our classification categories
server_category_map = get_category_map()
# Setup our default state
global CURRENT_EXPECTED_CATEGORY_INDEX
CURRENT_EXPECTED_CATEGORY_INDEX = 11
MAX_EXPECTED_CATEGORY_INDEX = len(server_category_map)
global MISCLASSIFIED_CATEGORY_INDEX
MISCLASSIFIED_CATEGORY_INDEX = True
global KEEP_INPUT
KEEP_INPUT = False
global ONLY_KEEP_MISCLASSIFIED_INPUT
ONLY_KEEP_MISCLASSIFIED_INPUT = True
global SERVER_ERROR
SERVER_ERROR = False
###############################################################################
# main loop
###############################################################################
while True:
# Take the actual image we want to keep
# camera_capture, resized_image = get_image()
camera_capture = get_image()
cropped_images, marked_capture = crop_image(camera_capture)
left_filename = datetime.now().strftime('capture_left_%Y-%m-%d_%H_%M_%S_%f.png')
middle_filename = datetime.now().strftime('capture_middle_%Y-%m-%d_%H_%M_%S_%f.png')
right_filename = datetime.now().strftime('capture_right_%Y-%m-%d_%H_%M_%S_%f.png')
left_tmp_file_path = "%s/%s" % (config.TMP_DIR, left_filename)
middle_tmp_file_path = "%s/%s" % (config.TMP_DIR, middle_filename)
right_tmp_file_path = "%s/%s" % (config.TMP_DIR, right_filename)
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide. Convenient!
left_cropped_image = cropped_images[0]
cropped_image = cropped_images[1]
right_cropped_image = cropped_images[2]
cv2.imwrite(left_tmp_file_path, left_cropped_image)
with open(left_tmp_file_path, 'rb') as tmp_file:
left_content = tmp_file.read()
cv2.imwrite(middle_tmp_file_path, cropped_image)
with open(middle_tmp_file_path, 'rb') as tmp_file:
middle_content = tmp_file.read()
cv2.imwrite(right_tmp_file_path, right_cropped_image)
with open(right_tmp_file_path, 'rb') as tmp_file:
right_content = tmp_file.read()
if KEEP_INPUT:
if not MISCLASSIFIED_CATEGORY_INDEX and ONLY_KEEP_MISCLASSIFIED_INPUT:
os.remove(left_tmp_file_path)
os.remove(middle_tmp_file_path)
os.remove(right_tmp_file_path)
else:
new_path = "%s/%s" % (config.TMP_DIR, server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX-1)])
make_sure_path_exists(new_path)
new_full_path = "%s/%s" % (new_path, middle_filename)
os.rename(middle_tmp_file_path, new_full_path)
os.remove(left_tmp_file_path)
os.remove(right_tmp_file_path)
else:
os.remove(left_tmp_file_path)
os.remove(middle_tmp_file_path)
os.remove(right_tmp_file_path)
base64_encoded_left_content = left_content.encode('base64')
base64_encoded_middle_content = middle_content.encode('base64')
base64_encoded_right_content = right_content.encode('base64')
outbound_content = [base64_encoded_left_content, base64_encoded_middle_content, base64_encoded_right_content]
categories = []
category_result = []
for content in outbound_content:
outjson = {}
outjson['data'] = content
json_data = json.dumps(outjson)
prediction = {}
category = {}
SERVER_ERROR = False
response = make_api_call('api/classify', json_data, 'POST')
if 'classification' in response:
prediction = response['classification']
if prediction != -1:
category = server_category_map[str(prediction)]
categories.append(category)
else:
SERVER_ERROR = True
if category == server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX-1)]:
# MISCLASSIFIED_CATEGORY_INDEX = False
category_result.append(False)
else:
# MISCLASSIFIED_CATEGORY_INDEX = True
category_result.append(True)
MISCLASSIFIED_CATEGORY_INDEX = category_result[1]
cv2.namedWindow('dice box', cv2.WINDOW_NORMAL)
output_display = camera_capture
#resized_display = cv2.resize(output_display, (config.IMAGE_WIDTH, config.IMAGE_HEIGHT))
resized_display = cropped_image
height, width = output_display.shape[:2]
output_display[height - config.IMAGE_HEIGHT:height, 0:config.IMAGE_WIDTH] = resized_display # cv2.cvtColor(resized_display, cv2.COLOR_BGR2GRAY)
output_display = cv2.cvtColor(output_display, cv2.COLOR_GRAY2RGB)
output_label_1 = "[expecting %s]" % server_category_map[str(CURRENT_EXPECTED_CATEGORY_INDEX - 1)]
cv2.putText(output_display, output_label_1, (5, 20), font, 0.7, (255, 255, 255), 2)
if len(categories) == 3:
output_label_2 = "[left][classified %s][match? %r]" % (categories[0], not category_result[0])
output_label_3 = "[middle][classified %s][match? %r]" % (categories[1], not category_result[1])
output_label_4 = "[right][classified %s][match? %r]" % (categories[2], not category_result[2])
cv2.putText(output_display, output_label_2, (5, 50), font, 0.7, (255, 255, 255), 2)
cv2.putText(output_display, output_label_3, (5, 80), font, 0.7, (255, 255, 255), 2)
cv2.putText(output_display, output_label_4, (5, 110), font, 0.7, (255, 255, 255), 2)
output_label_5 = "[record? %r][only keep misclassified? %r]" % (KEEP_INPUT, ONLY_KEEP_MISCLASSIFIED_INPUT)
output_label_6 = "[server error? %r]" % SERVER_ERROR
cv2.putText(output_display, output_label_5, (5, 140), font, 0.5, (255, 0, 0), 2)
cv2.putText(output_display, output_label_6, (5, 170), font, 0.5, (0, 255, 255), 0)
try:
cv2.imshow('dice box', output_display)
except:
print("Unable to display output!")
input_key = cv2.waitKey(1)
if input_key & 0xFF == ord('q'):
break
if input_key & 0xFF == ord('c'):
KEEP_INPUT = False
if CURRENT_EXPECTED_CATEGORY_INDEX >= MAX_EXPECTED_CATEGORY_INDEX:
CURRENT_EXPECTED_CATEGORY_INDEX = 1
else:
CURRENT_EXPECTED_CATEGORY_INDEX += 1
if input_key & 0xFF == ord('z'):
if KEEP_INPUT is True:
KEEP_INPUT = False
else:
KEEP_INPUT = True
if input_key & 0xFF == ord('b'):
if ONLY_KEEP_MISCLASSIFIED_INPUT is True:
ONLY_KEEP_MISCLASSIFIED_INPUT = False
else:
ONLY_KEEP_MISCLASSIFIED_INPUT = True
###############################################################################
# cleanup
###############################################################################
# You'll want to release the camera, otherwise you won't be able to create a new
# capture object until your script exits
camera.release()
cv2.destroyAllWindows()
| 4,129 | 0 | 137 |
59c6f237a998baf3a75150823097b488c556a173 | 1,473 | py | Python | eland/tests/operations/test_map_pd_aggs_to_es_aggs_pytest.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | null | null | null | eland/tests/operations/test_map_pd_aggs_to_es_aggs_pytest.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | 1 | 2020-05-06T01:34:25.000Z | 2020-05-06T01:34:25.000Z | eland/tests/operations/test_map_pd_aggs_to_es_aggs_pytest.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | 1 | 2020-05-06T01:31:18.000Z | 2020-05-06T01:31:18.000Z | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from eland.operations import Operations
| 36.825 | 87 | 0.649695 | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from eland.operations import Operations
def test_all_aggs():
    """Every supported pandas agg name maps to its Elasticsearch counterpart,
    with stats folded into a single extended_stats aggregation where possible."""
    es_aggs = Operations._map_pd_aggs_to_es_aggs(
        ["min", "max", "mean", "std", "var", "mad", "count", "nunique", "median"]
    )
    assert es_aggs == [
        ("extended_stats", "min"),
        ("extended_stats", "max"),
        ("extended_stats", "avg"),
        ("extended_stats", "std_deviation"),
        ("extended_stats", "variance"),
        "median_absolute_deviation",
        ("extended_stats", "count"),
        "cardinality",
        ("percentiles", "50.0"),
    ]
def test_extended_stats_optimization():
    """When '<agg>' appears alongside an extended_stats agg, the mapper reuses
    ('extended_stats', '<agg>') instead of issuing a separate '<agg>'."""
    # Tests that when '<agg>' and an 'extended_stats' agg are used together
    # that ('extended_stats', '<agg>') is used instead of '<agg>'.
    es_aggs = Operations._map_pd_aggs_to_es_aggs(["count", "nunique"])
    assert es_aggs == ["count", "cardinality"]
    for pd_agg in ["var", "std"]:
        extended_es_agg = Operations._map_pd_aggs_to_es_aggs([pd_agg])[0]
        es_aggs = Operations._map_pd_aggs_to_es_aggs([pd_agg, "nunique"])
        assert es_aggs == [extended_es_agg, "cardinality"]
        es_aggs = Operations._map_pd_aggs_to_es_aggs(["count", pd_agg, "nunique"])
        assert es_aggs == [("extended_stats", "count"), extended_es_agg, "cardinality"]
| 1,182 | 0 | 46 |
fd052ded2f5e1744dcd04fbd358f8ba09d27b9d2 | 2,386 | py | Python | src/visualization/plot_dist.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | src/visualization/plot_dist.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | src/visualization/plot_dist.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | import os,sys, math, numpy as np, itertools
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
from pylab import *
import src.utilities as utils

# Plot a residue-residue distance matrix read from a whitespace-separated
# file (columns: i j distance, frames separated by blank lines) and save it
# as <outname>.png.
config = utils.read_config()
mpl.rcParams.update(mpl.rcParamsDefault) # VS Code plots not black
plt.style.use(config['viz'])

# Defaults, overridable from the command line below.
infile='dist.dat' #First input file
outname='dist' #Name output files will take
xlbl='Amino Acid Number'
ylbl='Amino Acid Number'
ttl=''
maxc=16  # upper bound of the colour scale
# Parsed data: mi/mj are residue indices, ol the distances; one sub-list per
# blank-line-separated frame in the input file.
mi=[]
mj=[]
ol=[]
i=-1
#############################################################################
# Read arguments from terminal, and assign input files and a name that all output files will contain.
#############################################################################
for x in range(1,len(sys.argv)):
    if sys.argv[x] == '-i':
        infile = sys.argv[x+1]
    if sys.argv[x] == '-out':
        outname = sys.argv[x+1]
    if sys.argv[x]=='-xlabel':
        xlbl = sys.argv[x+1]
    if sys.argv[x]=='-ylabel':
        ylbl = sys.argv[x+1]
    if sys.argv[x]=='-title':
        ttl = sys.argv[x+1]
    if sys.argv[x]=='-val':
        # BUG FIX: convert to a number; the raw string was later passed as
        # vmax= to pcolor (the '-max' option below already used float()).
        maxc = float(sys.argv[x+1])
    if sys.argv[x]=='-help':
        print('\n\nProgram to plot overlap data...\n\nOPTIONS:\n'\
        '-i = Name of input file (Default=overlap.dat)\n'\
        '-xlabel = Label for x axis (Default=mode i)\n'\
        '-ylabel = Label for y axis (Default=mode j)\n'\
        '-title = Title for plot\n')
        exit()
inlines=open(infile,'r').readlines()
# Drop a trailing blank line so it is not taken as the start of a new frame.
if inlines[-1]=='\n':
    inlines[-1:]=[]
i=i+1
mi.append([])
mj.append([])
ol.append([])
for line in inlines:
    if line=='\n':
        # Blank line: start accumulating the next frame.
        i=i+1
        mi.append([])
        mj.append([])
        ol.append([])
    else:
        mi[i].append(int(line.split()[0]))
        mj[i].append(int(line.split()[1]))
        ol[i].append(float(line.split()[2]))
mi=np.array(mi)
mj=np.array(mj)
ol=np.array(ol)
# Axis limit defaults to the largest residue index; '-max' overrides it.
maxv = mi.max()
for x in range(1,len(sys.argv)):
    if sys.argv[x] == '-max':
        maxv = float(sys.argv[x+1])
fig=plt.figure(1, figsize=(11,8))
ax=fig.add_subplot(111)
cmain=ax.pcolor(mi,mj,ol,vmin=0, vmax=maxc,cmap=plt.cm.gist_yarg_r)
ax.set_title(ttl)
ax.set_xlabel(xlbl)
ax.set_xlim(mi.min(), maxv)
ax.set_ylabel(ylbl)
# y axis is inverted (maxv at the bottom limit) so the matrix reads top-down.
ax.set_ylim(maxv, mj.min())
cbar=fig.colorbar(cmain,aspect=10,ticks=[0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30])
fig.text(.85, .95, 'Distance / $\AA{}$', horizontalalignment='center')
# plt.rcParams.update({'font.size': 22})
plt.savefig(outname+'.png',format='png')
plt.show()
print('DONE')
| 22.299065 | 102 | 0.601006 | import os,sys, math, numpy as np, itertools
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
from pylab import *
import src.utilities as utils
config = utils.read_config()
mpl.rcParams.update(mpl.rcParamsDefault) # VS Code plots not black
plt.style.use(config['viz'])
infile='dist.dat' #First input file
outname='dist' #Name output files will take
xlbl='Amino Acid Number'
ylbl='Amino Acid Number'
ttl=''
maxc=16
mi=[]
mj=[]
ol=[]
i=-1
#############################################################################
# Read arguments from terminal, and assign input files and a name that all output files will contain.
#############################################################################
for x in range(1,len(sys.argv)):
if sys.argv[x] == '-i':
infile = sys.argv[x+1]
if sys.argv[x] == '-out':
outname = sys.argv[x+1]
if sys.argv[x]=='-xlabel':
xlbl = sys.argv[x+1]
if sys.argv[x]=='-ylabel':
ylbl = sys.argv[x+1]
if sys.argv[x]=='-title':
ttl = sys.argv[x+1]
if sys.argv[x]=='-val':
maxc = sys.argv[x+1]
if sys.argv[x]=='-help':
print('\n\nProgram to plot overlap data...\n\nOPTIONS:\n'\
'-i = Name of input file (Default=overlap.dat)\n'\
'-xlabel = Label for x axis (Default=mode i)\n'\
'-ylabel = Label for y axis (Default=mode j)\n'\
'-title = Title for plot\n')
exit()
inlines=open(infile,'r').readlines()
if inlines[-1]=='\n':
inlines[-1:]=[]
i=i+1
mi.append([])
mj.append([])
ol.append([])
for line in inlines:
if line=='\n':
i=i+1
mi.append([])
mj.append([])
ol.append([])
else:
mi[i].append(int(line.split()[0]))
mj[i].append(int(line.split()[1]))
ol[i].append(float(line.split()[2]))
mi=np.array(mi)
mj=np.array(mj)
ol=np.array(ol)
maxv = mi.max()
for x in range(1,len(sys.argv)):
if sys.argv[x] == '-max':
maxv = float(sys.argv[x+1])
fig=plt.figure(1, figsize=(11,8))
ax=fig.add_subplot(111)
cmain=ax.pcolor(mi,mj,ol,vmin=0, vmax=maxc,cmap=plt.cm.gist_yarg_r)
ax.set_title(ttl)
ax.set_xlabel(xlbl)
ax.set_xlim(mi.min(), maxv)
ax.set_ylabel(ylbl)
ax.set_ylim(maxv, mj.min())
cbar=fig.colorbar(cmain,aspect=10,ticks=[0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30])
fig.text(.85, .95, 'Distance / $\AA{}$', horizontalalignment='center')
# plt.rcParams.update({'font.size': 22})
plt.savefig(outname+'.png',format='png')
plt.show()
print('DONE')
| 0 | 0 | 0 |
50b592b7a39165dd3bf0043bc15a4c59db51dd20 | 1,048 | py | Python | WindNinja_learning/nwp.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | WindNinja_learning/nwp.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | 12 | 2021-11-30T16:56:05.000Z | 2021-12-13T16:26:31.000Z | WindNinja_learning/nwp.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | import numpy as np
import xarray as xr
| 29.942857 | 91 | 0.678435 | import numpy as np
import xarray as xr
def preprocess_function(netCDF_file):
try:
netCDF_file = netCDF_file.assign_coords(time=("time", netCDF_file.time.data))
except:
netCDF_file = netCDF_file.assign_coords(time=("oldtime", netCDF_file.time.data))
netCDF_file = netCDF_file.assign_coords(xx=("xx", list(range(netCDF_file.dims['xx']))))
netCDF_file = netCDF_file.assign_coords(yy=("yy", list(range(netCDF_file.dims['yy']))))
try:
netCDF_file = netCDF_file.rename({'oldtime': 'time'})
except:
pass
return netCDF_file
def load_netcdf_and_preprocess(path, dtype=np.float32):
dataset = xr.open_mfdataset(path,
preprocess=preprocess_function,
concat_dim='time').astype(dtype, copy=False)
return dataset
def select_time_range_xr(dataset, begin, end):
return dataset.sel(time=slice(begin, end))
def select_station_grid_point_in_NWP(nwp, x_idx_nwp, y_idx_nwp):
return nwp.isel(xx=x_idx_nwp, yy=y_idx_nwp)
| 913 | 0 | 92 |
39fff929dd82ff62b575763af114fc78809ce1dc | 1,639 | py | Python | lib/uiEntityClass.py | Dogeek/codevo | 690d161b4099d37597246f1ca3164f60a350e662 | [
"MIT"
] | null | null | null | lib/uiEntityClass.py | Dogeek/codevo | 690d161b4099d37597246f1ca3164f60a350e662 | [
"MIT"
] | null | null | null | lib/uiEntityClass.py | Dogeek/codevo | 690d161b4099d37597246f1ca3164f60a350e662 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
import math
from . import *
#from .functions import *
#from .constants import *
| 36.422222 | 85 | 0.705918 | import pygame
from pygame.locals import *
import math
from . import *
#from .functions import *
#from .constants import *
class UiEntity(pygame.sprite.Sprite):
def __init__(self, player):
pygame.sprite.Sprite.__init__(self, uiEntityGroup)
self.heart_sprites = []
heart_spritesheet = pygame.image.load(PATH+"sprites/ui/hearts.png").convert()
self.magic_bar_sprite = pygame.image.load(PATH+"sprites/ui/magicbar.png").convert()
self.magic_bar_sprite.set_colorkey((0, 255, 0))
self.magic_sprite = pygame.image.load(PATH+"sprites/ui/magic.png").convert()
self.magic_sprite.set_colorkey((255, 0, 255))
for i in range(4):
X = i*32
surf = pygame.Surface((32, 32))
surf.blit(heart_spritesheet, (0,0), (X, 0, 32, 32))
#surf.set_colorkey((255, 0, 255))
self.heart_sprites.append(surf)
self.hearts = player.health/4
self.player_magic = player.magic
self.update()
def update(self):
self.surface = pygame.Surface((max(int(math.ceil(self.hearts))*32, 104), 58))
self.surface.fill((255, 0, 255))
hearts_full = int(math.floor(self.hearts))
leftover = self.hearts - hearts_full
for i in range(hearts_full):
self.surface.blit(self.heart_sprites[0], (i*32, 0))
if leftover == 0.25:
self.surface.blit(self.heart_sprites[3], (leftover*32, 0))
elif leftover == 0.5:
self.surface.blit(self.heart_sprites[2], (leftover*32, 0))
elif leftover == 0.75:
self.surface.blit(self.heart_sprites[1], (leftover*32, 0))
for i in range(self.player_magic):
self.surface.blit(self.magic_sprite, (i+2, 22))
self.surface.blit(self.magic_bar_sprite, (0, 22))
self.surface.set_colorkey((255, 0, 255))
| 1,428 | 16 | 72 |
86f6eecde8359d62f3d37ec3b0aaee3b5aa05a27 | 799 | py | Python | profit/dataset/preprocessing/__init__.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | null | null | null | profit/dataset/preprocessing/__init__.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | 1 | 2021-09-15T13:13:12.000Z | 2021-09-15T13:13:12.000Z | profit/dataset/preprocessing/__init__.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | [
"MIT"
] | null | null | null | from profit.dataset.preprocessing import mol_feats
from profit.dataset.preprocessing import mutator
from profit.dataset.preprocessing import seq_feats
from profit.dataset.preprocessing.mol_feats import construct_adj_matrix
from profit.dataset.preprocessing.mol_feats import construct_mol_features
from profit.dataset.preprocessing.mol_feats import check_num_atoms
from profit.dataset.preprocessing.mol_feats import construct_pos_matrix
from profit.dataset.preprocessing.mol_feats import MolFeatureExtractionError
from profit.dataset.preprocessing.mutator import PDBMutator
from profit.dataset.preprocessing.seq_feats import check_num_residues
from profit.dataset.preprocessing.seq_feats import construct_embedding
from profit.dataset.preprocessing.seq_feats import SequenceFeatureExtractionError
| 49.9375 | 81 | 0.894869 | from profit.dataset.preprocessing import mol_feats
from profit.dataset.preprocessing import mutator
from profit.dataset.preprocessing import seq_feats
from profit.dataset.preprocessing.mol_feats import construct_adj_matrix
from profit.dataset.preprocessing.mol_feats import construct_mol_features
from profit.dataset.preprocessing.mol_feats import check_num_atoms
from profit.dataset.preprocessing.mol_feats import construct_pos_matrix
from profit.dataset.preprocessing.mol_feats import MolFeatureExtractionError
from profit.dataset.preprocessing.mutator import PDBMutator
from profit.dataset.preprocessing.seq_feats import check_num_residues
from profit.dataset.preprocessing.seq_feats import construct_embedding
from profit.dataset.preprocessing.seq_feats import SequenceFeatureExtractionError
| 0 | 0 | 0 |
729efd3596279ece5858734443f087028b153fd0 | 4,806 | py | Python | tests/models/test_tensorflow.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | null | null | null | tests/models/test_tensorflow.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | null | null | null | tests/models/test_tensorflow.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Neal Lathia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
import pytest
import tensorflow as tf
from modelstore.models.tensorflow import (
MODEL_DIRECTORY,
TensorflowManager,
_save_model,
_save_weights,
save_json,
)
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
tf.config.threading.set_inter_op_parallelism_threads(1)
@pytest.fixture()
@pytest.fixture
@pytest.mark.parametrize(
"ml_library,should_match",
[
("tensorflow", True),
("keras", True),
("xgboost", False),
],
)
| 29.484663 | 88 | 0.700375 | # Copyright 2020 Neal Lathia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
import pytest
import tensorflow as tf
from modelstore.models.tensorflow import (
MODEL_DIRECTORY,
TensorflowManager,
_save_model,
_save_weights,
save_json,
)
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
tf.config.threading.set_inter_op_parallelism_threads(1)
@pytest.fixture()
def tf_model():
model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(5, activation="relu", input_shape=(10,)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1),
]
)
model.compile(optimizer="adam", loss="mean_squared_error")
return model
@pytest.fixture
def tf_manager():
return TensorflowManager()
def assert_models_equal(
model_a: tf.keras.Model, model_b: tf.keras.Model, assert_predictions: bool = True
):
# Same high-level structure
assert type(model_a) == type(model_b)
assert model_a.count_params() == model_b.count_params()
assert len(model_a.layers) == len(model_b.layers)
# Same structure
for i in range(len(model_a.layers)):
assert (
model_a.layers[i].__class__.__name__ == model_b.layers[i].__class__.__name__
)
# Same predictions
if assert_predictions:
test_input = np.random.random((128, 10))
np.testing.assert_allclose(
model_a.predict(test_input), model_b.predict(test_input)
)
def test_model_info(tf_manager):
exp = {"library": "tensorflow"}
res = tf_manager._model_info()
assert exp == res
@pytest.mark.parametrize(
"ml_library,should_match",
[
("tensorflow", True),
("keras", True),
("xgboost", False),
],
)
def test_is_same_library(tf_manager, ml_library, should_match):
assert tf_manager._is_same_library({"library": ml_library}) == should_match
def test_model_data(tf_manager, tf_model):
exp = {}
res = tf_manager._model_data(model=tf_model)
assert exp == res
def test_required_kwargs(tf_manager):
assert tf_manager._required_kwargs() == ["model"]
def test_matches_with(tf_manager, tf_model):
assert tf_manager.matches_with(model=tf_model)
assert not tf_manager.matches_with(model="a-string-value")
assert not tf_manager.matches_with(classifier=tf_model)
def test_get_functions(tf_manager, tf_model):
assert len(tf_manager._get_functions(model=tf_model)) == 3
def test_get_params(tf_manager, tf_model):
exp = tf_model.optimizer.get_config()
res = tf_manager._get_params(model=tf_model)
assert exp == res
def test_save_model(tmp_path, tf_model):
exp = os.path.join(tmp_path, "model")
model_path = _save_model(tmp_path, tf_model)
assert exp == model_path
assert os.path.isdir(model_path)
loaded_model = tf.keras.models.load_model(model_path)
assert_models_equal(tf_model, loaded_model)
def test_save_weights(tf_model, tmp_path):
exp = os.path.join(tmp_path, "checkpoint")
file_path = _save_weights(tmp_path, model=tf_model)
assert file_path == exp
assert os.path.isfile(file_path)
loaded_model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(5, activation="relu", input_shape=(10,)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1),
]
)
loaded_model.load_weights(file_path).expect_partial()
assert_models_equal(tf_model, loaded_model)
def test_model_json(tf_model, tmp_path):
exp = os.path.join(tmp_path, "model_config.json")
file_path = save_json(tmp_path, "model_config.json", tf_model.to_json())
assert file_path == exp
with open(file_path, "r") as lines:
model_json = json.loads(lines.read())
model = tf.keras.models.model_from_json(model_json)
assert_models_equal(model, tf_model, assert_predictions=False)
def test_load_model(tmp_path, tf_manager, tf_model):
# Save the model to a tmp directory
model_path = os.path.join(tmp_path, MODEL_DIRECTORY)
tf_model.save(model_path)
# Load the model
loaded_model = tf_manager.load(tmp_path, {})
# Expect the two to be the same
assert_models_equal(tf_model, loaded_model)
| 3,326 | 0 | 319 |
ed60387424a02a928c8ef69020f22264e81ae158 | 524 | py | Python | tests/test_main_game.py | sandu-alexandru/hero_game | f8094cec260b45e7e1e2b847cd4bb5ae726a38f6 | [
"MIT"
] | null | null | null | tests/test_main_game.py | sandu-alexandru/hero_game | f8094cec260b45e7e1e2b847cd4bb5ae726a38f6 | [
"MIT"
] | null | null | null | tests/test_main_game.py | sandu-alexandru/hero_game | f8094cec260b45e7e1e2b847cd4bb5ae726a38f6 | [
"MIT"
] | null | null | null | from game_hero.main_game import HeroGame
def test_start_game():
"""
Tests output of the game as string
"""
game_instance = HeroGame()
game_output = game_instance.start_game()
assert isinstance(game_output, str), "Output of the game is not string!"
def test_game_instance():
"""
Tests singleton implementation for the HeroGame instances.
"""
first_instance = HeroGame()
second_instance = HeroGame()
assert first_instance is second_instance, "Different instances for game!"
| 26.2 | 77 | 0.709924 | from game_hero.main_game import HeroGame
def test_start_game():
"""
Tests output of the game as string
"""
game_instance = HeroGame()
game_output = game_instance.start_game()
assert isinstance(game_output, str), "Output of the game is not string!"
def test_game_instance():
"""
Tests singleton implementation for the HeroGame instances.
"""
first_instance = HeroGame()
second_instance = HeroGame()
assert first_instance is second_instance, "Different instances for game!"
| 0 | 0 | 0 |
012bae7ff5639b2c9a4156f6f0c70ca87eab3abc | 6,948 | py | Python | previousTries (1).py | Katzuno/BlackBoxClassification-IA-Sem2 | 79869dc5abf4cb04b12ff9b722c77a2c296b144d | [
"MIT"
] | null | null | null | previousTries (1).py | Katzuno/BlackBoxClassification-IA-Sem2 | 79869dc5abf4cb04b12ff9b722c77a2c296b144d | [
"MIT"
] | null | null | null | previousTries (1).py | Katzuno/BlackBoxClassification-IA-Sem2 | 79869dc5abf4cb04b12ff9b722c77a2c296b144d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 23:15:21 2019
@author: erikh
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 21:15:45 2019
@author: erikh
"""
# Data Preprocessing Template
"""
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
# Importing the dataset
X = pd.read_csv('train_samples.csv', header=None)#, nrows = 5000)
y = pd.read_csv('train_labels.csv', header=None)#, nrows = 5000)
"""
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:, 4].values
"""
print('Dataset loaded')
for i in range(len(y)):
if y.iloc[i][0] == 5 or y.iloc[i][0] == 7:
y = y.append(y.iloc[i])
y = y.append(y.iloc[i])
X = X.append(X.iloc[i])
X = X.append(X.iloc[i])
print ('Classes 5 and 7 doubled')
X = X.append(X)
y = y.append(y)
X, y = shuffle(X, y)
print('Dataset doubled and shuffled')
mu, sigma = 0, 0.15
# creating a noise with the same dimension as the dataset (2,2)
noise = np.random.normal(mu, sigma, [X.shape[0],X.shape[1]])
X = X + noise
print('Noise generated')
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
#X_train, X_test2, y_train, y_test2 = train_test_split(X_train2, y_train2, test_size = 0.2)
# Feature Scaling
"""
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
"""
# Fitting Random forrest to the Training set
# Create classifier
"""
# Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 99, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
"""
# Classic SVM
"""
from sklearn.svm import SVC
classifier = SVC(C = 0.9, kernel = 'linear')
#classifier.fit(X_train, y_train)
"""
"""
from sklearn.model_selection import GridSearchCV
parameters =[ {'C': [0.01, 0.1, 1, 10], #so called `eta` value
'kernel': ['linear'],
'gamma': [0.001, 0.01, 0.1, 1],
'random_state': [0]
},
{'C': [0.01, 0.1, 1, 10], #so called `eta` value
'kernel': ['sigmoid'],
'coef0': [0.0, 0.1, 0.3, 0.4],
'gamma': [0.001, 0.01, 0.1, 1],
'random_state': [0]
}
]
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 5,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
# Fine Tuned XGBoost
"""
from xgboost import XGBClassifier
classifier = XGBClassifier(n_estimators = 100, learning_rate = 0.05, max_depth = 2, min_child_weight = 2, gamma = 0.05, subsample = 0.7, colsample_bytree = 0.9, n_jobs = -1)
"""
# Perceptron neural network
from sklearn.neural_network import MLPClassifier # importul clasei
from sklearn.linear_model import Perceptron
classifier = MLPClassifier(hidden_layer_sizes=((100)),
activation='relu', solver='adam', batch_size='auto',
learning_rate='adaptive', learning_rate_init=0.001, power_t=0.5,
max_iter=100, shuffle=True, random_state=None, tol=0.0001,
momentum=0.9, early_stopping=True, validation_fraction=0.25, verbose = True)
#perceptron_model.fit(X, y)
classifier.fit(X_train, y_train)
# Predicting the test set results
y_pred = classifier.predict(X_test)
x_pred = classifier.predict(X_train)
"""
y_pred2 = classifier.predict(X_test2)
x_pred2 = classifier.predict(X_train2)
"""
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm2 = confusion_matrix(y_train, x_pred)
# PERCEPTRON GRID SEARCH
"""
from sklearn.model_selection import GridSearchCV
parameters = {'hidden_layer_sizes': [(200, 200), (150, 150)], #so called `eta` value
'learning_rate': ['adaptive'],
'max_iter': [100],
'early_stopping': [True]
}
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 5,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
# Applying 10-Fold Cross Validation
"""
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, n_jobs = -1, cv = 5)
avg_accuracy = accuracies.mean()
accuracies.std()
"""
# Applying Grid Search to find the best model and the best parameters
"""
from sklearn.model_selection import GridSearchCV
parameters = {'learning_rate': [0.05], #so called `eta` value
'max_depth': [2],
'min_child_weight': [2],
'gamma': [0.05],
'subsample': [0.7],
'colsample_bytree': [0.9],
'n_estimators': [100]
}
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 3,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
y_pred.shape = (len(y_pred), y_test.shape[1])
x_pred.shape = (len(x_pred), y_train.shape[1])
y_pred2.shape = (len(y_pred2), y_test2.shape[1])
x_pred2.shape = (len(x_pred2), y_train2.shape[1])
print('Accuracy TRAIN: ', get_accuracy(x_pred, y_train))
print('Accuracy TEST: ', get_accuracy(y_pred, y_test))
print('Accuracy TRAIN 2: ', get_accuracy(x_pred2, y_train2))
print('Accuracy TEST 2: ', get_accuracy(y_pred2, y_test2))
print('----- CREATING KAGGLE SUBMISSION FORMAT ----')
to_predict = pd.read_csv('test_samples.csv', header=None)
results = pd.DataFrame(columns = ['Id', 'Prediction'])
sample_predictions = classifier.predict(to_predict)
for i in range(len(to_predict)):
results = results.append({'Id': i+1, 'Prediction':sample_predictions[i]}, ignore_index=True)
results.to_csv('PERCEPTRON-NN-15k--NOISE-0.15-variable-1-layerss-doubled-100-neurons.csv', encoding='utf-8', index=False)
| 31.726027 | 174 | 0.614421 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 23:15:21 2019
@author: erikh
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 21:15:45 2019
@author: erikh
"""
# Data Preprocessing Template
"""
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
def get_accuracy(pred, real):
return len(pred[pred == real]) / len(pred)
# Importing the dataset
X = pd.read_csv('train_samples.csv', header=None)#, nrows = 5000)
y = pd.read_csv('train_labels.csv', header=None)#, nrows = 5000)
"""
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:, 4].values
"""
print('Dataset loaded')
for i in range(len(y)):
if y.iloc[i][0] == 5 or y.iloc[i][0] == 7:
y = y.append(y.iloc[i])
y = y.append(y.iloc[i])
X = X.append(X.iloc[i])
X = X.append(X.iloc[i])
print ('Classes 5 and 7 doubled')
X = X.append(X)
y = y.append(y)
X, y = shuffle(X, y)
print('Dataset doubled and shuffled')
mu, sigma = 0, 0.15
# creating a noise with the same dimension as the dataset (2,2)
noise = np.random.normal(mu, sigma, [X.shape[0],X.shape[1]])
X = X + noise
print('Noise generated')
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
#X_train, X_test2, y_train, y_test2 = train_test_split(X_train2, y_train2, test_size = 0.2)
# Feature Scaling
"""
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
"""
# Fitting Random forrest to the Training set
# Create classifier
"""
# Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 99, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
"""
# Classic SVM
"""
from sklearn.svm import SVC
classifier = SVC(C = 0.9, kernel = 'linear')
#classifier.fit(X_train, y_train)
"""
"""
from sklearn.model_selection import GridSearchCV
parameters =[ {'C': [0.01, 0.1, 1, 10], #so called `eta` value
'kernel': ['linear'],
'gamma': [0.001, 0.01, 0.1, 1],
'random_state': [0]
},
{'C': [0.01, 0.1, 1, 10], #so called `eta` value
'kernel': ['sigmoid'],
'coef0': [0.0, 0.1, 0.3, 0.4],
'gamma': [0.001, 0.01, 0.1, 1],
'random_state': [0]
}
]
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 5,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
# Fine Tuned XGBoost
"""
from xgboost import XGBClassifier
classifier = XGBClassifier(n_estimators = 100, learning_rate = 0.05, max_depth = 2, min_child_weight = 2, gamma = 0.05, subsample = 0.7, colsample_bytree = 0.9, n_jobs = -1)
"""
# Perceptron neural network
from sklearn.neural_network import MLPClassifier # importul clasei
from sklearn.linear_model import Perceptron
classifier = MLPClassifier(hidden_layer_sizes=((100)),
activation='relu', solver='adam', batch_size='auto',
learning_rate='adaptive', learning_rate_init=0.001, power_t=0.5,
max_iter=100, shuffle=True, random_state=None, tol=0.0001,
momentum=0.9, early_stopping=True, validation_fraction=0.25, verbose = True)
#perceptron_model.fit(X, y)
classifier.fit(X_train, y_train)
# Predicting the test set results
y_pred = classifier.predict(X_test)
x_pred = classifier.predict(X_train)
"""
y_pred2 = classifier.predict(X_test2)
x_pred2 = classifier.predict(X_train2)
"""
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm2 = confusion_matrix(y_train, x_pred)
# PERCEPTRON GRID SEARCH
"""
from sklearn.model_selection import GridSearchCV
parameters = {'hidden_layer_sizes': [(200, 200), (150, 150)], #so called `eta` value
'learning_rate': ['adaptive'],
'max_iter': [100],
'early_stopping': [True]
}
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 5,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
# Applying 10-Fold Cross Validation
"""
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, n_jobs = -1, cv = 5)
avg_accuracy = accuracies.mean()
accuracies.std()
"""
# Applying Grid Search to find the best model and the best parameters
"""
from sklearn.model_selection import GridSearchCV
parameters = {'learning_rate': [0.05], #so called `eta` value
'max_depth': [2],
'min_child_weight': [2],
'gamma': [0.05],
'subsample': [0.7],
'colsample_bytree': [0.9],
'n_estimators': [100]
}
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 3,
n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
"""
y_pred.shape = (len(y_pred), y_test.shape[1])
x_pred.shape = (len(x_pred), y_train.shape[1])
y_pred2.shape = (len(y_pred2), y_test2.shape[1])
x_pred2.shape = (len(x_pred2), y_train2.shape[1])
print('Accuracy TRAIN: ', get_accuracy(x_pred, y_train))
print('Accuracy TEST: ', get_accuracy(y_pred, y_test))
print('Accuracy TRAIN 2: ', get_accuracy(x_pred2, y_train2))
print('Accuracy TEST 2: ', get_accuracy(y_pred2, y_test2))
print('----- CREATING KAGGLE SUBMISSION FORMAT ----')
to_predict = pd.read_csv('test_samples.csv', header=None)
results = pd.DataFrame(columns = ['Id', 'Prediction'])
sample_predictions = classifier.predict(to_predict)
for i in range(len(to_predict)):
results = results.append({'Id': i+1, 'Prediction':sample_predictions[i]}, ignore_index=True)
results.to_csv('PERCEPTRON-NN-15k--NOISE-0.15-variable-1-layerss-doubled-100-neurons.csv', encoding='utf-8', index=False)
| 56 | 0 | 25 |
c85c112ba096f11bd69535331666d12ef9fc027a | 13,077 | py | Python | lib/python2.7/site-packages/appionlib/apIMAGIC.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/appionlib/apIMAGIC.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/appionlib/apIMAGIC.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | import os
import re
import shutil
import sys
import stat
import time
import subprocess
import glob
from appionlib import apDisplay
from appionlib import apParam
from pyami import imagic2mrc
#======================
#======================
#======================
def executeImagicBatchFile(filename, showcmd=True, verbose=False, logfile=None):
"""
executes an IMAGIC batch file in a controlled fashion
"""
proc = subprocess.Popen("chmod 755 "+filename, shell=True)
proc.wait()
path = os.path.dirname(filename)
os.chdir(path)
waited = False
t0 = time.time()
try:
if logfile is not None:
logf = open(logfile, 'a')
process = subprocess.Popen(filename, shell=True, stdout=logf, stderr=logf)
elif verbose is False:
devnull = open('/dev/null', 'w')
process = subprocess.Popen(filename, shell=True, stdout=devnull, stderr=devnull)
else:
process = subprocess.Popen(filename, shell=True)
if verbose is True:
out, err = process.communicate()
if out is not None and err is not None:
print "IMAGIC error", out, err
else:
out, err = process.communicate()
### continuous check
waittime = 0.01
while process.poll() is None:
if waittime > 0.05:
waited = True
sys.stderr.write(".")
waittime *= 1.02
time.sleep(waittime)
except:
apDisplay.printWarning("could not run IMAGIC batchfile: "+filename)
raise
tdiff = time.time() - t0
if tdiff > 20:
apDisplay.printMsg("completed in "+apDisplay.timeString(tdiff))
elif waited is True:
print ""
#======================
#======================
#======================
#======================
def checkLogFileForErrors(logfile):
"""
checks for any errors arising in IMAGIC log file, provided as a full path & filename
"""
logf = open(logfile)
loglines = logf.readlines()
for line in loglines:
if re.search("ERROR in program", line):
apDisplay.printError("ERROR IN IMAGIC SUBROUTINE, please check the logfile: "+logfile)
elif re.search("ERROR: all pixels", line):
apDisplay.printError("ERROR IN IMAGIC SUBROUTINE, please check the logfile: "+logfile)
logf.close()
#======================
def mask2D(boxsz, mask, infile=False, maskfile="mask2Dimgfile", path=os.path.abspath('.'), keepfiles=False):
"""
creates a 2d circular mask
if infile is specified, mask is applied to stack & then mask is deleted
boxsz is the box size in pixels
mask is the size of the mask to apply as a fraction
"""
imagicroot = checkImagicExecutablePath()
batchfile = os.path.join(path, 'maskimg.batch')
logf = os.path.join(path, 'maskimg.log')
### generate a 2D mask
f=open(batchfile,"w")
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("%s/stand/testim.e <<EOF\n"%imagicroot)
f.write("%s\n"%maskfile)
f.write("%i,%i\n"%(boxsz,boxsz))
f.write("real\n")
f.write("disc\n")
f.write("%.3f\n"%mask)
f.write("EOF\n")
if not infile:
f.close()
apDisplay.printMsg("creating 2D mask")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(maskfile+".hed"):
apDisplay.printError("mask generation did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return maskfile+".hed"
### if infile is specified, apply mask to images
fname,ext=os.path.splitext(infile)
if not os.path.exists(fname+".hed"):
apDisplay.printError("input file: '%s' is not in imagic format"%infile)
file_ma=fname+"_ma"
f.write("%s/stand/twoimag.e <<EOF\n"%imagicroot)
f.write("mul\n")
f.write("%s\n"%fname)
f.write("%s\n"%maskfile)
f.write("%s\n"%file_ma)
f.write("EOF\n")
f.close()
apDisplay.printMsg("applying 2D mask")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(file_ma+".hed"):
apDisplay.printError("masking did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return file_ma
#======================
def rotateStack(infile, ang, path=os.path.abspath('.'), keepfiles=False):
	"""
	creates a rotated copy of a stack

	infile: particle stack in imagic format (.hed/.img pair)
	ang: rotation angle in degrees
	path: working directory for the batch script and log file
	keepfiles: if True, the batch script and log file are not removed

	returns the basename of the rotated stack ("<infile root>_rot")
	"""
	imagicroot = checkImagicExecutablePath()
	imagicv = getImagicVersion(imagicroot)
	batchfile = os.path.join(path, 'rotate.batch')
	logf = os.path.join(path, 'rotate.log')
	rootname, ext = os.path.splitext(infile)
	if not os.path.exists(rootname+".hed"):
		apDisplay.printError("input file: '%s' is not in imagic format"%infile)
	file_rot = rootname+"_rot"
	### assemble the rotate batch script as a list of answer lines
	script = [
		"#!/bin/csh -f",
		"setenv IMAGIC_BATCH 1",
		"%s/stand/rotate.e MODE ROTATE << EOF"%imagicroot,
		"NO",
		rootname,
		file_rot,
	]
	if imagicv < 100312:
		# older IMAGIC releases ask one additional question here
		script.append("NO")
	script.extend([
		"%.3f"%ang,
		"NO",
		"EOF",
	])
	batchf = open(batchfile, 'w')
	batchf.write("\n".join(script) + "\n")
	batchf.close()
	apDisplay.printMsg("rotating particles by %.3f degrees"%ang)
	executeImagicBatchFile(batchfile, logfile=logf)
	# check proper execution
	if not os.path.exists(file_rot+".hed"):
		apDisplay.printError("rotate.e did not execute properly")
	checkLogFileForErrors(logf)
	if keepfiles is not True:
		os.remove(batchfile)
		os.remove(logf)
	return file_rot
#======================
def runMSA(infile, maskf="none.hed", iter=50, numeig=69, overcor=0.8, nproc=1, path=os.path.abspath('.'), keepfiles=False):
	"""
	performs multivariate statistical analysis with IMAGIC msa.e

	infile: particle stack in imagic format (.hed/.img pair)
	maskf: mask image in imagic format, or False for no mask file
	iter: maximum number of MSA iterations
	numeig: number of eigenimages to compute
	overcor: overcorrection factor
	nproc: number of MPI processes; >1 runs msa.e_mpi via mpirun
	path: working directory for batch/log/output files
	keepfiles: if True, the batch script and log file are not removed

	returns the basename of the MSA output files
	"""
	imagicroot = checkImagicExecutablePath()
	# version is not used below, but the call sanity-checks the IMAGIC install
	imagicv = getImagicVersion(imagicroot)
	batchfile = os.path.join(path, 'msa.batch')
	logf = os.path.join(path, 'msa.log')
	fname,ext=os.path.splitext(infile)
	if not os.path.exists(fname+".hed"):
		apDisplay.printError("input file: '%s' is not in imagic format"%infile)
	if maskf is not False:
		mname,ext=os.path.splitext(maskf)
		if not os.path.exists(mname+".hed"):
			# bug fix: previously reported infile instead of the mask file
			apDisplay.printError("input mask file: '%s' is not in imagic format"%maskf)
	else:
		# bug fix: mname was undefined when maskf is False, causing a
		# NameError at the f.write below; "none" is passed to msa.e as the
		# mask name -- TODO(review): confirm msa.e accepts this
		mname = "none"
	outbase = os.path.join(path,"my_msa")
	### msa batch
	f=open(batchfile,'w')
	f.write("#!/bin/csh -f\n")
	f.write("setenv IMAGIC_BATCH 1\n")
	if nproc > 1:
		f.write("%s/openmpi/bin/mpirun -np %i"%(imagicroot,nproc)+\
			" -x IMAGIC_BATCH %s/msa/msa.e_mpi <<EOF\n"%imagicroot)
		f.write("YES\n")
		f.write("%i\n"%nproc)
	else:
		f.write("%s/msa/msa.e << EOF\n"%imagicroot)
	# first common answer line (serial and MPI variants both expect it here)
	f.write("NO\n")
	f.write("FRESH\n")
	f.write("MODULATION\n")
	f.write("%s\n"%fname)
	if nproc > 1:
		# MPI variant asks one extra question at this point
		f.write("NO\n")
	f.write("%s\n"%mname)
	f.write("%s\n"%os.path.join(path,"eigenim"))
	f.write("%s\n"%os.path.join(path,"pixcoos"))
	f.write("%s\n"%os.path.join(path,"eigenpix"))
	f.write("%i\n"%iter)
	f.write("%i\n"%numeig)
	f.write("%.2f\n"%overcor)
	f.write("%s\n"%outbase)
	f.write("EOF\n")
	f.close()
	apDisplay.printMsg("running IMAGIC MSA")
	executeImagicBatchFile(batchfile, logfile=logf)
	# check proper execution
	if not os.path.exists(outbase+".plt"):
		apDisplay.printError("msa.e did not execute properly")
	checkLogFileForErrors(logf)
	if keepfiles is not True:
		os.remove(batchfile)
		os.remove(logf)
	return outbase
#======================
def classifyAndAvg(infile, numcls, path=os.path.abspath('.'), keepfiles=False, numeig=69):
	"""
	classify particles using eigenvectors
	and create class averages

	infile: particle stack (imagic format) on which MSA was run
	numcls: number of classes to create
	path: working directory for batch/log/output files
	keepfiles: if True, the batch script and log file are not removed
	numeig: number of eigenimages used by classify.e; previously this was
	        hard-coded to 69 (matching runMSA's numeig default), so the
	        default preserves the old behavior

	returns the basename of the class-average stack
	"""
	imagicroot = checkImagicExecutablePath()
	# version is not used below, but the call sanity-checks the IMAGIC install
	imagicv = getImagicVersion(imagicroot)
	batchfile = os.path.join(path, 'classify.batch')
	logf = os.path.join(path, 'classify.log')
	fname,ext=os.path.splitext(infile)
	if not os.path.exists(fname+".hed"):
		apDisplay.printError("input file: '%s' is not in imagic format"%infile)
	classlist=os.path.join(path,"classlist")
	classavgs=os.path.join(path,"classes")
	### classify batch
	f=open(batchfile,'w')
	f.write("#!/bin/csh -f\n")
	f.write("setenv IMAGIC_BATCH 1\n")
	f.write("%s/msa/classify.e <<EOF\n"%imagicroot)
	f.write("IMAGES/VOLUMES\n")
	f.write("%s\n"%fname)
	f.write("0\n")
	# number of eigenimages to use (was hard-coded to 69)
	f.write("%i\n"%numeig)
	f.write("YES\n")
	f.write("%i\n"%numcls)
	f.write("%s\n"%classlist)
	f.write("EOF\n")
	# classum.e averages the members of each class into one image
	f.write("%s/msa/classum.e << EOF\n"%imagicroot)
	f.write("%s\n"%fname)
	f.write("%s\n"%classlist)
	f.write("%s\n"%classavgs)
	f.write("YES\n")
	f.write("NONE\n")
	f.write("0\n")
	f.write("EOF\n")
	f.close()
	apDisplay.printMsg("running IMAGIC classification")
	executeImagicBatchFile(batchfile, logfile=logf)
	# check proper execution
	if not os.path.exists(classavgs+".hed"):
		apDisplay.printError("classification did not execute properly")
	checkLogFileForErrors(logf)
	if keepfiles is not True:
		os.remove(batchfile)
		os.remove(logf)
	return classavgs
#======================
#======================
def prealignClassAverages(rundir, avgs):
	'''function to iteratively align class averages to each other prior to
	input into angular reconstitution (optional)

	rundir: directory in which to write and run the batch script
	avgs: class-average stack filename (imagic format, with extension)

	returns the filename of the aligned stack ("<avgs root>_aligned.img")
	'''
	imagicroot = checkImagicExecutablePath()
	batchfile = os.path.join(rundir, "prealignClassAverages.batch")
	# stack basename without its 4-character extension (.img/.hed)
	stackroot = str(os.path.basename(avgs)[:-4])
	### assemble the alirefs.e alignment script as a list of answer lines
	script = [
		"#!/bin/csh -f",
		"setenv IMAGIC_BATCH 1",
		"cd "+rundir+"/",
		str(imagicroot)+"/align/alirefs.e <<EOF >> prealignClassAverages.log",
		"ALL",
		"CCF",
		stackroot,
		"NO",
		"0.99",
		stackroot+"_aligned",
		"-999.",
		"0.2",
		"-180,180",
		"NO",
		"5",
		"NO",
		"EOF",
	]
	scriptfile = open(batchfile, 'w')
	scriptfile.write("\n".join(script) + "\n")
	scriptfile.close()
	aligned = avgs[:-4]+"_aligned.img"
	proc = subprocess.Popen('chmod 755 '+batchfile, shell=True)
	proc.wait()
	apParam.runCmd(batchfile, "IMAGIC")
	return aligned
| 28.803965 | 123 | 0.679131 | import os
import re
import shutil
import sys
import stat
import time
import subprocess
import glob
from appionlib import apDisplay
from appionlib import apParam
from pyami import imagic2mrc
#======================
def checkImagicExecutablePath():
### check for IMAGIC installation
d = os.environ
if d.has_key('IMAGIC_ROOT'):
imagicroot = d['IMAGIC_ROOT']
else:
apDisplay.printError("$IMAGIC_ROOT directory is not specified, please specify this in your .cshrc / .bashrc")
return imagicroot
#======================
def getImagicVersion(imagicroot):
### get IMAGIC version from the "version_######S" file in
### the imagicroot directory, return as an int
versionstr=glob.glob(os.path.join(imagicroot,"version_*"))
if versionstr:
v = re.search('\d\d\d\d\d\d',versionstr[0]).group(0)
return int(v)
else:
apDisplay.printError("Could not get version number from imagic root directory")
#======================
def executeImagicBatchFile(filename, showcmd=True, verbose=False, logfile=None):
"""
executes an IMAGIC batch file in a controlled fashion
"""
proc = subprocess.Popen("chmod 755 "+filename, shell=True)
proc.wait()
path = os.path.dirname(filename)
os.chdir(path)
waited = False
t0 = time.time()
try:
if logfile is not None:
logf = open(logfile, 'a')
process = subprocess.Popen(filename, shell=True, stdout=logf, stderr=logf)
elif verbose is False:
devnull = open('/dev/null', 'w')
process = subprocess.Popen(filename, shell=True, stdout=devnull, stderr=devnull)
else:
process = subprocess.Popen(filename, shell=True)
if verbose is True:
out, err = process.communicate()
if out is not None and err is not None:
print "IMAGIC error", out, err
else:
out, err = process.communicate()
### continuous check
waittime = 0.01
while process.poll() is None:
if waittime > 0.05:
waited = True
sys.stderr.write(".")
waittime *= 1.02
time.sleep(waittime)
except:
apDisplay.printWarning("could not run IMAGIC batchfile: "+filename)
raise
tdiff = time.time() - t0
if tdiff > 20:
apDisplay.printMsg("completed in "+apDisplay.timeString(tdiff))
elif waited is True:
print ""
#======================
def copyFile(path, file, headers=False):
# used if conversion from EMAN does not write appropriate headers
imagicroot = checkImagicExecutablePath()
batchfile = os.path.join(path, 'copyImage.batch')
if file[-4:] == ".img" or file[-4:] == ".hed":
stripped_file = file[:-4]
else:
stripped_file = file
f = open(batchfile, 'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("cd %s\n" % (path))
f.write(str(imagicroot)+"/stand/copyim.e <<EOF \n")
f.write(stripped_file+"\n")
f.write(stripped_file+"_copy\n")
f.write("EOF\n")
f.write(str(imagicroot)+"/stand/imdel.e <<EOF \n")
f.write(stripped_file+"\n")
f.write("EOF\n")
f.write(str(imagicroot)+"/stand/im_rename.e <<EOF \n")
f.write(stripped_file+"_copy\n")
f.write(stripped_file+"\n")
f.write("EOF\n")
if headers is True:
f.write(str(imagicroot)+"/stand/headers.e <<EOF \n")
f.write(stripped_file+"\n")
f.write("write\n")
f.write("wipe\n")
f.write("all\n")
f.write("EOF\n")
f.close()
executeImagicBatchFile(batchfile)
#======================
def takeoverHeaders(filename, numpart, boxsize, keepfiles=False):
### better workaround than copyFile ... still a workaround though
imagicroot = checkImagicExecutablePath()
basedir = os.path.split(filename)[0]
basename = os.path.split(filename)[1]
batchfile = os.path.join(basedir, "takeoverHeaders.batch")
if basename[-4:] == ".img" or basename[-4:] == ".hed":
stripped_file = basename[:-4]
else:
stripped_file = basename
f = open(batchfile, 'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("cd %s\n" % (basedir))
f.write(str(imagicroot)+"/stand/testim.e <<EOF\n")
f.write("test,1,%d\n" % (numpart))
f.write("%d,%d\n" % (boxsize, boxsize))
f.write("REAL\n")
f.write("BLOBS\n")
f.write("EOF\n")
f.close()
proc = subprocess.Popen('chmod 755 '+batchfile, shell=True)
proc.wait()
apParam.runCmd(batchfile, "IMAGIC")
shutil.move(os.path.join(basedir, "test.hed"), os.path.join(basedir, stripped_file+".hed"))
os.remove(os.path.join(basedir, "test.img"))
if keepfiles is not True:
os.remove(batchfile)
#======================
def convertFilteringParameters(hpfilt, lpfilt, apix):
### CONVERT FILTERING PARAMETERS TO IMAGIC FORMAT BETWEEN 0-1
if lpfilt is not "" and apix is not "":
lpfilt_imagic = 2 * float(apix) / int(lpfilt)
else:
lpfilt_imagic = 1
if float(lpfilt_imagic) > 1 or float(lpfilt_imagic) < 0:
lpfilt_imagic = 1 # imagic cannot perform job when lowpass > 1
if hpfilt is not "" and apix is not "":
hpfilt_imagic = 2 * float(apix) / int(hpfilt)
else:
hpfilt_imagic = 0.01
if float(hpfilt_imagic) > 1 or float(hpfilt_imagic) < 0:
hpfilt_imagic = 0.01
return hpfilt_imagic, lpfilt_imagic
#======================
def checkLogFileForErrors(logfile):
"""
checks for any errors arising in IMAGIC log file, provided as a full path & filename
"""
logf = open(logfile)
loglines = logf.readlines()
for line in loglines:
if re.search("ERROR in program", line):
apDisplay.printError("ERROR IN IMAGIC SUBROUTINE, please check the logfile: "+logfile)
elif re.search("ERROR: all pixels", line):
apDisplay.printError("ERROR IN IMAGIC SUBROUTINE, please check the logfile: "+logfile)
logf.close()
#======================
def mask2D(boxsz, mask, infile=False, maskfile="mask2Dimgfile", path=os.path.abspath('.'), keepfiles=False):
"""
creates a 2d circular mask
if infile is specified, mask is applied to stack & then mask is deleted
boxsz is the box size in pixels
mask is the size of the mask to apply as a fraction
"""
imagicroot = checkImagicExecutablePath()
batchfile = os.path.join(path, 'maskimg.batch')
logf = os.path.join(path, 'maskimg.log')
### generate a 2D mask
f=open(batchfile,"w")
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("%s/stand/testim.e <<EOF\n"%imagicroot)
f.write("%s\n"%maskfile)
f.write("%i,%i\n"%(boxsz,boxsz))
f.write("real\n")
f.write("disc\n")
f.write("%.3f\n"%mask)
f.write("EOF\n")
if not infile:
f.close()
apDisplay.printMsg("creating 2D mask")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(maskfile+".hed"):
apDisplay.printError("mask generation did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return maskfile+".hed"
### if infile is specified, apply mask to images
fname,ext=os.path.splitext(infile)
if not os.path.exists(fname+".hed"):
apDisplay.printError("input file: '%s' is not in imagic format"%infile)
file_ma=fname+"_ma"
f.write("%s/stand/twoimag.e <<EOF\n"%imagicroot)
f.write("mul\n")
f.write("%s\n"%fname)
f.write("%s\n"%maskfile)
f.write("%s\n"%file_ma)
f.write("EOF\n")
f.close()
apDisplay.printMsg("applying 2D mask")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(file_ma+".hed"):
apDisplay.printError("masking did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return file_ma
#======================
def rotateStack(infile, ang, path=os.path.abspath('.'), keepfiles=False):
"""
creates a rotated copy of a stack
"""
imagicroot = checkImagicExecutablePath()
imagicv = getImagicVersion(imagicroot)
batchfile = os.path.join(path, 'rotate.batch')
logf = os.path.join(path, 'rotate.log')
fname,ext=os.path.splitext(infile)
if not os.path.exists(fname+".hed"):
apDisplay.printError("input file: '%s' is not in imagic format"%infile)
file_rot=fname+"_rot"
### rotate batch
f=open(batchfile,'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("%s/stand/rotate.e MODE ROTATE << EOF\n"%imagicroot)
f.write("NO\n")
f.write("%s\n"%fname)
f.write("%s\n"%file_rot)
if imagicv < 100312:
f.write("NO\n")
f.write("%.3f\n"%ang)
f.write("NO\n")
f.write("EOF\n")
f.close()
apDisplay.printMsg("rotating particles by %.3f degrees"%ang)
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(file_rot+".hed"):
apDisplay.printError("rotate.e did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return file_rot
#======================
def runMSA(infile, maskf="none.hed", iter=50, numeig=69, overcor=0.8, nproc=1, path=os.path.abspath('.'), keepfiles=False):
"""
performs multivariate statistical analysis
"""
imagicroot = checkImagicExecutablePath()
imagicv = getImagicVersion(imagicroot)
batchfile = os.path.join(path, 'msa.batch')
logf = os.path.join(path, 'msa.log')
fname,ext=os.path.splitext(infile)
if not os.path.exists(fname+".hed"):
apDisplay.printError("input file: '%s' is not in imagic format"%infile)
if maskf is not False:
mname,ext=os.path.splitext(maskf)
if not os.path.exists(mname+".hed"):
apDisplay.printError("input mask file: '%s' is not in imagic format"%infile)
outbase = os.path.join(path,"my_msa")
### msa batch
f=open(batchfile,'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
if nproc > 1:
f.write("%s/openmpi/bin/mpirun -np %i"%(imagicroot,nproc)+\
" -x IMAGIC_BATCH %s/msa/msa.e_mpi <<EOF\n"%imagicroot)
f.write("YES\n")
f.write("%i\n"%nproc)
else:
f.write("%s/msa/msa.e << EOF\n"%imagicroot)
f.write("NO\n")
f.write("FRESH\n")
f.write("MODULATION\n")
f.write("%s\n"%fname)
if nproc > 1:
f.write("NO\n")
f.write("%s\n"%mname)
f.write("%s\n"%os.path.join(path,"eigenim"))
f.write("%s\n"%os.path.join(path,"pixcoos"))
f.write("%s\n"%os.path.join(path,"eigenpix"))
f.write("%i\n"%iter)
f.write("%i\n"%numeig)
f.write("%.2f\n"%overcor)
f.write("%s\n"%outbase)
f.write("EOF\n")
f.close()
apDisplay.printMsg("running IMAGIC MSA")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(outbase+".plt"):
apDisplay.printError("msa.e did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return outbase
#======================
def classifyAndAvg(infile, numcls, path=os.path.abspath('.'), keepfiles=False):
"""
classify particles using eigenvectors
and create class averages
"""
imagicroot = checkImagicExecutablePath()
imagicv = getImagicVersion(imagicroot)
batchfile = os.path.join(path, 'classify.batch')
logf = os.path.join(path, 'classify.log')
fname,ext=os.path.splitext(infile)
if not os.path.exists(fname+".hed"):
apDisplay.printError("input file: '%s' is not in imagic format"%infile)
classlist=os.path.join(path,"classlist")
classavgs=os.path.join(path,"classes")
### classify batch
f=open(batchfile,'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("%s/msa/classify.e <<EOF\n"%imagicroot)
f.write("IMAGES/VOLUMES\n")
f.write("%s\n"%fname)
f.write("0\n")
f.write("69\n")
f.write("YES\n")
f.write("%i\n"%numcls)
f.write("%s\n"%classlist)
f.write("EOF\n")
f.write("%s/msa/classum.e << EOF\n"%imagicroot)
f.write("%s\n"%fname)
f.write("%s\n"%classlist)
f.write("%s\n"%classavgs)
f.write("YES\n")
f.write("NONE\n")
f.write("0\n")
f.write("EOF\n")
f.close()
apDisplay.printMsg("running IMAGIC classification")
executeImagicBatchFile(batchfile, logfile=logf)
# check proper execution
if not os.path.exists(classavgs+".hed"):
apDisplay.printError("classification did not execute properly")
checkLogFileForErrors(logf)
if keepfiles is not True:
os.remove(batchfile)
os.remove(logf)
return classavgs
#======================
def convertImagicStackToMrcStack(infile,outfile):
# 2D ImagicFile data block generated by EMAN is y-flipped from the original mrc.
# Therefore, we need flip it back.
imagic2mrc.imagic_to_mrc(infile,outfile,yflip=True)
#======================
def prealignClassAverages(rundir, avgs):
'''function to iteratively align class averages to each other prior to
input into angular reconstitution (optional) '''
imagicroot = checkImagicExecutablePath()
batchfile = os.path.join(rundir, "prealignClassAverages.batch")
f = open(batchfile, 'w')
f.write("#!/bin/csh -f\n")
f.write("setenv IMAGIC_BATCH 1\n")
f.write("cd "+rundir+"/\n")
### this is the actual alignment
f.write(str(imagicroot)+"/align/alirefs.e <<EOF >> prealignClassAverages.log\n")
f.write("ALL\n")
f.write("CCF\n")
f.write(str(os.path.basename(avgs)[:-4])+"\n")
f.write("NO\n")
f.write("0.99\n")
f.write(str(os.path.basename(avgs)[:-4])+"_aligned\n")
f.write("-999.\n")
f.write("0.2\n")
f.write("-180,180\n")
f.write("NO\n")
f.write("5\n")
f.write("NO\n")
f.write("EOF\n")
f.close()
avgs = avgs[:-4]+"_aligned.img"
proc = subprocess.Popen('chmod 755 '+batchfile, shell=True)
proc.wait()
apParam.runCmd(batchfile, "IMAGIC")
return avgs
| 3,441 | 0 | 132 |
84676529c79351f4d06d6bfc27b4b70952854e49 | 3,736 | py | Python | home/storage/migrations/0003_logical_delete.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | home/storage/migrations/0003_logical_delete.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | 223 | 2020-02-21T06:16:56.000Z | 2022-03-01T22:24:19.000Z | home/storage/migrations/0003_logical_delete.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-21 11:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def set_edited_by(apps, schema_editor):
"""将修改人设置为录入人"""
Item = apps.get_model("storage", "Item")
for item in Item.objects.all():
item.edited_by = item.created_by
item.save()
def reverse_set_edited_by(apps, schema_editor):
"""删除 storage_id 为空的物品"""
Item = apps.get_model("storage", "Item")
for item in Item.objects.filter(storage_id__isnull=True).all():
item.delete()
| 30.876033 | 83 | 0.546842 | # Generated by Django 3.1.4 on 2020-12-21 11:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def set_edited_by(apps, schema_editor):
"""将修改人设置为录入人"""
Item = apps.get_model("storage", "Item")
for item in Item.objects.all():
item.edited_by = item.created_by
item.save()
def reverse_set_edited_by(apps, schema_editor):
"""删除 storage_id 为空的物品"""
Item = apps.get_model("storage", "Item")
for item in Item.objects.filter(storage_id__isnull=True).all():
item.delete()
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("storage", "0002_item_date_added"),
]
operations = [
migrations.RenameField(
model_name="item",
old_name="date_added",
new_name="created_at",
),
migrations.RenameField(
model_name="item",
old_name="editor",
new_name="created_by",
),
migrations.RenameField(
model_name="item",
old_name="expiration_date",
new_name="expired_at",
),
migrations.RenameField(
model_name="item",
old_name="update_date",
new_name="edited_at",
),
migrations.AlterField(
model_name="item",
name="edited_at",
field=models.DateTimeField(auto_now=True, verbose_name="修改时间"),
),
migrations.AlterField(
model_name="item",
name="expired_at",
field=models.DateTimeField(blank=True, null=True, verbose_name="有效日期"),
),
migrations.AlterField(
model_name="item",
name="storage",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="items",
to="storage.storage",
verbose_name="属于",
),
),
migrations.AlterField(
model_name="item",
name="created_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="created_items",
to=settings.AUTH_USER_MODEL,
verbose_name="录入人",
),
),
migrations.AddField(
model_name="item",
name="edited_by",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="edited_items",
to=settings.AUTH_USER_MODEL,
verbose_name="修改人",
),
),
migrations.AddField(
model_name="item",
name="deleted_at",
field=models.DateTimeField(blank=True, null=True, verbose_name="删除时间"),
),
migrations.AddField(
model_name="item",
name="is_deleted",
field=models.BooleanField(default=False, verbose_name="逻辑删除"),
),
# 临时取消自动设置为当前时间
# 等待执行完脚本后恢复
migrations.AlterField(
model_name="item",
name="edited_at",
field=models.DateTimeField(verbose_name="修改时间"),
),
migrations.RunPython(set_edited_by, reverse_code=reverse_set_edited_by),
migrations.AlterField(
model_name="item",
name="edited_at",
field=models.DateTimeField(auto_now=True, verbose_name="修改时间"),
),
]
| 0 | 3,246 | 23 |
067cdd1fe5c0e35cddffd2297f13aa8f28c0b978 | 903 | py | Python | chapter05/eight_queens_problem.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | 3 | 2020-08-05T01:15:41.000Z | 2020-08-05T09:28:36.000Z | chapter05/eight_queens_problem.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | null | null | null | chapter05/eight_queens_problem.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
@desc:
国际象棋中的皇后比中国象棋里的大车还厉害,皇后能横向,纵向和斜向移动,在这三条线上的其他棋子都可以被吃掉。
所谓八皇后问题就是:将八位皇后放在一张8x8的棋盘上,使得每位皇后都无法吃掉别的皇后,(即任意两个皇后都不在同一条横线,
竖线和斜线上),问一共有多少种摆法。此问题是在1848年由棋手马克思·贝瑟尔提出的,后面陆续有包括高斯等大数学家们给出
自己的思考和解法,所以此问题不只是有年头了,简直比82年的拉菲还有年头,我们今天不妨尝尝这老酒。
@author: huijz
@version 0.1
@date 2020-08-31
@email huijz8117@gmail.com
"""
BOARD_SIZE = 8 # 棋盘大小 8*8=64
for answer in solve(BOARD_SIZE):
print answer
| 23.763158 | 60 | 0.654485 | # coding=utf-8
"""
@desc:
国际象棋中的皇后比中国象棋里的大车还厉害,皇后能横向,纵向和斜向移动,在这三条线上的其他棋子都可以被吃掉。
所谓八皇后问题就是:将八位皇后放在一张8x8的棋盘上,使得每位皇后都无法吃掉别的皇后,(即任意两个皇后都不在同一条横线,
竖线和斜线上),问一共有多少种摆法。此问题是在1848年由棋手马克思·贝瑟尔提出的,后面陆续有包括高斯等大数学家们给出
自己的思考和解法,所以此问题不只是有年头了,简直比82年的拉菲还有年头,我们今天不妨尝尝这老酒。
@author: huijz
@version 0.1
@date 2020-08-31
@email huijz8117@gmail.com
"""
BOARD_SIZE = 8 # 棋盘大小 8*8=64
def under_attack(col, queens):
left = right = col
for i, c in reversed(queens):
# 左右有冲突的位置的列号
left, right = left - 1, right + 1
if c in (left, col, right):
return True
return False
def solve(n):
if n == 0:
return [[]]
smaller_solutions = solve(n - 1)
return [solution + [(n, i + 1)]
for i in xrange(BOARD_SIZE)
for solution in smaller_solutions
if not under_attack(i + 1, solution)]
for answer in solve(BOARD_SIZE):
print answer
| 469 | 0 | 46 |
3cfe2ab1ae54a13c77234ff9e4a3a082363da06f | 361 | py | Python | atcoder/corp/ddcc2019_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/corp/ddcc2019_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/corp/ddcc2019_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null |
N = int(input())
ans = 0
K = 100
for i in range(N):
for j in range(N):
a, b = i * 2 * K, (i + 1) * 2 * K
c, d = j * 2 * K, (j + 1) * 2 * K
if in_(a, c) and in_(a, d) and in_(b, c) and in_(b, d):
ans += 1
print(ans)
| 25.785714 | 89 | 0.393352 | def in_(x, y):
return -x + y >= -N * K and x + y >= N * K and x + y <= 3 * K * N and -x + y <= N * K
N = int(input())
ans = 0
K = 100
for i in range(N):
for j in range(N):
a, b = i * 2 * K, (i + 1) * 2 * K
c, d = j * 2 * K, (j + 1) * 2 * K
if in_(a, c) and in_(a, d) and in_(b, c) and in_(b, d):
ans += 1
print(ans)
| 83 | 0 | 22 |
d9a2f33161a7ccbe3aabf596b65a792342fd5b61 | 366 | py | Python | gee_gateway/__init__.py | jdilger/gee-gateway | 2e4d11f01e785359e6f213fe7d647032f1212b76 | [
"MIT"
] | 5 | 2016-12-08T15:40:27.000Z | 2021-09-10T16:01:36.000Z | gee_gateway/__init__.py | jdilger/gee-gateway | 2e4d11f01e785359e6f213fe7d647032f1212b76 | [
"MIT"
] | 1 | 2020-02-03T22:11:09.000Z | 2020-02-03T22:11:09.000Z | gee_gateway/__init__.py | jdilger/gee-gateway | 2e4d11f01e785359e6f213fe7d647032f1212b76 | [
"MIT"
] | 8 | 2017-05-12T20:49:38.000Z | 2020-05-14T19:22:21.000Z | from flask import Blueprint
gee_gateway = Blueprint('gee_gateway', __name__, template_folder='templates', static_folder='static', static_url_path='/static/gee_gateway')
from . import gee, web
| 40.666667 | 140 | 0.795082 | from flask import Blueprint
gee_gateway = Blueprint('gee_gateway', __name__, template_folder='templates', static_folder='static', static_url_path='/static/gee_gateway')
from . import gee, web
def gee_initialize(ee_account='', ee_key_path='', ee_user_token=''):
gee.utils.initialize(ee_account=ee_account, ee_key_path=ee_key_path, ee_user_token=ee_user_token)
| 149 | 0 | 23 |
d31522c6adbf649e52bc307feaa904c8a1650d3f | 2,445 | py | Python | main1.py | wotmd5731/pseudo_random_gen | f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | [
"MIT"
] | null | null | null | main1.py | wotmd5731/pseudo_random_gen | f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | [
"MIT"
] | null | null | null | main1.py | wotmd5731/pseudo_random_gen | f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import os
import csv
"""
NOT USED.
leaky_relu사용.
Pred 값이 - ~ +가 나옴
real target value is only 0 or 1 value
"""
cx = Variable(torch.zeros(3,1, 512))
hx = Variable(torch.zeros(3,1, 512))
f = open('data.csv','r',encoding='utf-8')
rdr = csv.reader(f)
data = []
for line in rdr:
data.append(line[-7:])
# print(line[-7:1])
f.close()
data = data[3:]
np_data = np.array(data, dtype=np.long)
torch_data = torch.from_numpy(np_data).type(torch.LongTensor)
main_num = torch_data[:,:6]
bonus_num = torch_data[:,6].unsqueeze(1)
#flip data seq
inv_idx = torch.arange(main_num.size(0)-1, -1, -1).long()
main_num = main_num.index_select(0, inv_idx)
bonus_num = bonus_num.index_select(0, inv_idx)
main_data = Variable(torch.zeros(main_num.size(0),46).scatter_(1,main_num,1)[:,1:].unsqueeze(0))
bonus_data = Variable(torch.zeros(bonus_num.size(0),46).scatter_(1,bonus_num,1)[:,1:].unsqueeze(0))
net = network()
#loss = nn.CrossEntropyLoss()
#crit = nn.KLDivLoss()
crit = nn.MSELoss()
#crit = nn.BCELoss(size_average = True)
opti = optim.Adam(net.parameters(),lr=0.0001)
for i in range(main_data.size(1)):
out,hx,cx = net(main_data[0,i,:].view(1,1,-1),hx,cx)
out = out.view(-1)
if i == main_data.size(1)-1 :
print(out.data.numpy())
break;
target = main_data[0,i+1,:]
loss = crit(out, target)
print(out.data.numpy())
print('i : ', i ,' loss :',loss.data[0])
net.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm(net.parameters(), 10) # Clip gradients (normalising by max value of gradient L2 norm)
opti.step()
#out ,hx,cx = net(main_data,hx,cx)
#last_out = out[:,-1,:]
#
#
#
#
#
#
#loss = nn.CrossEntropyLoss()
#
#target = Variable(torch.LongTensor(batch_size).random_(0, classes_no-1))
#
#err = loss(last_output, target)
#err.backward()
#
#
#
#
| 21.447368 | 114 | 0.633538 | # -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import os
import csv
"""
NOT USED.
leaky_relu사용.
Pred 값이 - ~ +가 나옴
real target value is only 0 or 1 value
"""
class network(nn.Module):
def __init__(self):
super().__init__()
self.lstm = nn.LSTM(45 ,512 , 3,batch_first = True)
self.fc = nn.Linear(512 ,45)
def forward(self, x,hx,cx):
out , (hx, cx) = self.lstm(x, (hx, cx))
x = F.leaky_relu(self.fc(out))
# x = F.sigmoid(self.fc(out))
return x ,hx,cx
cx = Variable(torch.zeros(3,1, 512))
hx = Variable(torch.zeros(3,1, 512))
f = open('data.csv','r',encoding='utf-8')
rdr = csv.reader(f)
data = []
for line in rdr:
data.append(line[-7:])
# print(line[-7:1])
f.close()
data = data[3:]
np_data = np.array(data, dtype=np.long)
torch_data = torch.from_numpy(np_data).type(torch.LongTensor)
main_num = torch_data[:,:6]
bonus_num = torch_data[:,6].unsqueeze(1)
#flip data seq
inv_idx = torch.arange(main_num.size(0)-1, -1, -1).long()
main_num = main_num.index_select(0, inv_idx)
bonus_num = bonus_num.index_select(0, inv_idx)
main_data = Variable(torch.zeros(main_num.size(0),46).scatter_(1,main_num,1)[:,1:].unsqueeze(0))
bonus_data = Variable(torch.zeros(bonus_num.size(0),46).scatter_(1,bonus_num,1)[:,1:].unsqueeze(0))
net = network()
#loss = nn.CrossEntropyLoss()
#crit = nn.KLDivLoss()
crit = nn.MSELoss()
#crit = nn.BCELoss(size_average = True)
opti = optim.Adam(net.parameters(),lr=0.0001)
for i in range(main_data.size(1)):
out,hx,cx = net(main_data[0,i,:].view(1,1,-1),hx,cx)
out = out.view(-1)
if i == main_data.size(1)-1 :
print(out.data.numpy())
break;
target = main_data[0,i+1,:]
loss = crit(out, target)
print(out.data.numpy())
print('i : ', i ,' loss :',loss.data[0])
net.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm(net.parameters(), 10) # Clip gradients (normalising by max value of gradient L2 norm)
opti.step()
#out ,hx,cx = net(main_data,hx,cx)
#last_out = out[:,-1,:]
#
#
#
#
#
#
#loss = nn.CrossEntropyLoss()
#
#target = Variable(torch.LongTensor(batch_size).random_(0, classes_no-1))
#
#err = loss(last_output, target)
#err.backward()
#
#
#
#
| 285 | 4 | 92 |
20b6df6c10f2fe6702e8ead6111932be15ff0bb4 | 2,403 | py | Python | Bot/cogs/staff/shutdown.py | HoggyTheWizard/GuildofGuilds | e0dd5188b41e542587149c38e41d85414ada6e5c | [
"MIT"
] | null | null | null | Bot/cogs/staff/shutdown.py | HoggyTheWizard/GuildofGuilds | e0dd5188b41e542587149c38e41d85414ada6e5c | [
"MIT"
] | null | null | null | Bot/cogs/staff/shutdown.py | HoggyTheWizard/GuildofGuilds | e0dd5188b41e542587149c38e41d85414ada6e5c | [
"MIT"
] | null | null | null | from discord.ext import commands
from Bot.utils.staff.staff_checks import *
from main import main_db
from pathlib import Path
from config import prefixes
users = main_db["users"]
blacklisted_files = ["shutdown", "start", "reload"]
| 33.84507 | 119 | 0.555972 | from discord.ext import commands
from Bot.utils.staff.staff_checks import *
from main import main_db
from pathlib import Path
from config import prefixes
users = main_db["users"]
blacklisted_files = ["shutdown", "start", "reload"]
class shutdown(commands.Cog):
    """Developer-only commands for unloading (shutting down) bot cogs at runtime."""

    def __init__(self, bot):
        self.bot = bot

    @commands.group()
    @is_dev()
    async def shutdown(self, ctx):
        """Group command; with no subcommand, list the available subcommands."""
        if ctx.invoked_subcommand is None:
            await ctx.send("Available Subcommands:\n"
                           f"{prefixes[0]}shutdown all - Shuts off all files\n"
                           f"{prefixes[0]}shutdown folder <folder_name> - Shuts off all files in a specified folder.\n"
                           f"{prefixes[0]}shutdown folders - Displays a list of the bot's folders.")

    @shutdown.command()
    @is_dev()
    async def all(self, ctx):
        """Unload every cog under Bot/cogs, skipping blacklisted staff files."""
        count = 0
        for ext in Path().glob("Bot/cogs/*/*.py"):
            # parts[2] is the folder name; the blacklist only guards the "Staff"
            # folder. NOTE(review): on-disk path appears to be lowercase "staff"
            # -- confirm the intended case, this branch may never fire.
            if ext.parts[2] == "Staff":
                if ext.stem in blacklisted_files:
                    continue
            try:
                # "Bot/cogs/x/y.py" -> "Bot.cogs.x.y" (suffix stripped after join).
                self.bot.unload_extension(".".join(part for part in ext.parts)[:-len(ext.suffix)])
                count += 1
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            except Exception:
                # Fixed message: this command unloads, it does not load.
                print(f"Could not unload extension {ext}")
        await ctx.send(f"Successfully Shut Down: {count} files.")

    @shutdown.command()
    @is_dev()
    async def folders(self, ctx):
        """Send a list of the folder names under Bot/cogs."""
        string = "Folders:\n"
        for folder in Path().glob("Bot/cogs/*"):
            try:
                folder_name = f"{list(folder.parts)[2]}\n"
                string += folder_name
            except Exception:
                print(f"{folder} was unable to be added to the string.")
                continue
        await ctx.send(string)

    @shutdown.command()
    @is_dev()
    async def folder(self, ctx, folder):
        """Unload every cog in the given folder, skipping blacklisted staff files."""
        count = 0
        for extension in Path().glob(f"Bot/cogs/{folder}/*.py"):
            if extension.parts[2] == "Staff":
                if extension.stem in blacklisted_files:
                    continue
            try:
                self.bot.unload_extension(".".join(part for part in extension.parts)[:-len(extension.suffix)])
                count += 1
            except Exception:
                print(f"Could not shut down extension {extension}")
        await ctx.send(f"Success! Shut down {count} cogs in {folder}")
def setup(bot):
    """discord.py extension entry point: attach the shutdown cog to *bot*."""
    bot.add_cog(shutdown(bot))
| 1,832 | 292 | 46 |
e7acf8eecac1fc1b17b7af722ce3c075dadbe3ca | 3,383 | py | Python | tests/lib/interface/test_predicate.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | tests/lib/interface/test_predicate.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | tests/lib/interface/test_predicate.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | # pyright: reportUnusedClass=false
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterator, Optional, TypeVar
import pytest
from antidote import implements, inject, injectable, interface, world
from antidote.lib.injectable import register_injectable_provider
from antidote.lib.interface import NeutralWeight, predicate, Predicate, register_interface_provider
T = TypeVar("T")
@dataclass
@pytest.fixture(autouse=True)
@predicate
@predicate
| 23.493056 | 99 | 0.663021 | # pyright: reportUnusedClass=false
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterator, Optional, TypeVar
import pytest
from antidote import implements, inject, injectable, interface, world
from antidote.lib.injectable import register_injectable_provider
from antidote.lib.interface import NeutralWeight, predicate, Predicate, register_interface_provider
T = TypeVar("T")
def _(x: T) -> T:
    """Typed identity helper; used below to apply decorator expressions
    (e.g. ``@_(implements(Base).when(...))``) without losing type information."""
    return x
@dataclass
class Weight:
    """Simple integer-valued weight used by these tests to rank predicates."""

    value: int

    @classmethod
    def of_neutral(cls, predicate: Optional[Predicate[Any]]) -> Weight:
        # A neutral (or absent) predicate always maps to the zero weight.
        return cls(0)

    def __lt__(self, other: Weight) -> bool:
        # Strict ordering delegates to the wrapped integers.
        return other.value > self.value

    def __add__(self, other: Weight) -> Weight:
        combined = self.value + other.value
        return Weight(combined)
@pytest.fixture(autouse=True)
def setup_world() -> Iterator[None]:
    """Run every test in a fresh, empty antidote world with both providers registered."""
    with world.test.empty():
        register_injectable_provider()
        register_interface_provider()
        yield
def test_neutral_weight() -> None:
    """NeutralWeight is a singleton, is its own sum, and never compares less than itself."""
    neutral = NeutralWeight()
    assert NeutralWeight() is neutral
    assert (NeutralWeight() + NeutralWeight()) == neutral
    assert not (NeutralWeight() < neutral)
@predicate
def only_if(condition: bool) -> bool:
    """Predicate helper: matches exactly when *condition* is True."""
    return condition
@predicate
def weighted(value: Optional[int] = None) -> Optional[Weight]:
    """Predicate helper: matches with Weight(value) when a value is given, else not at all."""
    return Weight(value) if value is not None else None
def test_simple_predicate() -> None:
    """@predicate wraps the functions into Predicate objects whose weight tracks the argument."""
    assert isinstance(only_if(True), Predicate)
    assert only_if(False).weight() is None
    assert only_if(True).weight() is NeutralWeight()
    assert isinstance(weighted(0), Predicate)
    assert weighted().weight() is None
    assert weighted(12).weight() == Weight(12)
def test_predicate_implements() -> None:
    """Only the implementation whose predicate matches is resolved for the interface."""
    @interface
    class Base:
        ...
    @_(implements(Base).when(only_if(True)))
    class Yes(Base):
        ...
    @_(implements(Base).when(only_if(False)))
    class No(Base):
        ...
    assert world.get(Base) is world.get(Yes)
def test_multiple_predicates() -> None:
    """Distinct predicate types may be combined; duplicates of the same type are rejected."""
    @interface
    class Base:
        ...
    @_(implements(Base).when(only_if(True), weighted(12)))
    class Yes(Base):
        ...
    assert world.get(Base) is world.get(Yes)
    # Two predicates of the same type in one `when(...)` raise at declaration time.
    with pytest.raises(RuntimeError):
        @_(implements(Base).when(only_if(True), only_if(True)))
        class Invalid(Base):
            ...
    with pytest.raises(RuntimeError):
        @_(implements(Base).when(weighted(12), weighted(12)))
        class Invalid2(Base):
            ...
def test_predicate_generated_class() -> None:
    """@predicate generates a class named after the function with a readable repr."""
    p = only_if(True)
    assert type(p).__name__ == "OnlyIfPredicate"
    assert str(p) == "OnlyIfPredicate(_weight=NeutralWeight)"
    p2 = weighted(12)
    assert type(p2).__name__ == "WeightedPredicate"
    assert str(p2) == "WeightedPredicate(_weight=Weight(value=12))"
def test_predicate_invalid_function() -> None:
    """@predicate rejects non-function arguments with a TypeError."""
    with pytest.raises(TypeError):
        predicate(object())  # type: ignore
def test_predicate_already_injected() -> None:
    """@predicate composes with a function already wrapped by @inject."""
    @injectable
    class Dummy:
        ...
    @predicate
    @inject({"dummy": Dummy})
    def if_only(condition: bool, dummy: Dummy) -> bool:
        return condition
    @interface
    class Base:
        ...
    @_(implements(Base).when(if_only(True)))  # type: ignore
    class Yes(Base):
        ...
    assert world.get(Base) is world.get(Yes)
| 2,501 | 105 | 272 |
df82f21dd0f33072fb753615eac78da6261729b3 | 2,580 | py | Python | VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/webservergluecgi.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | 1 | 2015-04-30T14:18:45.000Z | 2015-04-30T14:18:45.000Z | VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/webservergluecgi.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/webservergluecgi.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# $Id: webservergluecgi.py $
"""
Test Manager Core - Web Server Abstraction Base Class.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
import cgi;
import cgitb;
import os;
import sys;
# Validation Kit imports.
from testmanager.core.webservergluebase import WebServerGlueBase;
from testmanager import config;
class WebServerGlueCgi(WebServerGlueBase):
"""
CGI glue.
"""
| 30 | 78 | 0.711628 | # -*- coding: utf-8 -*-
# $Id: webservergluecgi.py $
"""
Test Manager Core - Web Server Abstraction Base Class.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
import cgi;
import cgitb;
import os;
import sys;
# Validation Kit imports.
from testmanager.core.webservergluebase import WebServerGlueBase;
from testmanager import config;
class WebServerGlueCgi(WebServerGlueBase):
    """
    CGI glue.

    Thin adapter that sources all request data from the CGI execution
    environment (os.environ, sys.stdin) for WebServerGlueBase.
    NOTE(review): the cgi module is deprecated and removed in Python 3.13
    -- confirm the targeted interpreter version.
    """
    def __init__(self, sValidationKitDir, fHtmlOutput=True):
        WebServerGlueBase.__init__(self, sValidationKitDir, fHtmlOutput);
        if config.g_kfSrvGlueCgiTb is True:
            # Optional CGI traceback reporting, matching the output format.
            cgitb.enable(format=('html' if fHtmlOutput else 'text'));
    def getParameters(self):
        """Returns the request parameters parsed from the CGI environment."""
        return cgi.parse(keep_blank_values=True);
    def getClientAddr(self):
        """Returns the client address (REMOTE_ADDR), or None when unset."""
        return os.environ.get('REMOTE_ADDR');
    def getMethod(self):
        """Returns the HTTP request method, defaulting to 'POST'."""
        return os.environ.get('REQUEST_METHOD', 'POST');
    def getLoginName(self):
        """Returns the authenticated user (REMOTE_USER) or the 'unknown user' fallback."""
        return os.environ.get('REMOTE_USER', WebServerGlueBase.ksUnknownUser);
    def getUrlScheme(self):
        """Returns 'https' when the HTTPS environment variable is present, else 'http'."""
        return 'https' if 'HTTPS' in os.environ else 'http';
    def getUrlNetLoc(self):
        """Returns the URL authority (HTTP_HOST); raises KeyError when absent."""
        return os.environ['HTTP_HOST'];
    def getUrlPath(self):
        """Returns the request URI (REQUEST_URI); raises KeyError when absent."""
        return os.environ['REQUEST_URI'];
    def getUserAgent(self):
        """Returns the User-Agent header value, or an empty string."""
        return os.getenv('HTTP_USER_AGENT', '');
    def getContentType(self):
        """Returns the (content-type, parameter-dict) pair parsed from CONTENT_TYPE."""
        return cgi.parse_header(os.environ.get('CONTENT_TYPE', 'text/html'));
    def getContentLength(self):
        """Returns the request body length as an int, 0 when unspecified."""
        return int(os.environ.get('CONTENT_LENGTH', 0));
    def getBodyIoStream(self):
        """Returns the stream carrying the request body (stdin under CGI)."""
        return sys.stdin;
| 837 | 0 | 323 |
72c98fea0717dde69cbc4a74a1ecfb4b2872563f | 23,840 | py | Python | tests/test_vjp.py | AmintorDusko/pennylane-lightning | 9554a95842de9d7759ce96bfa75857e0c9ca756b | [
"Apache-2.0"
] | null | null | null | tests/test_vjp.py | AmintorDusko/pennylane-lightning | 9554a95842de9d7759ce96bfa75857e0c9ca756b | [
"Apache-2.0"
] | null | null | null | tests/test_vjp.py | AmintorDusko/pennylane-lightning | 9554a95842de9d7759ce96bfa75857e0c9ca756b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the ``vjp`` method of LightningQubit.
"""
from cmath import exp
import pytest
import pennylane as qml
from pennylane import numpy as np
try:
from pennylane_lightning.lightning_qubit_ops import (
VectorJacobianProductC64,
VectorJacobianProductC128,
)
except (ImportError, ModuleNotFoundError):
pytest.skip("No binary module found. Skipping.", allow_module_level=True)
class TestComputeVJP:
"""Tests for the numeric computation of VJPs"""
@pytest.fixture
@pytest.mark.skipif(
not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_computation(self, tol, dev, C):
"""Test that the correct VJP is returned"""
dev._state = dev._asarray(dev._state, C)
dy = np.array([[1.0, 2.0], [3.0, 4.0]])
jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
vjp = dev.compute_vjp(dy, jac)
expected = np.tensordot(dy, jac, axes=[[0, 1], [0, 1]])
assert vjp.shape == (3,)
assert np.allclose(vjp, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_computation_num(self, tol, dev, C):
"""Test that the correct VJP is returned"""
dev._state = dev._asarray(dev._state, C)
dy = np.array([[1.0, 2.0], [3.0, 4.0]])
jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
vjp = dev.compute_vjp(dy, jac, num=4)
expected = np.tensordot(dy, jac, axes=[[0, 1], [0, 1]])
assert vjp.shape == (3,)
assert np.allclose(vjp, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_computation_num_error(self, dev, C):
"""Test that the correct VJP is returned"""
dev._state = dev._asarray(dev._state, C)
dy = np.array([[1.0, 2.0], [3.0, 4.0]])
jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
with pytest.raises(ValueError, match="Invalid size for the gradient-output vector"):
dev.compute_vjp(dy, jac, num=3)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_jacobian_is_none(self, dev, C):
"""A None Jacobian returns a None VJP"""
dev._state = dev._asarray(dev._state, C)
dy = np.array([[1.0, 2.0], [3.0, 4.0]])
jac = None
vjp = dev.compute_vjp(dy, jac)
assert vjp is None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_zero_dy(self, dev, C):
"""A zero dy vector will return a zero matrix"""
dev._state = dev._asarray(dev._state, C)
dy = np.zeros([2, 2])
jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
vjp = dev.compute_vjp(dy, jac)
assert np.all(vjp == np.zeros([3]))
def test_array_dy(self, dev):
"""Test vjp_compute using Python array"""
dy = [1.0, 1.0, 1.0, 1.0]
jac = [dy, dy, dy, dy]
expected = [4.0, 4.0, 4.0, 4.0]
vjp = dev.compute_vjp(dy, jac)
assert np.all(vjp == expected)
def test_torch_tensor_dy(self, dev):
"""Test vjp_compute using the Torch interface"""
torch = pytest.importorskip("torch")
dtype = getattr(torch, "float32")
dy = torch.ones(4, dtype=dtype)
jac = torch.ones((4, 4), dtype=dtype)
expected = torch.tensor([4.0, 4.0, 4.0, 4.0], dtype=dtype)
vjp = dev.compute_vjp(dy, jac)
assert torch.all(vjp == expected)
def test_tf_tensor_dy(self, dev):
"""Test vjp_compute using the Tensorflow interface"""
tf = pytest.importorskip("tensorflow")
dy = tf.ones(4, dtype=tf.float32)
jac = tf.ones((4, 4), dtype=tf.float32)
expected = tf.constant([4.0, 4.0, 4.0, 4.0], dtype=tf.float32)
vjp = dev.compute_vjp(dy, jac)
assert tf.reduce_all(vjp == expected)
class TestVectorJacobianProduct:
"""Tests for the `vjp` function"""
@pytest.fixture
@pytest.mark.skipif(
not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_use_device_state(self, tol, dev, C):
"""Tests that when using the device state, the correct answer is still returned."""
dev._state = dev._asarray(dev._state, C)
x, y, z = [0.5, 0.3, -0.7]
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=[0])
qml.Rot(x, y, z, wires=[0])
qml.RY(-0.2, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2, 3}
dy = np.array([1.0])
fn1 = dev.vjp(tape, dy)
vjp1 = fn1(tape)
qml.execute([tape], dev, None)
fn2 = dev.vjp(tape, dy, use_device_state=True)
vjp2 = fn2(tape)
assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_provide_starting_state(self, tol, dev, C):
"""Tests provides correct answer when provided starting state."""
dev._state = dev._asarray(dev._state, C)
x, y, z = [0.5, 0.3, -0.7]
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=[0])
qml.Rot(x, y, z, wires=[0])
qml.RY(-0.2, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2, 3}
dy = np.array([1.0])
fn1 = dev.vjp(tape, dy)
vjp1 = fn1(tape)
qml.execute([tape], dev, None)
fn2 = dev.vjp(tape, dy, starting_state=dev._pre_rotated_state)
vjp2 = fn2(tape)
assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_not_expval(self, dev, C):
"""Test if a QuantumFunctionError is raised for a tape with measurements that are not
expectation values"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape:
qml.RX(0.1, wires=0)
qml.var(qml.PauliZ(0))
dy = np.array([1.0])
with pytest.raises(qml.QuantumFunctionError, match="Adjoint differentiation method does"):
dev.vjp(tape, dy)(tape)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_finite_shots_warns(self, C):
"""Tests warning raised when finite shots specified"""
dev = qml.device("lightning.qubit", wires=1, shots=1)
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape:
qml.expval(qml.PauliZ(0))
dy = np.array([1.0])
with pytest.warns(
UserWarning, match="Requested adjoint differentiation to be computed with finite shots."
):
dev.vjp(tape, dy)(tape)
from pennylane_lightning import LightningQubit as lq
@pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_unsupported_op(self, dev, C):
"""Test if a QuantumFunctionError is raised for an unsupported operation, i.e.,
multi-parameter operations that are not qml.Rot"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape:
qml.CRot(0.1, 0.2, 0.3, wires=[0, 1])
qml.expval(qml.PauliZ(0))
dy = np.array([1.0])
with pytest.raises(
qml.QuantumFunctionError, match="The CRot operation is not supported using the"
):
dev.vjp(tape, dy)(tape)
with qml.tape.QuantumTape() as tape:
qml.SingleExcitation(0.1, wires=[0, 1])
qml.expval(qml.PauliZ(0))
with pytest.raises(
qml.QuantumFunctionError,
match="The SingleExcitation operation is not supported using the",
):
dev.vjp(tape, dy)(tape)
@pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_proj_unsupported(self, dev, C):
"""Test if a QuantumFunctionError is raised for a Projector observable"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape:
qml.CRX(0.1, wires=[0, 1])
qml.expval(qml.Projector([0, 1], wires=[0, 1]))
dy = np.array([1.0])
with pytest.raises(
qml.QuantumFunctionError, match="differentiation method does not support the Projector"
):
dev.vjp(tape, dy)(tape)
with qml.tape.QuantumTape() as tape:
qml.CRX(0.1, wires=[0, 1])
qml.expval(qml.Projector([0], wires=[0]) @ qml.PauliZ(0))
with pytest.raises(
qml.QuantumFunctionError, match="differentiation method does not support the Projector"
):
dev.vjp(tape, dy)(tape)
@pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_no_trainable_parameters(self, dev, C):
"""A tape with no trainable parameters will simply return None"""
dev._state = dev._asarray(dev._state, C)
x = 0.4
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {}
dy = np.array([1.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
assert vjp is None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_no_trainable_parameters_NEW(self, dev, C):
"""A tape with no trainable parameters will simply return None"""
dev._state = dev._asarray(dev._state, C)
x = 0.4
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {}
dy = np.array([1.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
assert vjp is None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_no_trainable_parameters_(self, dev, C):
"""A tape with no trainable parameters will simply return None"""
dev._state = dev._asarray(dev._state, C)
x = 0.4
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {}
dy = np.array([1.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
assert vjp is None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_zero_dy(self, dev, C):
"""A zero dy vector will return no tapes and a zero matrix"""
dev._state = dev._asarray(dev._state, C)
x = 0.4
y = 0.6
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.RX(y, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {0, 1}
dy = np.array([0.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
assert np.all(vjp == np.zeros([len(tape.trainable_params)]))
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_single_expectation_value(self, tol, dev, C):
"""Tests correct output shape and evaluation for a tape
with a single expval output"""
dev._state = dev._asarray(dev._state, C)
x = 0.543
y = -0.654
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
tape.trainable_params = {0, 1}
dy = np.array([1.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
expected = np.array([-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)])
assert np.allclose(vjp, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_multiple_expectation_values(self, tol, dev, C):
"""Tests correct output shape and evaluation for a tape
with multiple expval outputs"""
dev._state = dev._asarray(dev._state, C)
x = 0.543
y = -0.654
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliX(1))
tape.trainable_params = {0, 1}
dy = np.array([1.0, 2.0])
fn = dev.vjp(tape, dy)
vjp = fn(tape)
expected = np.array([-np.sin(x), 2 * np.cos(y)])
assert np.allclose(vjp, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_prob_expectation_values(self, dev, C):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev._state = dev._asarray(dev._state, C)
x = 0.543
y = -0.654
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[0, 1])
tape.trainable_params = {0, 1}
dy = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
with pytest.raises(qml.QuantumFunctionError, match="Adjoint differentiation method does"):
dev.vjp(tape, dy)(tape)
class TestBatchVectorJacobianProduct:
"""Tests for the batch_vjp function"""
@pytest.fixture
@pytest.mark.skipif(
not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_one_tape_no_trainable_parameters(self, dev, C):
"""A tape with no trainable parameters will simply return None"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys)
vjps = fn(tapes)
assert vjps[0] is None
assert vjps[1] is not None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_all_tapes_no_trainable_parameters(self, dev, C):
"""If all tapes have no trainable parameters all outputs will be None"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = set()
tape2.trainable_params = set()
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys)
vjps = fn(tapes)
assert vjps[0] is None
assert vjps[1] is None
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_zero_dy(self, dev, C):
"""A zero dy vector will return no tapes and a zero matrix"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {0}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([0.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys)
vjps = fn(tapes)
assert np.allclose(vjps[0], 0)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_reduction_append(self, dev, C):
"""Test the 'append' reduction strategy"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {0}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys, reduction="append")
vjps = fn(tapes)
assert len(vjps) == 2
assert all(isinstance(v, np.ndarray) for v in vjps)
assert all(len(v) == len(t.trainable_params) for t, v in zip(tapes, vjps))
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_reduction_append_callable(self, dev, C):
"""Test the 'append' reduction strategy"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {0}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys, reduction="append")
vjps = fn(tapes)
assert len(vjps) == 2
assert all(isinstance(v, np.ndarray) for v in vjps)
assert all(len(v) == len(t.trainable_params) for t, v in zip(tapes, vjps))
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_reduction_extend(self, dev, C):
"""Test the 'extend' reduction strategy"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {0}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys, reduction="extend")
vjps = fn(tapes)
assert len(vjps) == sum(len(t.trainable_params) for t in tapes)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_reduction_extend_callable(self, dev, C):
"""Test the 'extend' reduction strategy"""
dev._state = dev._asarray(dev._state, C)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.4, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
with qml.tape.QuantumTape() as tape2:
qml.RX(0.4, wires=0)
qml.RX(0.6, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape1.trainable_params = {0}
tape2.trainable_params = {0, 1}
tapes = [tape1, tape2]
dys = [np.array([1.0]), np.array([1.0])]
fn = dev.batch_vjp(tapes, dys, reduction=list.extend)
vjps = fn(tapes)
assert len(vjps) == sum(len(t.trainable_params) for t in tapes)
| 32.928177 | 100 | 0.574664 | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the ``vjp`` method of LightningQubit.
"""
from cmath import exp
import pytest
import pennylane as qml
from pennylane import numpy as np
try:
from pennylane_lightning.lightning_qubit_ops import (
VectorJacobianProductC64,
VectorJacobianProductC128,
)
except (ImportError, ModuleNotFoundError):
pytest.skip("No binary module found. Skipping.", allow_module_level=True)
class TestComputeVJP:
    """Tests for the numeric computation of VJPs"""
    @pytest.fixture
    def dev(self):
        # Two-qubit lightning device shared by every test in this class.
        return qml.device("lightning.qubit", wires=2)
    @pytest.mark.skipif(
        not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
    )
    def test_unsupported_complex_type(self, dev):
        """compute_vjp raises TypeError when the device state has dtype complex256."""
        dev._state = dev._asarray(dev._state, np.complex256)
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
        with pytest.raises(TypeError, match="Unsupported complex Type: complex256"):
            dev.compute_vjp(dy, jac)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_computation(self, tol, dev, C):
        """Test that the correct VJP is returned"""
        dev._state = dev._asarray(dev._state, C)
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
        vjp = dev.compute_vjp(dy, jac)
        # Reference value: contract dy with jac over the two output axes.
        expected = np.tensordot(dy, jac, axes=[[0, 1], [0, 1]])
        assert vjp.shape == (3,)
        assert np.allclose(vjp, expected, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_computation_num(self, tol, dev, C):
        """Test that the correct VJP is returned"""
        dev._state = dev._asarray(dev._state, C)
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
        # num=4 matches dy.size, so the explicit size hint is consistent.
        vjp = dev.compute_vjp(dy, jac, num=4)
        expected = np.tensordot(dy, jac, axes=[[0, 1], [0, 1]])
        assert vjp.shape == (3,)
        assert np.allclose(vjp, expected, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_computation_num_error(self, dev, C):
        """Test that the correct VJP is returned"""
        dev._state = dev._asarray(dev._state, C)
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
        # num=3 contradicts dy.size (4) and must be rejected.
        with pytest.raises(ValueError, match="Invalid size for the gradient-output vector"):
            dev.compute_vjp(dy, jac, num=3)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_jacobian_is_none(self, dev, C):
        """A None Jacobian returns a None VJP"""
        dev._state = dev._asarray(dev._state, C)
        dy = np.array([[1.0, 2.0], [3.0, 4.0]])
        jac = None
        vjp = dev.compute_vjp(dy, jac)
        assert vjp is None
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_zero_dy(self, dev, C):
        """A zero dy vector will return a zero matrix"""
        dev._state = dev._asarray(dev._state, C)
        dy = np.zeros([2, 2])
        jac = np.array([[[1.0, 0.1, 0.2], [0.2, 0.6, 0.1]], [[0.4, -0.7, 1.2], [-0.5, -0.6, 0.7]]])
        vjp = dev.compute_vjp(dy, jac)
        assert np.all(vjp == np.zeros([3]))
    def test_array_dy(self, dev):
        """Test vjp_compute using Python array"""
        dy = [1.0, 1.0, 1.0, 1.0]
        jac = [dy, dy, dy, dy]
        expected = [4.0, 4.0, 4.0, 4.0]
        vjp = dev.compute_vjp(dy, jac)
        assert np.all(vjp == expected)
    def test_torch_tensor_dy(self, dev):
        """Test vjp_compute using the Torch interface"""
        torch = pytest.importorskip("torch")
        dtype = getattr(torch, "float32")
        dy = torch.ones(4, dtype=dtype)
        jac = torch.ones((4, 4), dtype=dtype)
        expected = torch.tensor([4.0, 4.0, 4.0, 4.0], dtype=dtype)
        vjp = dev.compute_vjp(dy, jac)
        assert torch.all(vjp == expected)
    def test_tf_tensor_dy(self, dev):
        """Test vjp_compute using the Tensorflow interface"""
        tf = pytest.importorskip("tensorflow")
        dy = tf.ones(4, dtype=tf.float32)
        jac = tf.ones((4, 4), dtype=tf.float32)
        expected = tf.constant([4.0, 4.0, 4.0, 4.0], dtype=tf.float32)
        vjp = dev.compute_vjp(dy, jac)
        assert tf.reduce_all(vjp == expected)
class TestVectorJacobianProduct:
    """Tests for the `vjp` function"""
    @pytest.fixture
    def dev(self):
        # Fresh two-wire lightning.qubit device for each test.
        return qml.device("lightning.qubit", wires=2)
    @pytest.mark.skipif(
        not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
    )
    def test_unsupported_complex_type(self, dev):
        """vjp must raise a TypeError when the device state is cast to complex256."""
        dev._state = dev._asarray(dev._state, np.complex256)
        x, y, z = [0.5, 0.3, -0.7]
        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {1, 2, 3}
        dy = np.array([1.0])
        with pytest.raises(TypeError, match="Unsupported complex Type: complex256"):
            dev.vjp(tape, dy)(tape)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_use_device_state(self, tol, dev, C):
        """Tests that when using the device state, the correct answer is still returned."""
        dev._state = dev._asarray(dev._state, C)
        x, y, z = [0.5, 0.3, -0.7]
        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {1, 2, 3}
        dy = np.array([1.0])
        fn1 = dev.vjp(tape, dy)
        vjp1 = fn1(tape)
        # Execute once so the device holds the final state, then ask vjp to
        # reuse it instead of re-simulating; both paths must agree.
        qml.execute([tape], dev, None)
        fn2 = dev.vjp(tape, dy, use_device_state=True)
        vjp2 = fn2(tape)
        assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_provide_starting_state(self, tol, dev, C):
        """Tests provides correct answer when provided starting state."""
        dev._state = dev._asarray(dev._state, C)
        x, y, z = [0.5, 0.3, -0.7]
        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {1, 2, 3}
        dy = np.array([1.0])
        fn1 = dev.vjp(tape, dy)
        vjp1 = fn1(tape)
        # Execute once, then feed the device's pre-rotated state explicitly;
        # the result must match the from-scratch computation.
        qml.execute([tape], dev, None)
        fn2 = dev.vjp(tape, dy, starting_state=dev._pre_rotated_state)
        vjp2 = fn2(tape)
        assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_not_expval(self, dev, C):
        """Test if a QuantumFunctionError is raised for a tape with measurements that are not
        expectation values"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape:
            qml.RX(0.1, wires=0)
            qml.var(qml.PauliZ(0))
        dy = np.array([1.0])
        with pytest.raises(qml.QuantumFunctionError, match="Adjoint differentiation method does"):
            dev.vjp(tape, dy)(tape)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_finite_shots_warns(self, C):
        """Tests warning raised when finite shots specified"""
        # Needs its own device: the shared fixture has analytic (shots=None) mode.
        dev = qml.device("lightning.qubit", wires=1, shots=1)
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape:
            qml.expval(qml.PauliZ(0))
        dy = np.array([1.0])
        with pytest.warns(
            UserWarning, match="Requested adjoint differentiation to be computed with finite shots."
        ):
            dev.vjp(tape, dy)(tape)
    # Class-level import so the skipif marks below can query binary availability.
    from pennylane_lightning import LightningQubit as lq
    @pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_unsupported_op(self, dev, C):
        """Test if a QuantumFunctionError is raised for an unsupported operation, i.e.,
        multi-parameter operations that are not qml.Rot"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape:
            qml.CRot(0.1, 0.2, 0.3, wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        dy = np.array([1.0])
        with pytest.raises(
            qml.QuantumFunctionError, match="The CRot operation is not supported using the"
        ):
            dev.vjp(tape, dy)(tape)
        # Second tape reuses the same dy; SingleExcitation must be rejected too.
        with qml.tape.QuantumTape() as tape:
            qml.SingleExcitation(0.1, wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with pytest.raises(
            qml.QuantumFunctionError,
            match="The SingleExcitation operation is not supported using the",
        ):
            dev.vjp(tape, dy)(tape)
    @pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_proj_unsupported(self, dev, C):
        """Test if a QuantumFunctionError is raised for a Projector observable"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape:
            qml.CRX(0.1, wires=[0, 1])
            qml.expval(qml.Projector([0, 1], wires=[0, 1]))
        dy = np.array([1.0])
        with pytest.raises(
            qml.QuantumFunctionError, match="differentiation method does not support the Projector"
        ):
            dev.vjp(tape, dy)(tape)
        # A Projector inside a tensor product must be rejected as well.
        with qml.tape.QuantumTape() as tape:
            qml.CRX(0.1, wires=[0, 1])
            qml.expval(qml.Projector([0], wires=[0]) @ qml.PauliZ(0))
        with pytest.raises(
            qml.QuantumFunctionError, match="differentiation method does not support the Projector"
        ):
            dev.vjp(tape, dy)(tape)
    @pytest.mark.skipif(not lq._CPP_BINARY_AVAILABLE, reason="Lightning binary required")
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_unsupported_hermitian_expectation(self, dev, C):
        """Hermitian observables (alone or in a tensor product) must raise a
        QuantumFunctionError for the adjoint VJP."""
        dev._state = dev._asarray(dev._state, C)
        obs = np.array([[1, 0], [0, -1]], dtype=np.complex128, requires_grad=False)
        with qml.tape.QuantumTape() as tape:
            qml.RY(0.1, wires=(0,))
            qml.expval(qml.Hermitian(obs, wires=(0,)))
        dy = np.array([1.0])
        with pytest.raises(
            qml.QuantumFunctionError, match="Lightning adjoint differentiation method does not"
        ):
            dev.vjp(tape, dy)(tape)
        with qml.tape.QuantumTape() as tape:
            qml.RY(0.1, wires=(0,))
            qml.expval(qml.Hermitian(obs, wires=(0,)) @ qml.PauliZ(wires=1))
        with pytest.raises(
            qml.QuantumFunctionError, match="Lightning adjoint differentiation method does not"
        ):
            dev.vjp(tape, dy)(tape)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_no_trainable_parameters(self, dev, C):
        """A tape with no trainable parameters will simply return None"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.4
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {}
        dy = np.array([1.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        assert vjp is None
    # NOTE(review): the two tests below are byte-for-byte duplicates of
    # test_no_trainable_parameters — candidates for removal upstream.
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_no_trainable_parameters_NEW(self, dev, C):
        """A tape with no trainable parameters will simply return None"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.4
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {}
        dy = np.array([1.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        assert vjp is None
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_no_trainable_parameters_(self, dev, C):
        """A tape with no trainable parameters will simply return None"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.4
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {}
        dy = np.array([1.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        assert vjp is None
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_zero_dy(self, dev, C):
        """A zero dy vector will return no tapes and a zero matrix"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.4
        y = 0.6
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.RX(y, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {0, 1}
        dy = np.array([0.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        # One zero per trainable parameter.
        assert np.all(vjp == np.zeros([len(tape.trainable_params)]))
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_single_expectation_value(self, tol, dev, C):
        """Tests correct output shape and evaluation for a tape
        with a single expval output"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.543
        y = -0.654
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
        tape.trainable_params = {0, 1}
        dy = np.array([1.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        # Analytic gradient of <Z0 X1> = cos(x) sin(y) w.r.t. (x, y).
        expected = np.array([-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)])
        assert np.allclose(vjp, expected, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_multiple_expectation_values(self, tol, dev, C):
        """Tests correct output shape and evaluation for a tape
        with multiple expval outputs"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.543
        y = -0.654
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
            qml.expval(qml.PauliX(1))
        tape.trainable_params = {0, 1}
        dy = np.array([1.0, 2.0])
        fn = dev.vjp(tape, dy)
        vjp = fn(tape)
        # dy weights the two expval gradients: 1*d<Z0>/dx and 2*d<X1>/dy.
        expected = np.array([-np.sin(x), 2 * np.cos(y)])
        assert np.allclose(vjp, expected, atol=tol, rtol=0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_prob_expectation_values(self, dev, C):
        """Tests correct output shape and evaluation for a tape
        with prob and expval outputs"""
        dev._state = dev._asarray(dev._state, C)
        x = 0.543
        y = -0.654
        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
            qml.probs(wires=[0, 1])
        tape.trainable_params = {0, 1}
        dy = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
        # Mixing probs with expvals is unsupported by the adjoint method.
        with pytest.raises(qml.QuantumFunctionError, match="Adjoint differentiation method does"):
            dev.vjp(tape, dy)(tape)
class TestBatchVectorJacobianProduct:
    """Tests for the batch_vjp function"""
    @pytest.fixture
    def dev(self):
        # Fresh two-wire lightning.qubit device for each test.
        return qml.device("lightning.qubit", wires=2)
    @pytest.mark.skipif(
        not hasattr(np, "complex256"), reason="Numpy only defines complex256 in Linux-like system"
    )
    def test_unsupported_complex_type(self, dev):
        """batch_vjp must raise a TypeError when the device state is complex256."""
        dev._state = dev._asarray(dev._state, np.complex256)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        with pytest.raises(TypeError, match="Unsupported complex Type: complex256"):
            dev.batch_vjp(tapes, dys)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_one_tape_no_trainable_parameters(self, dev, C):
        """A tape with no trainable parameters will simply return None"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys)
        vjps = fn(tapes)
        # Only the tape without trainable parameters maps to None.
        assert vjps[0] is None
        assert vjps[1] is not None
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_all_tapes_no_trainable_parameters(self, dev, C):
        """If all tapes have no trainable parameters all outputs will be None"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = set()
        tape2.trainable_params = set()
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys)
        vjps = fn(tapes)
        assert vjps[0] is None
        assert vjps[1] is None
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_zero_dy(self, dev, C):
        """A zero dy vector will return no tapes and a zero matrix"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([0.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys)
        vjps = fn(tapes)
        # The tape with a zero cotangent yields an all-zero VJP.
        assert np.allclose(vjps[0], 0)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_reduction_append(self, dev, C):
        """Test the 'append' reduction strategy"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys, reduction="append")
        vjps = fn(tapes)
        # 'append' keeps one array per tape, sized by its trainable params.
        assert len(vjps) == 2
        assert all(isinstance(v, np.ndarray) for v in vjps)
        assert all(len(v) == len(t.trainable_params) for t, v in zip(tapes, vjps))
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_reduction_append_callable(self, dev, C):
        """Test the 'append' reduction strategy passed as a callable"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        # Fix: pass list.append itself (mirrors test_reduction_extend_callable
        # below); the string "append" duplicated test_reduction_append above and
        # left the callable code path untested.
        fn = dev.batch_vjp(tapes, dys, reduction=list.append)
        vjps = fn(tapes)
        assert len(vjps) == 2
        assert all(isinstance(v, np.ndarray) for v in vjps)
        assert all(len(v) == len(t.trainable_params) for t, v in zip(tapes, vjps))
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_reduction_extend(self, dev, C):
        """Test the 'extend' reduction strategy"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys, reduction="extend")
        vjps = fn(tapes)
        # 'extend' flattens all per-tape VJPs into one list.
        assert len(vjps) == sum(len(t.trainable_params) for t in tapes)
    @pytest.mark.parametrize("C", [np.complex64, np.complex128])
    def test_reduction_extend_callable(self, dev, C):
        """Test the 'extend' reduction strategy passed as a callable"""
        dev._state = dev._asarray(dev._state, C)
        with qml.tape.QuantumTape() as tape1:
            qml.RX(0.4, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        with qml.tape.QuantumTape() as tape2:
            qml.RX(0.4, wires=0)
            qml.RX(0.6, wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
        tape1.trainable_params = {0}
        tape2.trainable_params = {0, 1}
        tapes = [tape1, tape2]
        dys = [np.array([1.0]), np.array([1.0])]
        fn = dev.batch_vjp(tapes, dys, reduction=list.extend)
        vjps = fn(tapes)
        assert len(vjps) == sum(len(t.trainable_params) for t in tapes)
| 2,556 | 0 | 182 |
0a101192ae0b1abd0f873dc0f51892babefc1c5e | 31,147 | py | Python | test/initial_sharding.py | Santiclause/vitess | 870177ca857e3bba2941a999174cc97d2c40c864 | [
"Apache-2.0"
] | null | null | null | test/initial_sharding.py | Santiclause/vitess | 870177ca857e3bba2941a999174cc97d2c40c864 | [
"Apache-2.0"
] | null | null | null | test/initial_sharding.py | Santiclause/vitess | 870177ca857e3bba2941a999174cc97d2c40c864 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test simulates the first time a database has to be split.
- we start with a keyspace with a single shard and a single table
- we add and populate the sharding key
- we set the sharding key in the topology
- we clone into 2 instances
- we enable filtered replication
- we move all serving types
- we remove the source tablets
- we remove the original shard
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
# use_l2vtgate is set if we want to use l2vtgate processes.
# We'll set them up to have:
# l2vtgate1: covers the initial shard, and -80
# l2vtgate2: covers 80-
use_l2vtgate = False
# the l2vtgate processes, if applicable
l2vtgate1 = None
l2vtgate2 = None
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
all_tablets = [shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]
# create_schema will create the same schema on the keyspace
# _insert_startup_value inserts a value in the MySQL database before it
# is sharded
# _check_lots returns how many of the values we have, in percents.
# _check_lots_not_present makes sure no data is in the wrong shard
if __name__ == '__main__':
utils.main()
| 43.020718 | 81 | 0.650239 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test simulates the first time a database has to be split.
- we start with a keyspace with a single shard and a single table
- we add and populate the sharding key
- we set the sharding key in the topology
- we clone into 2 instances
- we enable filtered replication
- we move all serving types
- we remove the source tablets
- we remove the original shard
"""
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
# use_l2vtgate is set if we want to use l2vtgate processes.
# We'll set them up to have:
# l2vtgate1: covers the initial shard, and -80
# l2vtgate2: covers 80-
use_l2vtgate = False
# the l2vtgate processes, if applicable
l2vtgate1 = None
l2vtgate2 = None
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
all_tablets = [shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]
def setUpModule():
  """Start the topo server and bring up MySQL for every tablet."""
  try:
    environment.topo_server().setup()
    utils.wait_procs([t.init_mysql() for t in all_tablets])
  except:
    # Clean up anything that partially started before propagating the error.
    tearDownModule()
    raise
def tearDownModule():
  """Tear down MySQL, the topo server, helper processes and scratch files."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for tb in all_tablets:
    tb.remove_tree()
class TestInitialSharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
def _create_schema(self):
# Note that the primary key columns are not defined first on purpose to test
# that a reordered column list is correctly used everywhere in vtworker.
create_table_template = '''create table %s(
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
def _add_sharding_key_to_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s add custom_ksid_col ' + t
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
def _mark_sharding_key_not_null(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s modify custom_ksid_col ' + t + ' not null'
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
# _insert_startup_value inserts a value in the MySQL database before it
# is sharded
def _insert_startup_value(self, tablet_obj, table, mid, msg):
tablet_obj.mquery('vt_test_keyspace', [
'begin',
'insert into %s(parent_id, id, msg) values(%d, %d, "%s")' %
(table, base_sharding.fixed_parent_id, mid, msg),
'commit'
], write=True)
def _insert_startup_values(self):
self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')
def _backfill_keyspace_id(self, tablet_obj):
tablet_obj.mquery('vt_test_keyspace', [
'begin',
'update resharding1 set custom_ksid_col=0x1000000000000000 where id=1',
'update resharding1 set custom_ksid_col=0x9000000000000000 where id=2',
'update resharding1 set custom_ksid_col=0xD000000000000000 where id=3',
'commit'
], write=True)
def _check_startup_values(self):
# check first value is in the left shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1',
0x1000000000000000, should_be_here=False)
# check second value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
# check third value is in the right shard too
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('enough data went through', timeout)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def test_resharding(self):
global l2vtgate1, l2vtgate2
# create the keyspace with just one shard
shard_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='0',
tablet_index=0)
shard_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='0',
tablet_index=1)
shard_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='0',
tablet_index=2)
for t in [shard_master, shard_replica, shard_rdonly1]:
t.create_db('vt_test_keyspace')
# replica is not started, InitShardMaster should timeout
shard_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_rdonly1.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_master, shard_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work - expect fail
# because replica tablet is not up
_, stderr = utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
shard_master.tablet_alias], auto_log=True, expect_fail=True)
self.assertIn('Tablet test_nj-0000062345 ResetReplication failed', stderr)
# start replica
shard_replica.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_replica.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
shard_master.tablet_alias], auto_log=True)
utils.wait_for_tablet_type(shard_replica.tablet_alias, 'replica')
utils.wait_for_tablet_type(shard_rdonly1.tablet_alias, 'rdonly')
for t in [shard_master, shard_replica, shard_rdonly1]:
t.wait_for_vttablet_state('SERVING')
# create the tables and add startup values
self._create_schema()
self._insert_startup_values()
# reload schema on all tablets so we can query them
for t in [shard_master, shard_replica, shard_rdonly1]:
utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
# We must start vtgate after tablets are up, or else wait until 1min refresh
# (that is the tablet_refresh_interval parameter for discovery gateway)
# we want cache_ttl at zero so we re-read the topology for every test query.
if use_l2vtgate:
l2vtgate1 = utils.VtGate()
l2vtgate1.start(extra_args=['--enable_forwarding'], tablets=
[shard_master, shard_replica, shard_rdonly1])
l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1)
_, l2vtgate1_addr = l2vtgate1.rpc_endpoint()
# Clear utils.vtgate, so it doesn't point to the previous l2vtgate1.
utils.vtgate = None
utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr,],
extra_args=['-disable_local_gateway'])
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1,
var='L2VtgateConnections')
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1,
var='L2VtgateConnections')
utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1,
var='L2VtgateConnections')
else:
utils.VtGate().start(cache_ttl='0', tablets=[
shard_master, shard_replica, shard_rdonly1])
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1)
# check the Map Reduce API works correctly, should use ExecuteShards,
# as we're not sharded yet.
# we have 3 values in the database, asking for 4 splits will get us
# a single query.
sql = 'select id, msg from resharding1'
s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
self.assertEqual(len(s), 1)
self.assertEqual(s[0]['shard_part']['shards'][0], '0')
# change the schema, backfill keyspace_id, and change schema again
self._add_sharding_key_to_schema()
self._backfill_keyspace_id(shard_master)
self._mark_sharding_key_not_null()
# now we can be a sharded keyspace (and propagate to SrvKeyspace)
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'custom_ksid_col', base_sharding.keyspace_id_type])
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
# run a health check on source replica so it responds to discovery
utils.run_vtctl(['RunHealthCheck', shard_replica.tablet_alias])
# create the split shards
shard_0_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='-80',
tablet_index=0)
shard_0_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='-80',
tablet_index=1)
shard_0_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='-80',
tablet_index=2)
shard_1_master.init_tablet(
'replica',
keyspace='test_keyspace',
shard='80-',
tablet_index=0)
shard_1_replica.init_tablet(
'replica',
keyspace='test_keyspace',
shard='80-',
tablet_index=1)
shard_1_rdonly1.init_tablet(
'rdonly',
keyspace='test_keyspace',
shard='80-',
tablet_index=2)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
for t in [shard_0_replica, shard_1_replica]:
utils.wait_for_tablet_type(t.tablet_alias, 'replica')
for t in [shard_0_rdonly1, shard_1_rdonly1]:
utils.wait_for_tablet_type(t.tablet_alias, 'rdonly')
sharded_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]
for t in sharded_tablets:
t.wait_for_vttablet_state('SERVING')
# must restart vtgate after tablets are up, or else wait until 1min refresh
# we want cache_ttl at zero so we re-read the topology for every test query.
utils.vtgate.kill()
if use_l2vtgate:
l2vtgate1.kill()
l2vtgate1 = utils.VtGate()
l2vtgate1.start(extra_args=['--enable_forwarding',
'-tablet_filters',
'test_keyspace|0,test_keyspace|-80'],
tablets=[shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica,
shard_0_rdonly1])
l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.master', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.replica', 1)
l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
l2vtgate1.verify_no_endpoint('test_keyspace.80-.master')
l2vtgate1.verify_no_endpoint('test_keyspace.80-.replica')
l2vtgate1.verify_no_endpoint('test_keyspace.80-.rdonly')
# FIXME(alainjobart) we clear tablet_types_to_wait, as this
# l2vtgate2 doesn't serve the current test_keyspace shard, which
# is test_keyspace.0. This is not ideal, we should re-work
# which keyspace/shard a l2vtgate can wait for, as the ones
# filtered by tablet_filters.
l2vtgate2 = utils.VtGate()
l2vtgate2.start(extra_args=['--enable_forwarding',
'-tablet_filters',
'test_keyspace|80-'], tablets=
[shard_1_master, shard_1_replica, shard_1_rdonly1],
tablet_types_to_wait='')
l2vtgate2.wait_for_endpoints('test_keyspace.80-.master', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.replica', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
l2vtgate2.verify_no_endpoint('test_keyspace.0.master')
l2vtgate2.verify_no_endpoint('test_keyspace.0.replica')
l2vtgate2.verify_no_endpoint('test_keyspace.0.rdonly')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.master')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.replica')
l2vtgate2.verify_no_endpoint('test_keyspace.-80.rdonly')
_, l2vtgate1_addr = l2vtgate1.rpc_endpoint()
_, l2vtgate2_addr = l2vtgate2.rpc_endpoint()
utils.vtgate = None
utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr,
l2vtgate2_addr,],
extra_args=['-disable_local_gateway'])
var = 'L2VtgateConnections'
else:
utils.vtgate = None
utils.VtGate().start(cache_ttl='0', tablets=[
shard_master, shard_replica, shard_rdonly1,
shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1])
var = None
# Wait for the endpoints, either local or remote.
utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.replica', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1, var=var)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1, var=var)
# check the Map Reduce API works correctly, should use ExecuteKeyRanges now,
# as we are sharded (with just one shard).
# again, we have 3 values in the database, asking for 4 splits will get us
# a single query.
sql = 'select id, msg from resharding1'
s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
self.assertEqual(len(s), 1)
self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
# There must be one empty KeyRange which represents the full keyspace.
self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
self.assertEqual(s[0]['key_range_part']['key_ranges'][0], {})
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
utils.run_vtctl(['CopySchemaShard',
'--exclude_tables', 'unrelated',
shard_rdonly1.tablet_alias,
keyspace_shard],
auto_log=True)
utils.run_vtctl(['RunHealthCheck', shard_rdonly1.tablet_alias])
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
3, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 1 (provokes an insert).
shard_0_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=1', write=True)
# Delete row 2 (provokes an insert).
shard_1_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=2', write=True)
# Update row 3 (provokes an update).
shard_1_master.mquery('vt_test_keyspace',
"update resharding1 set msg='msg-not-3' where id=3",
write=True)
# Insert row 4 (provokes a delete).
self._insert_value(shard_1_master, 'resharding1', 4, 'msg4',
0xD000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/0'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 1, 1, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 3)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check the binlog players are running
logging.debug('Waiting for binlog players to start on new masters...')
self.check_destination_master(shard_0_master, ['test_keyspace/0'])
self.check_destination_master(shard_1_master, ['test_keyspace/0'])
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_replica, horizontal=True)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
self.check_binlog_player_vars(shard_0_master, ['test_keyspace/0'],
seconds_behind_master_max=30)
self.check_binlog_player_vars(shard_1_master, ['test_keyspace/0'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_replica, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data
logging.debug('Running vtworker SplitDiff for -80')
for t in [shard_0_rdonly1, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/-80'],
auto_log=True)
logging.debug('Running vtworker SplitDiff for 80-')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for the destination master tablet, make sure we have it all
self.check_running_binlog_player(shard_0_master, 2000, 2000)
self.check_running_binlog_player(shard_1_master, 6000, 2000)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
expect_fail=True)
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# make sure rdonly tablets are back to serving before hitting vtgate.
for t in [shard_0_rdonly1, shard_1_rdonly1]:
t.wait_for_vttablet_state('SERVING')
if use_l2vtgate:
l2vtgate1.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
l2vtgate2.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
else:
utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1)
utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1)
# check the Map Reduce API works correctly, should use ExecuteKeyRanges
# on both destination shards now.
# we ask for 2 splits to only have one per shard
sql = 'select id, msg from resharding1'
timeout = 10.0
while True:
try:
s = utils.vtgate.split_query(sql, 'test_keyspace', 2)
break
except Exception: # pylint: disable=broad-except
timeout = utils.wait_step(
'vtgate executes split_query properly', timeout)
self.assertEqual(len(s), 2)
self.assertEqual(s[0]['key_range_part']['keyspace'], 'test_keyspace')
self.assertEqual(s[1]['key_range_part']['keyspace'], 'test_keyspace')
self.assertEqual(len(s[0]['key_range_part']['key_ranges']), 1)
self.assertEqual(len(s[1]['key_range_part']['key_ranges']), 1)
# then serve replica from the split shards
source_tablet = shard_replica
destination_tablets = [shard_0_replica, shard_1_replica]
utils.run_vtctl(
['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, source_tablet, True, False)
utils.check_tablet_query_services(self, destination_tablets, False, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, source_tablet, False, True)
utils.check_tablet_query_services(self, destination_tablets, True, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# then serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# check the binlog players are gone now
self.check_no_binlog_player(shard_0_master)
self.check_no_binlog_player(shard_1_master)
# make sure we can't delete a shard with tablets
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
# remove the original tablets in the original shard
tablet.kill_tablets([shard_master, shard_replica, shard_rdonly1])
for t in [shard_replica, shard_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_master.tablet_alias], auto_log=True)
# rebuild the serving graph, all mentions of the old shards shoud be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1])
# Standard test-runner entry point: delegate to the test utils' main(),
# which parses flags and runs the unittest suite for this module.
if __name__ == '__main__':
    utils.main()
| 28,452 | 56 | 365 |
714062181c6a8c08dde0d08f41e9a77ca3c05897 | 229 | py | Python | tests/settings_pg.py | maykinmedia/djadyen | 8bde7172c72d68975d4a77c7ef6bed73412619dc | [
"BSD-3-Clause"
] | 3 | 2018-10-19T06:57:50.000Z | 2020-11-12T11:20:37.000Z | tests/settings_pg.py | maykinmedia/djadyen | 8bde7172c72d68975d4a77c7ef6bed73412619dc | [
"BSD-3-Clause"
] | 16 | 2017-02-14T12:37:58.000Z | 2019-04-25T07:55:42.000Z | tests/settings_pg.py | maykinmedia/djadyen | 8bde7172c72d68975d4a77c7ef6bed73412619dc | [
"BSD-3-Clause"
] | 2 | 2018-05-16T10:08:34.000Z | 2019-09-29T23:31:04.000Z | from .settings import *
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'djadyen',
# 'USERNAME': 'djadyen',
# 'PASSWORD': 'djadyen',
# }
# }
| 20.818182 | 61 | 0.50655 | from .settings import *
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'djadyen',
# 'USERNAME': 'djadyen',
# 'PASSWORD': 'djadyen',
# }
# }
| 0 | 0 | 0 |
c88544e218ee014200864ebbb2fdc9913c90a8f4 | 3,068 | py | Python | super_sac/adv_estimator.py | QData/super_sac | f44f0ab88bc462da0cee354b406822db7084f430 | [
"MIT"
] | 1 | 2021-12-10T20:25:44.000Z | 2021-12-10T20:25:44.000Z | super_sac/adv_estimator.py | QData/super_sac | f44f0ab88bc462da0cee354b406822db7084f430 | [
"MIT"
] | null | null | null | super_sac/adv_estimator.py | QData/super_sac | f44f0ab88bc462da0cee354b406822db7084f430 | [
"MIT"
] | 1 | 2022-01-14T19:26:14.000Z | 2022-01-14T19:26:14.000Z | import random
import torch
from torch import nn
import torch.nn.functional as F
| 34.088889 | 86 | 0.563233 | import random
import torch
from torch import nn
import torch.nn.functional as F
class AdvantageEstimator(nn.Module):
    """Estimates the advantage A(s, a) = Q(s, a) - V(s) from an ensemble of
    actors and critics.

    V(s) is never represented explicitly; it is recovered from the critics:
    as the policy-probability-weighted sum of Q values for discrete action
    spaces, or as the mean/max of Q over actions sampled from the policy for
    continuous action spaces.
    """

    def __init__(
        self,
        encoder,
        actors,
        critics,
        popart=False,
        discrete_method="indirect",
        continuous_method="mean",
        discrete=False,
    ):
        """
        :param encoder: maps raw observations to state representations.
        :param actors: sequence of policy networks (one per ensemble member).
        :param critics: sequence of Q networks (one per ensemble member).
        :param popart: either ``False`` (disabled) or a sequence of
            per-critic PopArt de-normalizers aligned with ``critics``
            (entries may be falsy to disable individual members).
        :param discrete_method: "indirect" (V from policy-weighted Q) or
            "direct" (dueling-style; not implemented yet).
        :param continuous_method: "mean" or "max" aggregation over sampled
            Q values when estimating V(s) for continuous actions.
        :param discrete: ``True`` for discrete action spaces.
        """
        super().__init__()
        assert continuous_method in ["mean", "max"]
        assert discrete_method in ["indirect", "direct"]
        self.encoder = encoder
        self.actors = actors
        self.critics = critics
        self.popart = popart
        self.cont_method = continuous_method
        self.discrete = discrete
        self.discrete_method = discrete_method

    def pop(self, ensemble_idx, *args):
        """Query critic ``ensemble_idx``, de-normalizing with its PopArt
        layer when one is configured."""
        q = self.critics[ensemble_idx](*args)
        # BUG FIX: guard against popart=False (the default). Previously
        # `self.popart[ensemble_idx]` raised TypeError when popart was
        # disabled, because False is not subscriptable.
        if self.popart and self.popart[ensemble_idx]:
            return self.popart[ensemble_idx](q)
        else:
            return q

    def discrete_direct_forward(self, obs, action, ensemble_idx):
        # use dueling arch adv
        # BUG FIX: signature now matches the call site in forward(), which
        # always passes ensemble_idx; before, the call raised TypeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError

    def discrete_indirect_forward(self, obs, action, ensemble_idx):
        """A(s, a) for discrete actions; V(s) is the ensemble-averaged
        policy probability weighted sum of Q values."""
        state_rep = self.encoder(obs)
        with torch.no_grad():
            # V(s) = E_{a ~ \pi(s)} [Q(s, a)]
            probs = torch.stack(
                [actor(state_rep).probs for actor in self.actors], dim=0
            ).mean(0)
            min_q = self.pop(ensemble_idx, state_rep)
            value = (probs * min_q).sum(-1, keepdim=True)
        # Q(s, a)
        q_preds = self.pop(ensemble_idx, state_rep).gather(-1, action.long())
        # A(s, a) = Q(s, a) - V(s)
        adv = q_preds - value
        return adv

    def continuous_forward(self, obs, action, ensemble_idx, n=4):
        """A(s, a) for continuous actions; V(s) is estimated from ``n``
        actions sampled from the current policy (no gradients flow through
        the value estimate)."""
        with torch.no_grad():
            # get an action distribution from the policy
            state_rep = self.encoder(obs)
            policy_actions = [
                self.actors[ensemble_idx](state_rep).sample() for _ in range(n)
            ]
            # get the q value for each of the n actions
            q_a_preds = torch.stack(
                [self.pop(ensemble_idx, state_rep, act) for act in policy_actions],
                dim=0,
            )
            if self.cont_method == "mean":
                # V(s) = E_{a ~ \pi(s)} [Q(s, a)]
                value = q_a_preds.mean(0)
            elif self.cont_method == "max":
                # Optimistic value estimate: V(s) = max_{a1, ..., aN}(Q(s, a))
                value = q_a_preds.max(0).values
        q_preds = self.pop(ensemble_idx, state_rep, action)
        # A(s, a) = Q(s, a) - V(s)
        adv = q_preds - value
        return adv

    def forward(self, obs, action, ensemble_idx):
        """Dispatch to the discrete or continuous advantage estimator."""
        if self.discrete:
            if self.discrete_method == "indirect":
                return self.discrete_indirect_forward(obs, action, ensemble_idx)
            elif self.discrete_method == "direct":
                return self.discrete_direct_forward(obs, action, ensemble_idx)
        else:
            return self.continuous_forward(obs, action, ensemble_idx)
| 2,787 | 15 | 184 |
96a4c857fca98f8eee279e5b54706931a2a8bdf5 | 478 | py | Python | AiSD_03/Zadanie_9.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | AiSD_03/Zadanie_9.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | AiSD_03/Zadanie_9.py | DLQuake/Algorytmy_i_struktury_danych | 210d0b4e868e5cc9d6aa730a2297d8074e4d52a1 | [
"MIT"
] | null | null | null | # Zaimplementować funkcję remove_duplicates(txt: str) -> str, która zwróci wartość parametru txt pozbawioną sąsiadujących duplikujących się znaków. Przykład: XXYZZZ -> XYZ
print(remove_duplicates("XXYZZ")) | 31.866667 | 171 | 0.682008 | # Zaimplementować funkcję remove_duplicates(txt: str) -> str, która zwróci wartość parametru txt pozbawioną sąsiadujących duplikujących się znaków. Przykład: XXYZZZ -> XYZ
# Implement remove_duplicates(txt: str) -> str, returning txt with adjacent
# duplicate characters removed. Example: XXYZZZ -> XYZ
def remove_duplicates(txt: str) -> str:
    """Collapse runs of adjacent duplicate characters to a single one.

    Iterative rewrite of the original recursive version: avoids hitting
    Python's recursion limit on long inputs and the O(n^2) cost of repeated
    string slicing and concatenation.

    :param txt: input string (may be empty).
    :return: ``txt`` with adjacent duplicates removed, e.g. "XXYZZZ" -> "XYZ".
    """
    out = []
    for ch in txt:
        # Keep a character only if it differs from the last one kept.
        if not out or out[-1] != ch:
            out.append(ch)
    return "".join(out)
print(remove_duplicates("XXYZZ")) | 249 | 0 | 23 |
c88837628aad1517d6dea3ae4d7df98cf1f290e6 | 4,476 | py | Python | bomba.py | ncos/checkq | 79506a9eafc9c5dacc84ee3dc37f799d734f4701 | [
"MIT"
] | 3 | 2016-01-25T02:19:28.000Z | 2019-09-01T22:03:31.000Z | bomba.py | ncos/checkq | 79506a9eafc9c5dacc84ee3dc37f799d734f4701 | [
"MIT"
] | null | null | null | bomba.py | ncos/checkq | 79506a9eafc9c5dacc84ee3dc37f799d734f4701 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import cv2
import numpy as np
p = Process('/root/Desktop/b.jpg')
img_disp = ImageDisplay('result')
img_disp.spin(p)
| 33.155556 | 138 | 0.59294 | #!/usr/bin/python
import cv2
import numpy as np
class ImageDisplay:
def __init__(self, windowname):
self.windowname = windowname
cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
def show_blend(self, images, windowname):
if len(images) == 0:
print "WARNING: No images provided!"
return
height0, width0 = images[0].shape[:2]
for i, image in enumerate(images):
if (len(image.shape) != 3) or (image.shape[2] != 3):
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
height, width = image.shape[:2]
if (height != height0):
image = cv2.resize(image, (int(float(width*height0)/float(height)), height0), fx=0, fy=0, interpolation = cv2.INTER_CUBIC)
if (i == 0):
both = image
continue
both = np.hstack((both, image))
cv2.imshow(windowname, both)
def show_separate(self, images, windowname):
if len(images) == 0:
print "WARNING: No images provided!"
return
for i, image in enumerate(images):
name = windowname
if i != 0:
name = windowname + '_' + str(i)
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.imshow(name, image)
def show(self, images, windowname = 'result', blend = True):
if blend:
self.show_blend(images, windowname)
else:
self.show_separate(images, windowname)
def show_wait(self, images, blend = True):
self.show(images, blend)
cv2.waitKey(0)
cv2.destroyAllWindows()
def spin(self, imager):
imager.init_interface(self.windowname)
while (1):
images = imager.get()
self.show_blend(images, self.windowname)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
class Process:
    """Loads an image and produces thresholded variants of it, with the
    processing parameters driven by OpenCV trackbars.

    NOTE(review): written for Python 2 (xrange in the commented-out code)
    and depends on cv2/numpy; the image path is supplied by the caller.
    """

    def __init__(self, path):
        """Read the image at ``path`` and precompute BGR/HSV/gray versions."""
        self.windowname = ''
        self.orig_bgr = cv2.imread(path, cv2.IMREAD_COLOR)
        self.orig_bgr = cv2.resize(self.orig_bgr, None, fx=1.0, fy=1.0, interpolation = cv2.INTER_CUBIC)
        self.orig_hsv = cv2.cvtColor(self.orig_bgr, cv2.COLOR_BGR2HSV)
        self.orig_gry = cv2.cvtColor(self.orig_bgr, cv2.COLOR_BGR2GRAY)

    def nothing(self, *args):
        # No-op trackbar callback.
        # BUG FIX: cv2.createTrackbar invokes the callback with the new
        # slider position; the old zero-argument signature raised a
        # TypeError whenever a trackbar was moved. Accept (and ignore) it.
        return

    def init_interface(self, windowname):
        """Attach the six parameter trackbars (l1-l3, r1-r3) to ``windowname``."""
        self.windowname = windowname
        cv2.createTrackbar('l1', self.windowname, 10, 255, self.nothing)
        cv2.createTrackbar('l2', self.windowname, 5, 255, self.nothing)
        cv2.createTrackbar('l3', self.windowname, 0, 255, self.nothing)
        cv2.createTrackbar('r1', self.windowname, 255, 255, self.nothing)
        cv2.createTrackbar('r2', self.windowname, 255, 255, self.nothing)
        cv2.createTrackbar('r3', self.windowname, 255, 255, self.nothing)

    def hist(self, image):
        """Plot a 256-bin intensity histogram (debug helper; imports
        matplotlib lazily so the GUI path has no hard dependency on it)."""
        from matplotlib import pyplot as plt
        plt.hist(image.ravel(),256,[0,256])
        #plt.draw()

    def get(self):
        """Return [inverted illumination-difference image, adaptive
        threshold of the V channel, adaptive threshold of the inverted
        difference image], using the current trackbar values."""
        r1 = cv2.getTrackbarPos('r1', self.windowname)
        r2 = cv2.getTrackbarPos('r2', self.windowname)
        r3 = cv2.getTrackbarPos('r3', self.windowname)
        l1 = cv2.getTrackbarPos('l1', self.windowname)
        l2 = cv2.getTrackbarPos('l2', self.windowname)
        l3 = cv2.getTrackbarPos('l3', self.windowname)

        # Remove light: dilate the value channel and subtract the original,
        # leaving mostly the fine structure.
        h, s, v = cv2.split(self.orig_hsv)
        kernel = np.ones((9*2+1, 9*2+1), np.uint8)
        v_dilated = cv2.dilate(v, kernel, iterations = 1)
        v_out = cv2.subtract(v_dilated, v)
        #ret, v_t = cv2.threshold(v, l3, r3, cv2.THRESH_TRUNC)

        # Binarization (earlier experiments kept for reference)
        #ret, ots = cv2.threshold(v_out, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        #et, ots2 = cv2.threshold(v, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        #self.hist(v_out)
        #for i in xrange(l1):
        #    ret, mask = cv2.threshold(v_out, l2, 255, cv2.THRESH_TOZERO)
        #    v_out = cv2.bitwise_and(v_out, mask)
        #    v_out = cv2.add(v_out, (v_out/l3))
        v_out = cv2.bitwise_not(v_out)
        # Gaussian adaptive threshold; block size must be odd, hence l1*2+1.
        th3 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,l1*2+1,l2)
        th4 = cv2.adaptiveThreshold(v_out, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,l1*2+1,l2)
        return [v_out, th3, th4]
# Script entry: process a hard-coded image and display the results,
# refreshing continuously until ESC is pressed.
p = Process('/root/Desktop/b.jpg')
img_disp = ImageDisplay('result')
img_disp.spin(p)
| 4,000 | -9 | 345 |
13f06832e4295172b8c261e2389cec10b0362703 | 3,512 | py | Python | moksha.hub/moksha/hub/__init__.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 11 | 2015-01-17T04:27:26.000Z | 2018-06-07T21:56:25.000Z | moksha.hub/moksha/hub/__init__.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 41 | 2015-05-08T18:54:46.000Z | 2019-10-25T05:08:31.000Z | moksha.hub/moksha/hub/__init__.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 16 | 2015-06-26T21:52:42.000Z | 2021-05-10T17:27:32.000Z | # This file is part of Moksha.
# Copyright (C) 2008-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import signal
import sys
import os
try:
from twisted.internet.error import ReactorNotRunning
except ImportError: # Twisted 8.2.0 on RHEL5
from moksha.common.lib.helpers import appconfig
from moksha.common.lib.helpers import get_moksha_config_path
log = logging.getLogger('moksha.hub')
NO_CONFIG_MESSAGE = """
Cannot find Moksha configuration! Place a development.ini or production.ini
in /etc/moksha or in the current directory.
"""
from moksha.hub.hub import CentralMokshaHub
def main(options=None, consumers=None, producers=None, framework=True):
""" The main MokshaHub method """
# If we're running as a framework, then we're strictly calling other
# people's code. So, as the outermost piece of software in the stack, we're
# responsible for setting up logging.
# If we're not running as a framework, but as a library, then someone else
# is calling us. Therefore, we'll let them set up the logging themselves.
if framework:
setup_logger('-v' in sys.argv or '--verbose' in sys.argv)
config = {}
if not options:
if sys.argv[-1].endswith('.ini'):
config_path = os.path.abspath(sys.argv[-1])
else:
config_path = get_moksha_config_path()
if not config_path:
print(NO_CONFIG_MESSAGE)
return
cfg = appconfig('config:' + config_path)
config.update(cfg)
else:
config.update(options)
hub = CentralMokshaHub(config, consumers=consumers, producers=producers)
global _hub
_hub = hub
signal.signal(signal.SIGHUP, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
log.info("Running the MokshaHub reactor")
from moksha.hub.reactor import reactor
threadcount = config.get('moksha.threadpool_size', None)
if not threadcount:
N = int(config.get('moksha.workers_per_consumer', 1))
threadcount = 1 + hub.num_producers + hub.num_consumers * N
threadcount = int(threadcount)
log.info("Suggesting threadpool size at %i" % threadcount)
reactor.suggestThreadPoolSize(threadcount)
reactor.run(installSignalHandlers=False)
log.info("MokshaHub reactor stopped")
| 31.63964 | 80 | 0.693622 | # This file is part of Moksha.
# Copyright (C) 2008-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import signal
import sys
import os
try:
from twisted.internet.error import ReactorNotRunning
except ImportError: # Twisted 8.2.0 on RHEL5
class ReactorNotRunning(object):
pass
from moksha.common.lib.helpers import appconfig
from moksha.common.lib.helpers import get_moksha_config_path
log = logging.getLogger('moksha.hub')
NO_CONFIG_MESSAGE = """
Cannot find Moksha configuration! Place a development.ini or production.ini
in /etc/moksha or in the current directory.
"""
from moksha.hub.hub import CentralMokshaHub
def setup_logger(verbose):
    """Configure root logging for the hub.

    Installs a basic stream handler (only if the root logger has none yet),
    sets the root level to DEBUG when ``verbose`` is true (INFO otherwise),
    and applies the hub's log-line format to the first root handler.

    :param verbose: whether to enable DEBUG-level output.
    """
    logging.basicConfig()
    root = logging.getLogger()
    handler = root.handlers[0]
    # Conditional expression instead of the dated `x and a or b` idiom.
    level = logging.DEBUG if verbose else logging.INFO
    root.setLevel(level)
    # Renamed from `format`, which shadowed the builtin.
    formatter = logging.Formatter(
        '[%(name)12s] %(levelname)s %(asctime)s %(message)s')
    handler.setFormatter(formatter)
def main(options=None, consumers=None, producers=None, framework=True):
    """ The main MokshaHub method

    :param options: optional pre-built configuration dict; when omitted the
        configuration is loaded from an .ini file (last CLI argument, or a
        discovered moksha config).
    :param consumers: optional consumer classes handed to the hub.
    :param producers: optional producer classes handed to the hub.
    :param framework: True when running standalone (we own logging and
        signal handling); False when embedded as a library.
    """
    # If we're running as a framework, then we're strictly calling other
    # people's code. So, as the outermost piece of software in the stack, we're
    # responsible for setting up logging.

    # If we're not running as a framework, but as a library, then someone else
    # is calling us. Therefore, we'll let them set up the logging themselves.
    if framework:
        setup_logger('-v' in sys.argv or '--verbose' in sys.argv)

    # Build the hub configuration: caller-supplied options win; otherwise
    # load an .ini (explicit CLI path or a discovered development.ini /
    # production.ini) and bail out with a message if none is found.
    config = {}
    if not options:
        if sys.argv[-1].endswith('.ini'):
            config_path = os.path.abspath(sys.argv[-1])
        else:
            config_path = get_moksha_config_path()

        if not config_path:
            print(NO_CONFIG_MESSAGE)
            return

        cfg = appconfig('config:' + config_path)
        config.update(cfg)
    else:
        config.update(options)

    hub = CentralMokshaHub(config, consumers=consumers, producers=producers)
    # Keep a module-level handle on the running hub (used for
    # introspection/teardown from outside this function).
    global _hub
    _hub = hub

    def handle_signal(signum, stackframe):
        # Shut the hub and the Twisted reactor down cleanly on SIGHUP/SIGINT.
        # The reactor import is deferred so the reactor is only touched once
        # it actually exists.
        from moksha.hub.reactor import reactor
        if signum in [signal.SIGHUP, signal.SIGINT]:
            hub.stop()
            try:
                reactor.stop()
            except ReactorNotRunning:
                pass

    signal.signal(signal.SIGHUP, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

    log.info("Running the MokshaHub reactor")
    from moksha.hub.reactor import reactor

    # Thread pool sizing: an explicit `moksha.threadpool_size` wins;
    # otherwise use 1 + one thread per producer + N workers per consumer.
    threadcount = config.get('moksha.threadpool_size', None)
    if not threadcount:
        N = int(config.get('moksha.workers_per_consumer', 1))
        threadcount = 1 + hub.num_producers + hub.num_consumers * N
    threadcount = int(threadcount)

    log.info("Suggesting threadpool size at %i" % threadcount)
    reactor.suggestThreadPoolSize(threadcount)
    # Our own signal handlers are installed above, so keep Twisted's disabled.
    reactor.run(installSignalHandlers=False)
    log.info("MokshaHub reactor stopped")
| 546 | 24 | 76 |
ad89261a9f20fc8289c0d58ac0edee9defef16a5 | 20 | py | Python | nifi-linksmart-processors/src/test/resources/get_exception.py | linksmart/nifi-linksmart-processor | 74317b338669525ea60538ad2fe13841e71edaf4 | [
"Apache-2.0"
] | null | null | null | nifi-linksmart-processors/src/test/resources/get_exception.py | linksmart/nifi-linksmart-processor | 74317b338669525ea60538ad2fe13841e71edaf4 | [
"Apache-2.0"
] | null | null | null | nifi-linksmart-processors/src/test/resources/get_exception.py | linksmart/nifi-linksmart-processor | 74317b338669525ea60538ad2fe13841e71edaf4 | [
"Apache-2.0"
] | null | null | null | import sys
a = 10/0 | 6.666667 | 10 | 0.65 | import sys
a = 10/0 | 0 | 0 | 0 |
084c8ad941c710c63aaa85337527e56dbf84f640 | 584 | py | Python | trackash/budget/admin.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | trackash/budget/admin.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | trackash/budget/admin.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Income, Expense, Budget, Extract
@admin.register(Budget)
@admin.register(Income)
@admin.register(Expense)
@admin.register(Extract)
| 24.333333 | 65 | 0.724315 | from django.contrib import admin
from .models import Income, Expense, Budget, Extract
# Register each budget-app model with the Django admin; `list_display`
# controls the columns shown on each model's changelist page.


@admin.register(Budget)
class BudgetAdmin(admin.ModelAdmin):
    # Overall balance plus income/expense totals.
    list_display = ("balance", "incomes", "expenses")


@admin.register(Income)
class IncomeAdmin(admin.ModelAdmin):
    list_display = ("created_at", "concept", "value", "category")


@admin.register(Expense)
class ExpenseAdmin(admin.ModelAdmin):
    list_display = ("created_at", "concept", "value", "category")


@admin.register(Extract)
class ExtractAdmin(admin.ModelAdmin):
    # An extract summarizes a reporting period.
    list_display = ("since", "until", "month", "year")
| 0 | 303 | 88 |
131e7ecadea4a9957479632d96bd39eede25e3ea | 878 | py | Python | scripts/field/eunwol_house.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/field/eunwol_house.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/field/eunwol_house.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 410000001
if sm.hasQuest(38002):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("What happened? A house and a new name... But what happened to my friends? Are they alive? If I am, then maybe we failed to seal the Black Mage...")
sm.sendSay("No. They wouldn't give up that easily. They're probably hiding out somewhere, waiting to get back together. I need to look after myself for now, and get my strength back.")
sm.sendSay("Level 10... It's better than nothing, but it's not the best feeling. I'll hang around and get stronger. That's the only thing I can do now.")
sm.setQRValue(38002, "clear", False)
elif sm.hasQuest(38018):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("W-what is that thing? It looks so fuzzy. I don't think I should touch it...")
sm.setQRValue(38018, "clear", False) | 67.538462 | 188 | 0.719818 | # 410000001
# Quest script for map 410000001 (Eunwol's house): plays the monologue
# matching whichever quest is in progress, then records that step as
# cleared via the quest-record value.
# NOTE(review): `sm` is the script manager injected by the server runtime;
# quest IDs 38002/38018 are assumed from the calls below — verify against
# the quest data.
if sm.hasQuest(38002):
    sm.removeEscapeButton()
    sm.flipDialoguePlayerAsSpeaker()
    sm.sendNext("What happened? A house and a new name... But what happened to my friends? Are they alive? If I am, then maybe we failed to seal the Black Mage...")
    sm.sendSay("No. They wouldn't give up that easily. They're probably hiding out somewhere, waiting to get back together. I need to look after myself for now, and get my strength back.")
    sm.sendSay("Level 10... It's better than nothing, but it's not the best feeling. I'll hang around and get stronger. That's the only thing I can do now.")
    sm.setQRValue(38002, "clear", False)
elif sm.hasQuest(38018):
    sm.removeEscapeButton()
    sm.flipDialoguePlayerAsSpeaker()
    sm.sendNext("W-what is that thing? It looks so fuzzy. I don't think I should touch it...")
    sm.setQRValue(38018, "clear", False)
b7e295aaee12733b24208bc9ecfd8c3391cf29ce | 10,398 | py | Python | daceml/autodiff/utils.py | spcl/daceml | a096cf009d1c9fbb2f1872a86a016d209d344f22 | [
"BSD-3-Clause"
] | 30 | 2020-09-09T21:13:36.000Z | 2022-03-15T01:58:10.000Z | daceml/autodiff/utils.py | Shigangli/daceml | c2133c677d5174763e30cf2d98dcb3fd64059db3 | [
"BSD-3-Clause"
] | 83 | 2020-09-05T11:45:06.000Z | 2021-09-28T14:21:44.000Z | daceml/autodiff/utils.py | Shigangli/daceml | c2133c677d5174763e30cf2d98dcb3fd64059db3 | [
"BSD-3-Clause"
] | 7 | 2020-09-03T13:28:45.000Z | 2021-12-12T02:53:22.000Z | import collections
import typing
import copy
import inspect
import ast
import astunparse
import dace
import dace.sdfg.nodes as nd
import dace.data as dt
from dace.frontend.python.parser import DaceProgram
from daceml.autodiff.base_abc import BackwardContext, BackwardResult
import daceml.util.utils as utils
def forward_in_desc_with_name(forward_node: nd.Node, context: BackwardContext,
                              name) -> dt.Data:
    """ Find the descriptor of the data that connects to input connector `name`.

        :param forward_node: the node.
        :param context: the backward context.
        :param name: the input connector name.
        :return: the descriptor of the data that connects to connector `name`.
    """
    # Delegate to the shared utility, pulling the forward state/SDFG out of
    # the backward context.
    fwd_state = context.forward_state
    fwd_sdfg = context.forward_sdfg
    return utils.in_desc_with_name(forward_node, fwd_state, fwd_sdfg, name)
def forward_out_desc_with_name(forward_node: nd.Node, context: BackwardContext,
                               name) -> dt.Data:
    """ Look up the data descriptor of the array attached to the output
        connector ``name`` of ``forward_node``.

        :param forward_node: the node.
        :param context: the backward context.
        :param name: the output connector name.
        :return: the descriptor of the data that connects to connector ``name``.
    """
    desc = utils.out_desc_with_name(forward_node, context.forward_state,
                                    context.forward_sdfg, name)
    return desc
def add_backward_desc_for_connector(backward_sdfg: dace.SDFG,
                                    forward_node: nd.Node,
                                    context: BackwardContext, connector: str,
                                    input: bool) -> str:
    """ Add the gradient array for the array attached to ``connector`` of
        ``forward_node`` to ``backward_sdfg``.

        :param backward_sdfg: the sdfg to add to.
        :param forward_node: the forward node with the connector that we want to add a descriptor for.
        :param context: the backward context.
        :param connector: the connector on the forward node that we want to add the descriptor for.
        :param input: ``True`` if the connector is an input, ``False`` otherwise.
        :return: the name of the newly added array in ``backward_sdfg``.
    """
    # pick the edge lookup matching the connector direction
    lookup = utils.in_edge_with_name if input else utils.out_edge_with_name
    edge = lookup(forward_node, context.forward_state, connector)

    forward_name = edge.data.data
    # the gradient buffer mirrors the forward descriptor, but must not be
    # transient so it can be passed in/out of the backward SDFG
    grad_desc = copy.deepcopy(context.forward_sdfg.arrays[forward_name])
    grad_desc.transient = False
    return backward_sdfg.add_datadesc(forward_name + "_grad",
                                      grad_desc,
                                      find_new_name=True)
def add_backward_desc(backward_sdfg: dace.SDFG, forward_sdfg: dace.SDFG,
                      forward_desc: dt.Data, forward_name: str) -> str:
    """ Add the gradient array for the given forward descriptor to ``backward_sdfg``.

        :param backward_sdfg: the sdfg to add to.
        :param forward_sdfg: the forward sdfg.
        :param forward_desc: the data descriptor of the forward array from ``forward_sdfg``.
        :param forward_name: a name for the forward array (does not have to match its actual name).
        :return: the name of the newly added array in ``backward_sdfg``.
    """
    # pick a name that does not clash with any forward-SDFG array
    grad_name = utils.find_str_not_in_set(forward_sdfg.arrays,
                                          forward_name + "_grad")
    grad_desc = copy.deepcopy(forward_desc)
    # gradients must be visible outside the backward SDFG
    grad_desc.transient = False
    return backward_sdfg.add_datadesc(grad_name, grad_desc)
def backward_program_for_node(
        program, context: BackwardContext,
        forward_node: nd.Node) -> typing.Tuple[nd.Node, BackwardResult]:
    """ Expand a function to the backward function for a node.

        The dtypes for the arguments will be extracted by matching the parameter names to edges.
        Gradient parameters should be the name of the forward parameter, appended with _grad. For these arguments the
        data descriptors will match the data descriptors of the inputs/outputs they correspond to.

        :param program: the python function implementing the backward pass.
        :param context: the backward context.
        :param forward_node: the forward node being differentiated.
        :return: the nested-SDFG node added to the backward state and the
                 :class:`BackwardResult` recording the gradient name mapping.
    """
    input_names = set(inp.name for inp in forward_node.schema.inputs)
    output_names = set(outp.name for outp in forward_node.schema.outputs)

    if input_names.intersection(output_names):
        # this is currently the case for only one onnx op
        raise ValueError(
            "program_for_node cannot be applied on nodes of this type;"
            " '{}' is both an input and an output".format(
                # fix: next() needs an iterator; calling it on a set raises
                # TypeError instead of producing the offending name
                next(iter(input_names.intersection(output_names)))))

    def name_without_grad_in(name, collection):
        # True iff ``name`` is "<x>_grad" where "<x>" is in ``collection``.
        # (fix: this helper was missing, causing a NameError below)
        return name[-5:] == "_grad" and name[:-5] in collection

    params = inspect.signature(program).parameters
    backward_result = BackwardResult.empty()
    inputs = {}
    outputs = {}
    for name, param in params.items():
        if name in input_names:
            # plain forward input: forwarded to the backward pass
            inputs[name] = forward_in_desc_with_name(forward_node, context,
                                                     name)
        elif name_without_grad_in(name, input_names):
            # gradient of a forward input: computed by the backward pass
            outputs[name] = forward_in_desc_with_name(forward_node, context,
                                                      name[:-5])
            backward_result.required_grad_names[name[:-5]] = name
        elif name in output_names:
            # forward output: forwarded to the backward pass
            inputs[name] = forward_out_desc_with_name(forward_node, context,
                                                      name)
        elif name_without_grad_in(name, output_names):
            # gradient of a forward output: given to the backward pass
            inputs[name] = forward_out_desc_with_name(forward_node, context,
                                                      name[:-5])
            backward_result.given_grad_names[name[:-5]] = name
        else:
            raise ValueError(
                "'{}' was not found as an input or output for {}".format(
                    name, forward_node.schema.name))

    # annotate the program so DaceProgram picks up the descriptors
    program.__annotations__ = {**inputs, **outputs}
    sdfg = DaceProgram(program, (), {}, False, dace.DeviceType.CPU).to_sdfg()
    result_node = context.backward_state.add_nested_sdfg(
        sdfg, None, set(inputs), set(outputs))
    return result_node, backward_result
def connect_output_from_forward(forward_node: nd.Node, backward_node: nd.Node,
                                context: BackwardContext,
                                output_connector_name: str):
    """ Connect an output of the forward node as an input to the backward node. This is done by forwarding the array
        from the forward pass.

        Conceptually, this is similar to pytorch's ctx.save_for_backward.

        :param forward_node: the node in the forward pass.
        :param backward_node: the node in the backward pass.
        :param context: the backward context.
        :param output_connector_name: the name of the connector on the backward pass. The output of that connector will
                                      be forwarded to the connector of the same name on the backward node.
    """
    output_edge = utils.out_edge_with_name(forward_node, context.forward_state,
                                           output_connector_name)
    # add the array of the output to backward_input_arrays so that it will be forwarded by the autodiff engine
    output_arr_name = output_edge.data.data
    if output_arr_name not in context.backward_generator.backward_input_arrays:
        # first time this array is forwarded: register a copy of its
        # descriptor with the generator
        data_desc = context.forward_sdfg.arrays[output_arr_name]
        context.backward_generator.backward_input_arrays[
            output_arr_name] = copy.deepcopy(data_desc)
        if context.backward_generator.separate_sdfgs:
            # forward and backward live in separate SDFGs: the array must be
            # non-transient so it can cross the SDFG boundary
            data_desc.transient = False
            context.backward_sdfg.add_datadesc(output_arr_name, data_desc)
        read = context.backward_state.add_read(output_arr_name)
    else:
        # array already forwarded earlier: reuse its existing (unique)
        # AccessNode in the backward state instead of adding a second one
        cand = [
            n for n, _ in context.backward_state.all_nodes_recursive()
            if isinstance(n, nd.AccessNode) and n.data == output_arr_name
        ]
        assert len(cand) == 1
        read = cand[0]
    context.backward_state.add_edge(read, None, backward_node,
                                    output_connector_name,
                                    copy.deepcopy(output_edge.data))
def cast_consts_to_type(code: str, dtype: dace.typeclass) -> str:
    """ Convert a piece of code so that constants are wrapped in casts to ``dtype``.

        For example::

            x * (3 / 2)

        becomes::

            x * (dace.float32(3) / dace.float32(2))

        This is only done when it is required due to a Div operator.

        :param code: the code string to convert.
        :param dtype: the dace typeclass to wrap casts in.
        :return: a string of the converted code.
    """
    # fix: the CastConsts transformer was missing, making this function raise
    # a NameError on every call
    class CastConsts(ast.NodeTransformer):
        """AST pass wrapping numeric literals that appear under a Div in casts."""

        def __init__(self):
            # non-empty while visiting the subtree of a Div operator
            self._in_div_stack = collections.deque()

        def _wrap_in_cast(self, node):
            # replace the literal with ``dace.<dtype>(<literal>)``
            return ast.copy_location(
                ast.parse(
                    f"dace.{dtype.to_string()}({astunparse.unparse(node)})"
                ).body[0].value, node)

        def visit_Num(self, node):
            # numeric literal node on Python < 3.8
            if self._in_div_stack:
                return self._wrap_in_cast(node)
            return self.generic_visit(node)

        def visit_Constant(self, node):
            # literal node on Python >= 3.8
            if self._in_div_stack:
                return self._wrap_in_cast(node)
            return self.generic_visit(node)

        def visit_BinOp(self, node: ast.BinOp):
            if isinstance(node.op, ast.Pow):
                # within pow, no cast is needed unless a new Div appears:
                # temporarily reset the stack for the subtree
                saved = self._in_div_stack
                self._in_div_stack = collections.deque()
                node = self.generic_visit(node)
                self._in_div_stack = saved
                return node
            elif isinstance(node.op, ast.Div):
                # mark that literals below this node need a cast
                self._in_div_stack.append(None)
                node = self.generic_visit(node)
                self._in_div_stack.popleft()
                return node
            else:
                return self.generic_visit(node)

    return astunparse.unparse(CastConsts().visit(ast.parse(code)))
| 40.937008 | 119 | 0.614637 | import collections
import typing
import copy
import inspect
import ast
import astunparse
import dace
import dace.sdfg.nodes as nd
import dace.data as dt
from dace.frontend.python.parser import DaceProgram
from daceml.autodiff.base_abc import BackwardContext, BackwardResult
import daceml.util.utils as utils
def forward_in_desc_with_name(forward_node: nd.Node, context: BackwardContext,
name) -> dt.Data:
""" Find the descriptor of the data that connects to input connector `name`.
:param forward_node: the node.
:param context: the backward context.
:param name: the input connector name.
:return: the descriptor of the data that connects to connector `name`.
"""
return utils.in_desc_with_name(forward_node, context.forward_state,
context.forward_sdfg, name)
def forward_out_desc_with_name(forward_node: nd.Node, context: BackwardContext,
name) -> dt.Data:
""" Find the descriptor of the data that connects to output connector `name`.
:param forward_node: the node.
:param context: the backward context.
:param name: the output connector name.
:return: the descriptor of the data that connects to connector `name`.
"""
return utils.out_desc_with_name(forward_node, context.forward_state,
context.forward_sdfg, name)
def add_backward_desc_for_connector(backward_sdfg: dace.SDFG,
forward_node: nd.Node,
context: BackwardContext, connector: str,
input: bool) -> str:
""" Adds the backward array for the connector of ``forward_node``.
:param backward_sdfg: the sdfg to add to.
:param forward_node: the forward node with the connector that we want to add a descriptor for
:param connector: the connector on the forward node that we want to add the descriptor for
:param input: ``True`` if the connector is an input, ``False`` otherwise
:return: the name of the newly added array in ``backward_sdfg``.
"""
if input:
edge = utils.in_edge_with_name(forward_node, context.forward_state,
connector)
else:
edge = utils.out_edge_with_name(forward_node, context.forward_state,
connector)
arr_name = edge.data.data
forward_desc = context.forward_sdfg.arrays[arr_name]
new_desc = copy.deepcopy(forward_desc)
new_desc.transient = False
return backward_sdfg.add_datadesc(arr_name + "_grad",
new_desc,
find_new_name=True)
def add_backward_desc(backward_sdfg: dace.SDFG, forward_sdfg: dace.SDFG,
forward_desc: dt.Data, forward_name: str) -> str:
""" Adds the backward array for the given descriptor.
:param backward_sdfg: the sdfg to add to.
:param forward_sdfg: the forward sdfg.
:param forward_desc: the data descriptor of the forward array from ``forward_sdfg``.
:param forward_name: a name for the forward array (does not have to match it's actual name).
:return: the name of the newly added array in ``backward_sdfg``.
"""
backward_name = utils.find_str_not_in_set(forward_sdfg.arrays,
forward_name + "_grad")
new_desc = copy.deepcopy(forward_desc)
new_desc.transient = False
return backward_sdfg.add_datadesc(backward_name, new_desc)
def backward_program_for_node(
program, context: BackwardContext,
forward_node: nd.Node) -> typing.Tuple[nd.Node, BackwardResult]:
""" Expand a function to the backward function for a node.
The dtypes for the arguments will be extracted by matching the parameter names to edges.
Gradient parameters should be the name of the forward parameter, appended with _grad. For these arguments the
data descriptors will match the data descriptors of the inputs/outputs they correspond to.
"""
input_names = set(inp.name for inp in forward_node.schema.inputs)
output_names = set(outp.name for outp in forward_node.schema.outputs)
if input_names.intersection(output_names):
# this is currently the case for only one onnx op
raise ValueError(
"program_for_node cannot be applied on nodes of this type;"
" '{}' is both an input and an output".format(
next(input_names.intersection(output_names))))
def name_without_grad_in(name, collection):
return name[-5:] == "_grad" and name[:-5] in collection
params = inspect.signature(program).parameters
backward_result = BackwardResult.empty()
inputs = {}
outputs = {}
for name, param in params.items():
if name in input_names:
inputs[name] = forward_in_desc_with_name(forward_node, context,
name)
elif name_without_grad_in(name, input_names):
outputs[name] = forward_in_desc_with_name(forward_node, context,
name[:-5])
backward_result.required_grad_names[name[:-5]] = name
elif name in output_names:
inputs[name] = forward_out_desc_with_name(forward_node, context,
name)
elif name_without_grad_in(name, output_names):
inputs[name] = forward_out_desc_with_name(forward_node, context,
name[:-5])
backward_result.given_grad_names[name[:-5]] = name
else:
raise ValueError(
"'{}' was not found as an input or output for {}".format(
name, forward_node.schema.name))
program.__annotations__ = {**inputs, **outputs}
sdfg = DaceProgram(program, (), {}, False, dace.DeviceType.CPU).to_sdfg()
result_node = context.backward_state.add_nested_sdfg(
sdfg, None, set(inputs), set(outputs))
return result_node, backward_result
def connect_output_from_forward(forward_node: nd.Node, backward_node: nd.Node,
context: BackwardContext,
output_connector_name: str):
""" Connect an output of the forward node as an input to the backward node. This is done by forwarding the array
from the forward pass.
Conceptually, this is similar to pytorch's ctx.save_for_backward.
:param forward_node: the node in the forward pass.
:param backward_node: the node in the backward pass.
:param context: the backward context.
:param output_connector_name: the name of the connector on the backward pass. The output of that connector will
be forwarded to the connector of the same name on the backward node.
"""
output_edge = utils.out_edge_with_name(forward_node, context.forward_state,
output_connector_name)
# add the array of the output to backward_input_arrays that it will be forwarded by the autodiff engine
output_arr_name = output_edge.data.data
if output_arr_name not in context.backward_generator.backward_input_arrays:
data_desc = context.forward_sdfg.arrays[output_arr_name]
context.backward_generator.backward_input_arrays[
output_arr_name] = copy.deepcopy(data_desc)
if context.backward_generator.separate_sdfgs:
data_desc.transient = False
context.backward_sdfg.add_datadesc(output_arr_name, data_desc)
read = context.backward_state.add_read(output_arr_name)
else:
cand = [
n for n, _ in context.backward_state.all_nodes_recursive()
if isinstance(n, nd.AccessNode) and n.data == output_arr_name
]
assert len(cand) == 1
read = cand[0]
context.backward_state.add_edge(read, None, backward_node,
output_connector_name,
copy.deepcopy(output_edge.data))
def cast_consts_to_type(code: str, dtype: dace.typeclass) -> str:
""" Convert a piece of code so that constants are wrapped in casts to ``dtype``.
For example:
x * (3 / 2)
becomes:
x * (dace.float32(3) / dace.float32(2))
This is only done when it is required due to a Div operator.
:param code: the code string to convert.
:param dtype: the dace typeclass to wrap cast to
:return: a string of the converted code.
"""
class CastConsts(ast.NodeTransformer):
def __init__(self):
self._in_div_stack = collections.deque()
def visit_Num(self, node):
if self._in_div_stack:
return ast.copy_location(
ast.parse(
f"dace.{dtype.to_string()}({astunparse.unparse(node)})"
).body[0].value, node)
else:
return self.generic_visit(node)
def visit_BinOp(self, node: ast.BinOp):
if node.op.__class__.__name__ == "Pow":
# within pow, we don't need to cast unless there is a new div
old_stack = self._in_div_stack
# reset the stack
self._in_div_stack = collections.deque()
node = self.generic_visit(node)
self._in_div_stack = old_stack
return node
elif node.op.__class__.__name__ == "Div":
self._in_div_stack.append(None)
node = self.generic_visit(node)
self._in_div_stack.popleft()
return node
else:
return self.generic_visit(node)
def visit_Constant(self, node):
if self._in_div_stack:
return ast.copy_location(
ast.parse(
f"dace.{dtype.to_string()}({astunparse.unparse(node)})"
).body[0].value, node)
else:
return self.generic_visit(node)
return astunparse.unparse(CastConsts().visit(ast.parse(code)))
| 1,445 | 17 | 176 |
11b8ebdd1c7b704fd7e62c4aa52c1ebd4b17118e | 885 | py | Python | app/db.py | narutox900/GotItOnboardFinal | 32d4c186adb1ac0cb0cd719f4a3aa8d5e85ea07b | [
"MIT"
] | null | null | null | app/db.py | narutox900/GotItOnboardFinal | 32d4c186adb1ac0cb0cd719f4a3aa8d5e85ea07b | [
"MIT"
] | null | null | null | app/db.py | narutox900/GotItOnboardFinal | 32d4c186adb1ac0cb0cd719f4a3aa8d5e85ea07b | [
"MIT"
] | null | null | null | import click
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
# Force mysql to compile fraction of seconds
@compiles(DateTime, "mysql")
def compile_datetime_mysql(type_, compiler, **kw):
    """Render MySQL DATETIME columns with microsecond precision."""
    # fix: the decorated functions were stripped, leaving @compiles /
    # @click.command decorators dangling over an assignment (SyntaxError) and
    # init_db_command / clear_db_command undefined
    return "DATETIME(6)"


db = SQLAlchemy()


def init_db():
    """Create all tables registered on the SQLAlchemy metadata."""
    db.create_all()


def clear_db():
    """Drop all tables registered on the SQLAlchemy metadata."""
    db.drop_all()


@click.command("init-db")
@with_appcontext
def init_db_command():
    """CLI entry point: create the database tables."""
    init_db()
    click.echo("Initialized the database.")


@click.command("clear-db")
@with_appcontext
def clear_db_command():
    """CLI entry point: drop the database tables."""
    clear_db()
    click.echo("Cleared the database.")
def init_app(app):
    """Register the database CLI commands with the Flask app.

    This is called by the application factory.
    """
    for command in (init_db_command, clear_db_command):
        app.cli.add_command(command)
| 19.23913 | 72 | 0.734463 | import click
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
# Force mysql to compile fraction of seconds
@compiles(DateTime, "mysql")
def compile_datetime_mysql(type_, compiler, **kw):
return "DATETIME(6)"
db = SQLAlchemy()
def init_db():
db.create_all()
def clear_db():
db.drop_all()
@click.command("init-db")
@with_appcontext
def init_db_command():
init_db()
click.echo("Initialized the database.")
@click.command("clear-db")
@with_appcontext
def clear_db_command():
clear_db()
click.echo("Cleared the database.")
def init_app(app):
"""Register database functions with the Flask app. This is called by
the application factory.
"""
app.cli.add_command(init_db_command)
app.cli.add_command(clear_db_command)
| 195 | 0 | 112 |
2d277923a1f07571de22bab838242f14997d5396 | 3,669 | py | Python | benchmarks/run_benchmark.py | kokizzu/prometeo | ce8aa66a21bec51581cad0a7a0998b0a86581734 | [
"BSD-2-Clause"
] | 509 | 2019-11-06T22:08:49.000Z | 2022-03-30T01:06:52.000Z | benchmarks/run_benchmark.py | kokizzu/prometeo | ce8aa66a21bec51581cad0a7a0998b0a86581734 | [
"BSD-2-Clause"
] | 14 | 2018-10-23T10:08:24.000Z | 2022-01-14T02:02:17.000Z | benchmarks/run_benchmark.py | kokizzu/prometeo | ce8aa66a21bec51581cad0a7a0998b0a86581734 | [
"BSD-2-Clause"
] | 22 | 2019-06-12T10:01:13.000Z | 2022-02-27T06:38:21.000Z | import numpy as np
import subprocess
import json
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [r'\usepackage{lmodern}']
font = {'family':'serif'}
plt.rc('font',**font)
NM = range(2,150,4)
# NM = range(2,20,2)
NREP_small = 10000
NREP_medium = 100
NREP_large = 10
AVG_CPU_TIME = []
res_file = 'riccati_benchmark_prometeo.json'
RUN = False
UPDATE_res = False
UPDATE_FIGURE = True
figname = 'riccati_benchmark'
blasfeo_res_file = 'riccati_benchmark_blasfeo_api.json'
LOAD_BLASFEO_RES = True
numpy_res_file = 'riccati_benchmark_numpy.json'
LOAD_NUMPY_RES = True
numpy_blasfeo_res_file = 'riccati_benchmark_numpy_blasfeo.json'
LOAD_NUMPY_BLASFEO_RES = True
julia_res_file = 'riccati_benchmark_julia.json'
LOAD_JULIA_RES = True
if not UPDATE_res:
print('Warning: not updating result file! This will just '
'plot the results at the end of the benchmark.')
if RUN:
    for i in range(len(NM)):
        print('running Riccati benchmark for case NM = {}'.format(NM[i]))
        code = ""
        # use fewer repetitions as the problem size grows
        if NM[i] < 30:
            NREP = NREP_small
        elif NM[i] < 100:
            NREP = NREP_medium
        else:
            NREP = NREP_large
        # instantiate the benchmark template for this problem size
        with open('riccati_mass_spring.py.in') as template:
            code = template.read()
        code = code.replace('NM', str(NM[i]))
        code = code.replace('NREP', str(NREP))
        with open('riccati_mass_spring.py', 'w+') as bench_file:
            bench_file.write(code)
        cmd = 'pmt riccati_mass_spring.py --cgen=True'
        proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
        try:
            outs, errs = proc.communicate()
        # fix: the exception is subprocess.TimeoutExpired; the bare name
        # TimeOutExpired is undefined and would itself raise NameError
        except subprocess.TimeoutExpired:
            proc.kill()
            print('Exception raised at NM = {}'.format(NM[i]))
            outs, errs = proc.communicate()
        # stdout of the benchmark is the total CPU time; store the per-run avg
        AVG_CPU_TIME.append([float(outs.decode())/NREP, NM[i]])
if UPDATE_res:
with open(res_file, 'w+') as res:
json.dump(AVG_CPU_TIME, res)
else:
with open(res_file) as res:
AVG_CPU_TIME = json.load(res)
AVG_CPU_TIME = np.array(AVG_CPU_TIME)
plt.figure()
plt.semilogy(2*AVG_CPU_TIME[:,1], AVG_CPU_TIME[:,0])
legend = [r'\texttt{prometeo}']
if LOAD_BLASFEO_RES:
with open(blasfeo_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], 'o')
legend.append(r'\texttt{BLASFEO}')
if LOAD_NUMPY_RES:
with open(numpy_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], '--', alpha=0.7)
legend.append(r'\texttt{NumPy}')
if LOAD_JULIA_RES:
with open(julia_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], '--',alpha=0.7)
legend.append(r'\texttt{Julia}')
if LOAD_NUMPY_BLASFEO_RES:
with open(numpy_blasfeo_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0])
legend.append(r'\texttt{NumPy + BLASFEO}')
plt.legend(legend)
plt.grid()
plt.xlabel(r'matrix size ($n_x$)')
plt.ylabel(r'CPU time [s]')
plt.title(r'Riccati factorization')
if UPDATE_FIGURE:
plt.savefig(figname + '.png', dpi=300, bbox_inches="tight")
plt.show()
| 30.07377 | 89 | 0.677569 | import numpy as np
import subprocess
import json
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [r'\usepackage{lmodern}']
font = {'family':'serif'}
plt.rc('font',**font)
NM = range(2,150,4)
# NM = range(2,20,2)
NREP_small = 10000
NREP_medium = 100
NREP_large = 10
AVG_CPU_TIME = []
res_file = 'riccati_benchmark_prometeo.json'
RUN = False
UPDATE_res = False
UPDATE_FIGURE = True
figname = 'riccati_benchmark'
blasfeo_res_file = 'riccati_benchmark_blasfeo_api.json'
LOAD_BLASFEO_RES = True
numpy_res_file = 'riccati_benchmark_numpy.json'
LOAD_NUMPY_RES = True
numpy_blasfeo_res_file = 'riccati_benchmark_numpy_blasfeo.json'
LOAD_NUMPY_BLASFEO_RES = True
julia_res_file = 'riccati_benchmark_julia.json'
LOAD_JULIA_RES = True
if not UPDATE_res:
print('Warning: not updating result file! This will just '
'plot the results at the end of the benchmark.')
if RUN:
for i in range(len(NM)):
print('running Riccati benchmark for case NM = {}'.format(NM[i]))
code = ""
if NM[i] < 30:
NREP = NREP_small
elif NM[i] < 100:
NREP = NREP_medium
else:
NREP = NREP_large
with open('riccati_mass_spring.py.in') as template:
code = template.read()
code = code.replace('NM', str(NM[i]))
code = code.replace('NREP', str(NREP))
with open('riccati_mass_spring.py', 'w+') as bench_file:
bench_file.write(code)
cmd = 'pmt riccati_mass_spring.py --cgen=True'
proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
try:
outs, errs = proc.communicate()
except TimeOutExpired:
proc.kill()
print('Exception raised at NM = {}'.format(NM[i]))
outs, errs = proc.communicate()
AVG_CPU_TIME.append([float(outs.decode())/NREP, NM[i]])
if UPDATE_res:
with open(res_file, 'w+') as res:
json.dump(AVG_CPU_TIME, res)
else:
with open(res_file) as res:
AVG_CPU_TIME = json.load(res)
AVG_CPU_TIME = np.array(AVG_CPU_TIME)
plt.figure()
plt.semilogy(2*AVG_CPU_TIME[:,1], AVG_CPU_TIME[:,0])
legend = [r'\texttt{prometeo}']
if LOAD_BLASFEO_RES:
with open(blasfeo_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], 'o')
legend.append(r'\texttt{BLASFEO}')
if LOAD_NUMPY_RES:
with open(numpy_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], '--', alpha=0.7)
legend.append(r'\texttt{NumPy}')
if LOAD_JULIA_RES:
with open(julia_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0], '--',alpha=0.7)
legend.append(r'\texttt{Julia}')
if LOAD_NUMPY_BLASFEO_RES:
with open(numpy_blasfeo_res_file) as res:
AVG_CPU_TIME_BLASFEO = json.load(res)
AVG_CPU_TIME_BLASFEO = np.array(AVG_CPU_TIME_BLASFEO)
plt.semilogy(2*AVG_CPU_TIME_BLASFEO[:,1], AVG_CPU_TIME_BLASFEO[:,0])
legend.append(r'\texttt{NumPy + BLASFEO}')
plt.legend(legend)
plt.grid()
plt.xlabel(r'matrix size ($n_x$)')
plt.ylabel(r'CPU time [s]')
plt.title(r'Riccati factorization')
if UPDATE_FIGURE:
plt.savefig(figname + '.png', dpi=300, bbox_inches="tight")
plt.show()
| 0 | 0 | 0 |
56a1896325abf664998601ebfdf9e7d181777606 | 29,130 | py | Python | psx/_dump_/50/_dump_ida_/overlay_c/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 15 | 2018-06-28T01:11:25.000Z | 2021-09-27T15:57:18.000Z | psx/_dump_/50/_dump_ida_/overlay_c/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-29T04:08:23.000Z | 2019-10-17T13:57:22.000Z | psx/_dump_/50/_dump_ida_/overlay_c/set_funcs.py | maoa3/scalpel | 2e7381b516cded28996d290438acc618d00b2aa7 | [
"Unlicense"
] | 7 | 2018-06-28T01:11:34.000Z | 2020-05-23T09:21:48.000Z | del_items(0x8012F26C)
SetType(0x8012F26C, "void GameOnlyTestRoutine__Fv()")
del_items(0x8012F274)
SetType(0x8012F274, "int vecleny__Fii(int a, int b)")
del_items(0x8012F298)
SetType(0x8012F298, "int veclenx__Fii(int a, int b)")
del_items(0x8012F2C4)
SetType(0x8012F2C4, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)")
del_items(0x8012F8BC)
SetType(0x8012F8BC, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)")
del_items(0x8012F9A4)
SetType(0x8012F9A4, "int FindClosest__Fiii(int sx, int sy, int rad)")
del_items(0x8012FB40)
SetType(0x8012FB40, "int GetSpellLevel__Fii(int id, int sn)")
del_items(0x8012FBB4)
SetType(0x8012FBB4, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8012FDD0)
SetType(0x8012FDD0, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8012FFEC)
SetType(0x8012FFEC, "void DeleteMissile__Fii(int mi, int i)")
del_items(0x80130044)
SetType(0x80130044, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)")
del_items(0x801301F8)
SetType(0x801301F8, "void PutMissile__Fi(int i)")
del_items(0x801302FC)
SetType(0x801302FC, "void GetMissilePos__Fi(int i)")
del_items(0x80130424)
SetType(0x80130424, "void MoveMissilePos__Fi(int i)")
del_items(0x8013058C)
SetType(0x8013058C, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80130900)
SetType(0x80130900, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80131060)
SetType(0x80131060, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)")
del_items(0x80131ACC)
SetType(0x80131ACC, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)")
del_items(0x801322A8)
SetType(0x801322A8, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)")
del_items(0x80132724)
SetType(0x80132724, "unsigned char GetTableValue__FUci(unsigned char code, int dir)")
del_items(0x801327B8)
SetType(0x801327B8, "void SetMissAnim__Fii(int mi, int animtype)")
del_items(0x80132888)
SetType(0x80132888, "void SetMissDir__Fii(int mi, int dir)")
del_items(0x801328CC)
SetType(0x801328CC, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80132A8C)
SetType(0x80132A8C, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80132C48)
SetType(0x80132C48, "void GetVileMissPos__Fiii(int mi, int dx, int dy)")
del_items(0x80132D6C)
SetType(0x80132D6C, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801330DC)
SetType(0x801330DC, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x80133348)
SetType(0x80133348, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013345C)
SetType(0x8013345C, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133654)
SetType(0x80133654, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801337A8)
SetType(0x801337A8, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133990)
SetType(0x80133990, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133BEC)
SetType(0x80133BEC, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133CD4)
SetType(0x80133CD4, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133E9C)
SetType(0x80133E9C, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801340A8)
SetType(0x801340A8, "void AddWeapexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134190)
SetType(0x80134190, "unsigned char CheckIfTrig__Fii(int x, int y)")
del_items(0x80134274)
SetType(0x80134274, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134698)
SetType(0x80134698, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801348A8)
SetType(0x801348A8, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134A88)
SetType(0x80134A88, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134B50)
SetType(0x80134B50, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134CAC)
SetType(0x80134CAC, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135118)
SetType(0x80135118, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135174)
SetType(0x80135174, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135330)
SetType(0x80135330, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135614)
SetType(0x80135614, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135718)
SetType(0x80135718, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801357F0)
SetType(0x801357F0, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135AE8)
SetType(0x80135AE8, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135CA0)
SetType(0x80135CA0, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135D34)
SetType(0x80135D34, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135F5C)
SetType(0x80135F5C, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135FC4)
SetType(0x80135FC4, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801361F0)
SetType(0x801361F0, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801362A0)
SetType(0x801362A0, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136550)
SetType(0x80136550, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013664C)
SetType(0x8013664C, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801366D0)
SetType(0x801366D0, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801368E8)
SetType(0x801368E8, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136998)
SetType(0x80136998, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136A48)
SetType(0x80136A48, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136AB0)
SetType(0x80136AB0, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136CEC)
SetType(0x80136CEC, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)")
del_items(0x80136F08)
SetType(0x80136F08, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136FF8)
SetType(0x80136FF8, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801371EC)
SetType(0x801371EC, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801373AC)
SetType(0x801373AC, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80137420)
SetType(0x80137420, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801374A8)
SetType(0x801374A8, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80137510)
SetType(0x80137510, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013770C)
SetType(0x8013770C, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801377AC)
SetType(0x801377AC, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801378E8)
SetType(0x801378E8, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)")
del_items(0x80137D38)
SetType(0x80137D38, "int Sentfire__Fiii(int i, int sx, int sy)")
del_items(0x80137F1C)
SetType(0x80137F1C, "void MI_Dummy__Fi(int i)")
del_items(0x80137F24)
SetType(0x80137F24, "void MI_Golem__Fi(int i)")
del_items(0x80138180)
SetType(0x80138180, "void MI_SetManashield__Fi(int i)")
del_items(0x801381BC)
SetType(0x801381BC, "void MI_LArrow__Fi(int i)")
del_items(0x80138924)
SetType(0x80138924, "void MI_Arrow__Fi(int i)")
del_items(0x80138B40)
SetType(0x80138B40, "void MI_Firebolt__Fi(int i)")
del_items(0x80139200)
SetType(0x80139200, "void MI_Lightball__Fi(int i)")
del_items(0x80139488)
SetType(0x80139488, "void MI_Acidpud__Fi(int i)")
del_items(0x80139598)
SetType(0x80139598, "void MI_Firewall__Fi(int i)")
del_items(0x8013985C)
SetType(0x8013985C, "void MI_Fireball__Fi(int i)")
del_items(0x8013A220)
SetType(0x8013A220, "void MI_Lightctrl__Fi(int i)")
del_items(0x8013A59C)
SetType(0x8013A59C, "void MI_Lightning__Fi(int i)")
del_items(0x8013A688)
SetType(0x8013A688, "void MI_Town__Fi(int i)")
del_items(0x8013A8C0)
SetType(0x8013A8C0, "void MI_Flash__Fi(int i)")
del_items(0x8013AC14)
SetType(0x8013AC14, "void MI_Flash2__Fi(int i)")
del_items(0x8013ADDC)
SetType(0x8013ADDC, "void MI_Manashield__Fi(int i)")
del_items(0x8013B100)
SetType(0x8013B100, "void MI_Firemove__Fi(int i)")
del_items(0x8013B38C)
SetType(0x8013B38C, "void MI_Guardian__Fi(int i)")
del_items(0x8013B63C)
SetType(0x8013B63C, "void MI_Chain__Fi(int i)")
del_items(0x8013B8A8)
SetType(0x8013B8A8, "void MI_Weapexp__Fi(int i)")
del_items(0x8013BB60)
SetType(0x8013BB60, "void MI_Misexp__Fi(int i)")
del_items(0x8013BE1C)
SetType(0x8013BE1C, "void MI_Acidsplat__Fi(int i)")
del_items(0x8013BFB8)
SetType(0x8013BFB8, "void MI_Teleport__Fi(int i)")
del_items(0x8013C380)
SetType(0x8013C380, "void MI_Stone__Fi(int i)")
del_items(0x8013C52C)
SetType(0x8013C52C, "void MI_Boom__Fi(int i)")
del_items(0x8013C624)
SetType(0x8013C624, "void MI_Rhino__Fi(int i)")
del_items(0x8013C9D0)
SetType(0x8013C9D0, "void MI_FirewallC__Fi(int i)")
del_items(0x8013CC58)
SetType(0x8013CC58, "void MI_Infra__Fi(int i)")
del_items(0x8013CD10)
SetType(0x8013CD10, "void MI_Apoca__Fi(int i)")
del_items(0x8013CFA4)
SetType(0x8013CFA4, "void MI_Wave__Fi(int i)")
del_items(0x8013D4A0)
SetType(0x8013D4A0, "void MI_Nova__Fi(int i)")
del_items(0x8013D760)
SetType(0x8013D760, "void MI_Flame__Fi(int i)")
del_items(0x8013D958)
SetType(0x8013D958, "void MI_Flamec__Fi(int i)")
del_items(0x8013DBE0)
SetType(0x8013DBE0, "void MI_Cbolt__Fi(int i)")
del_items(0x8013DEE4)
SetType(0x8013DEE4, "void MI_Hbolt__Fi(int i)")
del_items(0x8013E1F0)
SetType(0x8013E1F0, "void MI_Element__Fi(int i)")
del_items(0x8013E8A8)
SetType(0x8013E8A8, "void MI_Bonespirit__Fi(int i)")
del_items(0x8013ECB0)
SetType(0x8013ECB0, "void MI_ResurrectBeam__Fi(int i)")
del_items(0x8013ED20)
SetType(0x8013ED20, "void MI_Rportal__Fi(int i)")
del_items(0x8013EF44)
SetType(0x8013EF44, "void ProcessMissiles__Fv()")
del_items(0x8013F338)
SetType(0x8013F338, "void ClearMissileSpot__Fi(int mi)")
del_items(0x8013F3F0)
SetType(0x8013F3F0, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)")
del_items(0x8013F404)
SetType(0x8013F404, "void MonstPartJump__Fi(int m)")
del_items(0x8013F598)
SetType(0x8013F598, "void DeleteMonster__Fi(int i)")
del_items(0x8013F5D0)
SetType(0x8013F5D0, "int M_GetDir__Fi(int i)")
del_items(0x8013F62C)
SetType(0x8013F62C, "void M_StartDelay__Fii(int i, int len)")
del_items(0x8013F674)
SetType(0x8013F674, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x8013F78C)
SetType(0x8013F78C, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x8013F8B0)
SetType(0x8013F8B0, "void M_StartSpAttack__Fi(int i)")
del_items(0x8013F998)
SetType(0x8013F998, "void M_StartEat__Fi(int i)")
del_items(0x8013FA68)
SetType(0x8013FA68, "void M_GetKnockback__Fi(int i)")
del_items(0x8013FC40)
SetType(0x8013FC40, "void M_StartHit__Fiii(int i, int pnum, int dam)")
del_items(0x8013FF38)
SetType(0x8013FF38, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)")
del_items(0x8014024C)
SetType(0x8014024C, "void M2MStartHit__Fiii(int mid, int i, int dam)")
del_items(0x801404F8)
SetType(0x801404F8, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)")
del_items(0x801407E4)
SetType(0x801407E4, "void M2MStartKill__Fii(int i, int mid)")
del_items(0x80140BAC)
SetType(0x80140BAC, "void M_StartKill__Fii(int i, int pnum)")
del_items(0x80140C9C)
SetType(0x80140C9C, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80140DF0)
SetType(0x80140DF0, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80140F38)
SetType(0x80140F38, "void M_StartHeal__Fi(int i)")
del_items(0x80140FB8)
SetType(0x80140FB8, "void M_ChangeLightOffset__Fi(int monst)")
del_items(0x80141120)
SetType(0x80141120, "int M_DoStand__Fi(int i)")
del_items(0x80141188)
SetType(0x80141188, "int M_DoWalk__Fi(int i)")
del_items(0x8014140C)
SetType(0x8014140C, "int M_DoWalk2__Fi(int i)")
del_items(0x801415F8)
SetType(0x801415F8, "int M_DoWalk3__Fi(int i)")
del_items(0x801418BC)
SetType(0x801418BC, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)")
del_items(0x80141A84)
SetType(0x80141A84, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)")
del_items(0x80142098)
SetType(0x80142098, "int M_DoAttack__Fi(int i)")
del_items(0x8014223C)
SetType(0x8014223C, "int M_DoRAttack__Fi(int i)")
del_items(0x801423B4)
SetType(0x801423B4, "int M_DoRSpAttack__Fi(int i)")
del_items(0x801425A4)
SetType(0x801425A4, "int M_DoSAttack__Fi(int i)")
del_items(0x80142678)
SetType(0x80142678, "int M_DoFadein__Fi(int i)")
del_items(0x80142748)
SetType(0x80142748, "int M_DoFadeout__Fi(int i)")
del_items(0x8014285C)
SetType(0x8014285C, "int M_DoHeal__Fi(int i)")
del_items(0x80142908)
SetType(0x80142908, "int M_DoTalk__Fi(int i)")
del_items(0x80142E74)
SetType(0x80142E74, "void M_Teleport__Fi(int i)")
del_items(0x801430A8)
SetType(0x801430A8, "int M_DoGotHit__Fi(int i)")
del_items(0x80143108)
SetType(0x80143108, "void DoEnding__Fv()")
del_items(0x801431C8)
SetType(0x801431C8, "void PrepDoEnding__Fv()")
del_items(0x801432E0)
SetType(0x801432E0, "int M_DoDeath__Fi(int i)")
del_items(0x801434B0)
SetType(0x801434B0, "int M_DoSpStand__Fi(int i)")
del_items(0x80143554)
SetType(0x80143554, "int M_DoDelay__Fi(int i)")
del_items(0x80143644)
SetType(0x80143644, "int M_DoStone__Fi(int i)")
del_items(0x801436C8)
SetType(0x801436C8, "void M_WalkDir__Fii(int i, int md)")
del_items(0x801438F0)
SetType(0x801438F0, "void GroupUnity__Fi(int i)")
del_items(0x80143CDC)
SetType(0x80143CDC, "unsigned char M_CallWalk__Fii(int i, int md)")
del_items(0x80143EC8)
SetType(0x80143EC8, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())")
del_items(0x80143F8C)
SetType(0x80143F8C, "unsigned char M_CallWalk2__Fii(int i, int md)")
del_items(0x801440A0)
SetType(0x801440A0, "unsigned char M_DumbWalk__Fii(int i, int md)")
del_items(0x801440F4)
SetType(0x801440F4, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)")
del_items(0x80144294)
SetType(0x80144294, "void MAI_Zombie__Fi(int i)")
del_items(0x8014448C)
SetType(0x8014448C, "void MAI_SkelSd__Fi(int i)")
del_items(0x80144624)
SetType(0x80144624, "void MAI_Snake__Fi(int i)")
del_items(0x80144A08)
SetType(0x80144A08, "void MAI_Bat__Fi(int i)")
del_items(0x80144DC0)
SetType(0x80144DC0, "void MAI_SkelBow__Fi(int i)")
del_items(0x80144FA4)
SetType(0x80144FA4, "void MAI_Fat__Fi(int i)")
del_items(0x80145154)
SetType(0x80145154, "void MAI_Sneak__Fi(int i)")
del_items(0x80145540)
SetType(0x80145540, "void MAI_Fireman__Fi(int i)")
del_items(0x80145838)
SetType(0x80145838, "void MAI_Fallen__Fi(int i)")
del_items(0x80145B54)
SetType(0x80145B54, "void MAI_Cleaver__Fi(int i)")
del_items(0x80145C3C)
SetType(0x80145C3C, "void MAI_Round__FiUc(int i, unsigned char special)")
del_items(0x801460A8)
SetType(0x801460A8, "void MAI_GoatMc__Fi(int i)")
del_items(0x801460C8)
SetType(0x801460C8, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)")
del_items(0x801462E8)
SetType(0x801462E8, "void MAI_GoatBow__Fi(int i)")
del_items(0x8014630C)
SetType(0x8014630C, "void MAI_Succ__Fi(int i)")
del_items(0x80146330)
SetType(0x80146330, "void MAI_AcidUniq__Fi(int i)")
del_items(0x80146354)
SetType(0x80146354, "void MAI_Scav__Fi(int i)")
del_items(0x8014676C)
SetType(0x8014676C, "void MAI_Garg__Fi(int i)")
del_items(0x8014694C)
SetType(0x8014694C, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)")
del_items(0x80146E60)
SetType(0x80146E60, "void MAI_Magma__Fi(int i)")
del_items(0x80146E8C)
SetType(0x80146E8C, "void MAI_Storm__Fi(int i)")
del_items(0x80146EB8)
SetType(0x80146EB8, "void MAI_Acid__Fi(int i)")
del_items(0x80146EE8)
SetType(0x80146EE8, "void MAI_Diablo__Fi(int i)")
del_items(0x80146F14)
SetType(0x80146F14, "void MAI_RR2__Fiii(int i, int mistype, int dam)")
del_items(0x80147414)
SetType(0x80147414, "void MAI_Mega__Fi(int i)")
del_items(0x80147438)
SetType(0x80147438, "void MAI_SkelKing__Fi(int i)")
del_items(0x80147974)
SetType(0x80147974, "void MAI_Rhino__Fi(int i)")
del_items(0x80147E1C)
SetType(0x80147E1C, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)")
del_items(0x801482E8)
SetType(0x801482E8, "void MAI_Garbud__Fi(int i)")
del_items(0x801484F0)
SetType(0x801484F0, "void MAI_Zhar__Fi(int i)")
del_items(0x801486E8)
SetType(0x801486E8, "void MAI_SnotSpil__Fi(int i)")
del_items(0x80148934)
SetType(0x80148934, "void MAI_Lazurus__Fi(int i)")
del_items(0x80148BA8)
SetType(0x80148BA8, "void MAI_Lazhelp__Fi(int i)")
del_items(0x80148CC8)
SetType(0x80148CC8, "void MAI_Lachdanan__Fi(int i)")
del_items(0x80148E74)
SetType(0x80148E74, "void MAI_Warlord__Fi(int i)")
del_items(0x80148FC0)
SetType(0x80148FC0, "void DeleteMonsterList__Fv()")
del_items(0x801490DC)
SetType(0x801490DC, "void ProcessMonsters__Fv()")
del_items(0x8014966C)
SetType(0x8014966C, "unsigned char DirOK__Fii(int i, int mdir)")
del_items(0x80149A54)
SetType(0x80149A54, "unsigned char PosOkMissile__Fii(int x, int y)")
del_items(0x80149ABC)
SetType(0x80149ABC, "unsigned char CheckNoSolid__Fii(int x, int y)")
del_items(0x80149B00)
SetType(0x80149B00, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)")
del_items(0x80149D88)
SetType(0x80149D88, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80149DC8)
SetType(0x80149DC8, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)")
del_items(0x8014A05C)
SetType(0x8014A05C, "void M_FallenFear__Fii(int x, int y)")
del_items(0x8014A22C)
SetType(0x8014A22C, "void PrintMonstHistory__Fi(int mt)")
del_items(0x8014A4E0)
SetType(0x8014A4E0, "void PrintUniqueHistory__Fv()")
del_items(0x8014A604)
SetType(0x8014A604, "void MissToMonst__Fiii(int i, int x, int y)")
del_items(0x8014AA80)
SetType(0x8014AA80, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)")
del_items(0x8014AC9C)
SetType(0x8014AC9C, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)")
del_items(0x8014AF90)
SetType(0x8014AF90, "int M_SpawnSkel__Fiii(int x, int y, int dir)")
del_items(0x8014B0E8)
SetType(0x8014B0E8, "void TalktoMonster__Fi(int i)")
del_items(0x8014B214)
SetType(0x8014B214, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)")
del_items(0x8014B46C)
SetType(0x8014B46C, "unsigned char CanTalkToMonst__Fi(int m)")
del_items(0x8014B4A4)
SetType(0x8014B4A4, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)")
del_items(0x8014B570)
SetType(0x8014B570, "void MAI_Golum__Fi(int i)")
del_items(0x8014B8E4)
SetType(0x8014B8E4, "unsigned char MAI_Path__Fi(int i)")
del_items(0x8014BA48)
SetType(0x8014BA48, "void M_StartAttack__Fi(int i)")
del_items(0x8014BB30)
SetType(0x8014BB30, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)")
del_items(0x8014BC90)
SetType(0x8014BC90, "void FreeInvGFX__Fv()")
del_items(0x8014BC98)
SetType(0x8014BC98, "void InvDrawSlot__Fiii(int X, int Y, int Frame)")
del_items(0x8014BD1C)
SetType(0x8014BD1C, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)")
del_items(0x8014BF70)
SetType(0x8014BF70, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)")
del_items(0x8014C040)
SetType(0x8014C040, "void InvDrawSlots__Fv()")
del_items(0x8014C318)
SetType(0x8014C318, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)")
del_items(0x8014C3E4)
SetType(0x8014C3E4, "void DrawInvStats__Fv()")
del_items(0x8014CF00)
SetType(0x8014CF00, "void DrawInvBack__Fv()")
del_items(0x8014CF88)
SetType(0x8014CF88, "void DrawInvCursor__Fv()")
del_items(0x8014D464)
SetType(0x8014D464, "void DrawInvMsg__Fv()")
del_items(0x8014D62C)
SetType(0x8014D62C, "void DrawInvUnique__Fv()")
del_items(0x8014D750)
SetType(0x8014D750, "void DrawInv__Fv()")
del_items(0x8014D790)
SetType(0x8014D790, "void DrawInvTSK__FP4TASK(struct TASK *T)")
del_items(0x8014DAD4)
SetType(0x8014DAD4, "void DoThatDrawInv__Fv()")
del_items(0x8014E29C)
SetType(0x8014E29C, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x8014E5BC)
SetType(0x8014E5BC, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x8014E958)
SetType(0x8014E958, "unsigned char GoldAutoPlace__Fi(int pnum)")
del_items(0x8014EE28)
SetType(0x8014EE28, "unsigned char WeaponAutoPlace__Fi(int pnum)")
del_items(0x8014F0B4)
SetType(0x8014F0B4, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x8014F1B0)
SetType(0x8014F1B0, "void CheckInvPaste__Fiii(int pnum, int mx, int my)")
del_items(0x80150E9C)
SetType(0x80150E9C, "void CheckInvCut__Fiii(int pnum, int mx, int my)")
del_items(0x8015194C)
SetType(0x8015194C, "void RemoveInvItem__Fii(int pnum, int iv)")
del_items(0x80151BF4)
SetType(0x80151BF4, "void RemoveSpdBarItem__Fii(int pnum, int iv)")
del_items(0x80151CE8)
SetType(0x80151CE8, "void CheckInvScrn__Fv()")
del_items(0x80151D60)
SetType(0x80151D60, "void CheckItemStats__Fi(int pnum)")
del_items(0x80151DE4)
SetType(0x80151DE4, "void CheckBookLevel__Fi(int pnum)")
del_items(0x80151F18)
SetType(0x80151F18, "void CheckQuestItem__Fi(int pnum)")
del_items(0x80152394)
SetType(0x80152394, "void InvGetItem__Fii(int pnum, int ii)")
del_items(0x80152690)
SetType(0x80152690, "void AutoGetItem__Fii(int pnum, int ii)")
del_items(0x80153100)
SetType(0x80153100, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)")
del_items(0x8015328C)
SetType(0x8015328C, "unsigned char TryInvPut__Fv()")
del_items(0x80153454)
SetType(0x80153454, "int InvPutItem__Fiii(int pnum, int x, int y)")
del_items(0x801538FC)
SetType(0x801538FC, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)")
del_items(0x80153E58)
SetType(0x80153E58, "char CheckInvHLight__Fv()")
del_items(0x801541A0)
SetType(0x801541A0, "void RemoveScroll__Fi(int pnum)")
del_items(0x80154384)
SetType(0x80154384, "unsigned char UseScroll__Fv()")
del_items(0x801545EC)
SetType(0x801545EC, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x80154654)
SetType(0x80154654, "unsigned char UseStaff__Fv()")
del_items(0x80154714)
SetType(0x80154714, "void StartGoldDrop__Fv()")
del_items(0x80154810)
SetType(0x80154810, "unsigned char UseInvItem__Fii(int pnum, int cii)")
del_items(0x80154D34)
SetType(0x80154D34, "void DoTelekinesis__Fv()")
del_items(0x80154E5C)
SetType(0x80154E5C, "long CalculateGold__Fi(int pnum)")
del_items(0x80154F94)
SetType(0x80154F94, "unsigned char DropItemBeforeTrig__Fv()")
del_items(0x80154FEC)
SetType(0x80154FEC, "void ControlInv__Fv()")
del_items(0x801552F8)
SetType(0x801552F8, "void InvGetItemWH__Fi(int Pos)")
del_items(0x801553EC)
SetType(0x801553EC, "void InvAlignObject__Fv()")
del_items(0x801555A0)
SetType(0x801555A0, "void InvSetItemCurs__Fv()")
del_items(0x80155730)
SetType(0x80155730, "void InvMoveCursLeft__Fv()")
del_items(0x801558D8)
SetType(0x801558D8, "void InvMoveCursRight__Fv()")
del_items(0x80155B8C)
SetType(0x80155B8C, "void InvMoveCursUp__Fv()")
del_items(0x80155D84)
SetType(0x80155D84, "void InvMoveCursDown__Fv()")
del_items(0x8015608C)
SetType(0x8015608C, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x801560B4)
SetType(0x801560B4, "void Flush__4CPad(struct CPad *this)")
del_items(0x801560D8)
SetType(0x801560D8, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x801560F8)
SetType(0x801560F8, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80156100)
SetType(0x80156100, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80156108)
SetType(0x80156108, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x80156114)
SetType(0x80156114, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8015613C)
SetType(0x8015613C, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x80156198)
SetType(0x80156198, "void StartAutomap__Fv()")
del_items(0x801561A8)
SetType(0x801561A8, "void AutomapUp__Fv()")
del_items(0x801561C8)
SetType(0x801561C8, "void AutomapDown__Fv()")
del_items(0x801561E8)
SetType(0x801561E8, "void AutomapLeft__Fv()")
del_items(0x80156208)
SetType(0x80156208, "void AutomapRight__Fv()")
del_items(0x80156228)
SetType(0x80156228, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)")
del_items(0x801562D4)
SetType(0x801562D4, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015633C)
SetType(0x8015633C, "void AmDrawPlayer__Fiiiii(int x0, int y0, int x1, int y1, int PNum)")
del_items(0x801563C4)
SetType(0x801563C4, "void DrawAutomapPlr__Fv()")
del_items(0x80156714)
SetType(0x80156714, "void DrawAutoMapVertWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x80156808)
SetType(0x80156808, "void DrawAutoMapHorzWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x801568FC)
SetType(0x801568FC, "void DrawAutoMapVertDoor__Fii(int X, int Y)")
del_items(0x80156AD0)
SetType(0x80156AD0, "void DrawAutoMapHorzDoor__Fii(int X, int Y)")
del_items(0x80156CA8)
SetType(0x80156CA8, "void DrawAutoMapVertGrate__Fii(int X, int Y)")
del_items(0x80156D5C)
SetType(0x80156D5C, "void DrawAutoMapHorzGrate__Fii(int X, int Y)")
del_items(0x80156E10)
SetType(0x80156E10, "void DrawAutoMapSquare__Fii(int X, int Y)")
del_items(0x80156F58)
SetType(0x80156F58, "void DrawAutoMapStairs__Fii(int X, int Y)")
del_items(0x80157158)
SetType(0x80157158, "void DrawAutomap__Fv()")
del_items(0x801575FC)
SetType(0x801575FC, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)")
| 48.308458 | 195 | 0.789255 | del_items(0x8012F26C)
SetType(0x8012F26C, "void GameOnlyTestRoutine__Fv()")
del_items(0x8012F274)
SetType(0x8012F274, "int vecleny__Fii(int a, int b)")
del_items(0x8012F298)
SetType(0x8012F298, "int veclenx__Fii(int a, int b)")
del_items(0x8012F2C4)
SetType(0x8012F2C4, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)")
del_items(0x8012F8BC)
SetType(0x8012F8BC, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)")
del_items(0x8012F9A4)
SetType(0x8012F9A4, "int FindClosest__Fiii(int sx, int sy, int rad)")
del_items(0x8012FB40)
SetType(0x8012FB40, "int GetSpellLevel__Fii(int id, int sn)")
del_items(0x8012FBB4)
SetType(0x8012FBB4, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8012FDD0)
SetType(0x8012FDD0, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8012FFEC)
SetType(0x8012FFEC, "void DeleteMissile__Fii(int mi, int i)")
del_items(0x80130044)
SetType(0x80130044, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)")
del_items(0x801301F8)
SetType(0x801301F8, "void PutMissile__Fi(int i)")
del_items(0x801302FC)
SetType(0x801302FC, "void GetMissilePos__Fi(int i)")
del_items(0x80130424)
SetType(0x80130424, "void MoveMissilePos__Fi(int i)")
del_items(0x8013058C)
SetType(0x8013058C, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80130900)
SetType(0x80130900, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80131060)
SetType(0x80131060, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)")
del_items(0x80131ACC)
SetType(0x80131ACC, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)")
del_items(0x801322A8)
SetType(0x801322A8, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)")
del_items(0x80132724)
SetType(0x80132724, "unsigned char GetTableValue__FUci(unsigned char code, int dir)")
del_items(0x801327B8)
SetType(0x801327B8, "void SetMissAnim__Fii(int mi, int animtype)")
del_items(0x80132888)
SetType(0x80132888, "void SetMissDir__Fii(int mi, int dir)")
del_items(0x801328CC)
SetType(0x801328CC, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80132A8C)
SetType(0x80132A8C, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80132C48)
SetType(0x80132C48, "void GetVileMissPos__Fiii(int mi, int dx, int dy)")
del_items(0x80132D6C)
SetType(0x80132D6C, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801330DC)
SetType(0x801330DC, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x80133348)
SetType(0x80133348, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013345C)
SetType(0x8013345C, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133654)
SetType(0x80133654, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801337A8)
SetType(0x801337A8, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133990)
SetType(0x80133990, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133BEC)
SetType(0x80133BEC, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133CD4)
SetType(0x80133CD4, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80133E9C)
SetType(0x80133E9C, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801340A8)
SetType(0x801340A8, "void AddWeapexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134190)
SetType(0x80134190, "unsigned char CheckIfTrig__Fii(int x, int y)")
del_items(0x80134274)
SetType(0x80134274, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134698)
SetType(0x80134698, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801348A8)
SetType(0x801348A8, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134A88)
SetType(0x80134A88, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134B50)
SetType(0x80134B50, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80134CAC)
SetType(0x80134CAC, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135118)
SetType(0x80135118, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135174)
SetType(0x80135174, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135330)
SetType(0x80135330, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135614)
SetType(0x80135614, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135718)
SetType(0x80135718, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801357F0)
SetType(0x801357F0, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135AE8)
SetType(0x80135AE8, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135CA0)
SetType(0x80135CA0, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135D34)
SetType(0x80135D34, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135F5C)
SetType(0x80135F5C, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80135FC4)
SetType(0x80135FC4, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801361F0)
SetType(0x801361F0, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801362A0)
SetType(0x801362A0, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136550)
SetType(0x80136550, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013664C)
SetType(0x8013664C, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801366D0)
SetType(0x801366D0, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801368E8)
SetType(0x801368E8, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136998)
SetType(0x80136998, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136A48)
SetType(0x80136A48, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136AB0)
SetType(0x80136AB0, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136CEC)
SetType(0x80136CEC, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)")
del_items(0x80136F08)
SetType(0x80136F08, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80136FF8)
SetType(0x80136FF8, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801371EC)
SetType(0x801371EC, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801373AC)
SetType(0x801373AC, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80137420)
SetType(0x80137420, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801374A8)
SetType(0x801374A8, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80137510)
SetType(0x80137510, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013770C)
SetType(0x8013770C, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801377AC)
SetType(0x801377AC, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801378E8)
SetType(0x801378E8, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)")
del_items(0x80137D38)
SetType(0x80137D38, "int Sentfire__Fiii(int i, int sx, int sy)")
del_items(0x80137F1C)
SetType(0x80137F1C, "void MI_Dummy__Fi(int i)")
del_items(0x80137F24)
SetType(0x80137F24, "void MI_Golem__Fi(int i)")
del_items(0x80138180)
SetType(0x80138180, "void MI_SetManashield__Fi(int i)")
del_items(0x801381BC)
SetType(0x801381BC, "void MI_LArrow__Fi(int i)")
del_items(0x80138924)
SetType(0x80138924, "void MI_Arrow__Fi(int i)")
del_items(0x80138B40)
SetType(0x80138B40, "void MI_Firebolt__Fi(int i)")
del_items(0x80139200)
SetType(0x80139200, "void MI_Lightball__Fi(int i)")
del_items(0x80139488)
SetType(0x80139488, "void MI_Acidpud__Fi(int i)")
del_items(0x80139598)
SetType(0x80139598, "void MI_Firewall__Fi(int i)")
del_items(0x8013985C)
SetType(0x8013985C, "void MI_Fireball__Fi(int i)")
del_items(0x8013A220)
SetType(0x8013A220, "void MI_Lightctrl__Fi(int i)")
del_items(0x8013A59C)
SetType(0x8013A59C, "void MI_Lightning__Fi(int i)")
del_items(0x8013A688)
SetType(0x8013A688, "void MI_Town__Fi(int i)")
del_items(0x8013A8C0)
SetType(0x8013A8C0, "void MI_Flash__Fi(int i)")
del_items(0x8013AC14)
SetType(0x8013AC14, "void MI_Flash2__Fi(int i)")
del_items(0x8013ADDC)
SetType(0x8013ADDC, "void MI_Manashield__Fi(int i)")
del_items(0x8013B100)
SetType(0x8013B100, "void MI_Firemove__Fi(int i)")
del_items(0x8013B38C)
SetType(0x8013B38C, "void MI_Guardian__Fi(int i)")
del_items(0x8013B63C)
SetType(0x8013B63C, "void MI_Chain__Fi(int i)")
del_items(0x8013B8A8)
SetType(0x8013B8A8, "void MI_Weapexp__Fi(int i)")
del_items(0x8013BB60)
SetType(0x8013BB60, "void MI_Misexp__Fi(int i)")
del_items(0x8013BE1C)
SetType(0x8013BE1C, "void MI_Acidsplat__Fi(int i)")
del_items(0x8013BFB8)
SetType(0x8013BFB8, "void MI_Teleport__Fi(int i)")
del_items(0x8013C380)
SetType(0x8013C380, "void MI_Stone__Fi(int i)")
del_items(0x8013C52C)
SetType(0x8013C52C, "void MI_Boom__Fi(int i)")
del_items(0x8013C624)
SetType(0x8013C624, "void MI_Rhino__Fi(int i)")
del_items(0x8013C9D0)
SetType(0x8013C9D0, "void MI_FirewallC__Fi(int i)")
del_items(0x8013CC58)
SetType(0x8013CC58, "void MI_Infra__Fi(int i)")
del_items(0x8013CD10)
SetType(0x8013CD10, "void MI_Apoca__Fi(int i)")
del_items(0x8013CFA4)
SetType(0x8013CFA4, "void MI_Wave__Fi(int i)")
del_items(0x8013D4A0)
SetType(0x8013D4A0, "void MI_Nova__Fi(int i)")
del_items(0x8013D760)
SetType(0x8013D760, "void MI_Flame__Fi(int i)")
del_items(0x8013D958)
SetType(0x8013D958, "void MI_Flamec__Fi(int i)")
del_items(0x8013DBE0)
SetType(0x8013DBE0, "void MI_Cbolt__Fi(int i)")
del_items(0x8013DEE4)
SetType(0x8013DEE4, "void MI_Hbolt__Fi(int i)")
del_items(0x8013E1F0)
SetType(0x8013E1F0, "void MI_Element__Fi(int i)")
del_items(0x8013E8A8)
SetType(0x8013E8A8, "void MI_Bonespirit__Fi(int i)")
del_items(0x8013ECB0)
SetType(0x8013ECB0, "void MI_ResurrectBeam__Fi(int i)")
del_items(0x8013ED20)
SetType(0x8013ED20, "void MI_Rportal__Fi(int i)")
del_items(0x8013EF44)
SetType(0x8013EF44, "void ProcessMissiles__Fv()")
del_items(0x8013F338)
SetType(0x8013F338, "void ClearMissileSpot__Fi(int mi)")
del_items(0x8013F3F0)
SetType(0x8013F3F0, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)")
del_items(0x8013F404)
SetType(0x8013F404, "void MonstPartJump__Fi(int m)")
del_items(0x8013F598)
SetType(0x8013F598, "void DeleteMonster__Fi(int i)")
del_items(0x8013F5D0)
SetType(0x8013F5D0, "int M_GetDir__Fi(int i)")
del_items(0x8013F62C)
SetType(0x8013F62C, "void M_StartDelay__Fii(int i, int len)")
del_items(0x8013F674)
SetType(0x8013F674, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x8013F78C)
SetType(0x8013F78C, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x8013F8B0)
SetType(0x8013F8B0, "void M_StartSpAttack__Fi(int i)")
del_items(0x8013F998)
SetType(0x8013F998, "void M_StartEat__Fi(int i)")
del_items(0x8013FA68)
SetType(0x8013FA68, "void M_GetKnockback__Fi(int i)")
del_items(0x8013FC40)
SetType(0x8013FC40, "void M_StartHit__Fiii(int i, int pnum, int dam)")
del_items(0x8013FF38)
SetType(0x8013FF38, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)")
del_items(0x8014024C)
SetType(0x8014024C, "void M2MStartHit__Fiii(int mid, int i, int dam)")
del_items(0x801404F8)
SetType(0x801404F8, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)")
del_items(0x801407E4)
SetType(0x801407E4, "void M2MStartKill__Fii(int i, int mid)")
del_items(0x80140BAC)
SetType(0x80140BAC, "void M_StartKill__Fii(int i, int pnum)")
del_items(0x80140C9C)
SetType(0x80140C9C, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80140DF0)
SetType(0x80140DF0, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80140F38)
SetType(0x80140F38, "void M_StartHeal__Fi(int i)")
del_items(0x80140FB8)
SetType(0x80140FB8, "void M_ChangeLightOffset__Fi(int monst)")
del_items(0x80141120)
SetType(0x80141120, "int M_DoStand__Fi(int i)")
del_items(0x80141188)
SetType(0x80141188, "int M_DoWalk__Fi(int i)")
del_items(0x8014140C)
SetType(0x8014140C, "int M_DoWalk2__Fi(int i)")
del_items(0x801415F8)
SetType(0x801415F8, "int M_DoWalk3__Fi(int i)")
del_items(0x801418BC)
SetType(0x801418BC, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)")
del_items(0x80141A84)
SetType(0x80141A84, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)")
del_items(0x80142098)
SetType(0x80142098, "int M_DoAttack__Fi(int i)")
del_items(0x8014223C)
SetType(0x8014223C, "int M_DoRAttack__Fi(int i)")
del_items(0x801423B4)
SetType(0x801423B4, "int M_DoRSpAttack__Fi(int i)")
del_items(0x801425A4)
SetType(0x801425A4, "int M_DoSAttack__Fi(int i)")
del_items(0x80142678)
SetType(0x80142678, "int M_DoFadein__Fi(int i)")
del_items(0x80142748)
SetType(0x80142748, "int M_DoFadeout__Fi(int i)")
del_items(0x8014285C)
SetType(0x8014285C, "int M_DoHeal__Fi(int i)")
del_items(0x80142908)
SetType(0x80142908, "int M_DoTalk__Fi(int i)")
del_items(0x80142E74)
SetType(0x80142E74, "void M_Teleport__Fi(int i)")
del_items(0x801430A8)
SetType(0x801430A8, "int M_DoGotHit__Fi(int i)")
del_items(0x80143108)
SetType(0x80143108, "void DoEnding__Fv()")
del_items(0x801431C8)
SetType(0x801431C8, "void PrepDoEnding__Fv()")
del_items(0x801432E0)
SetType(0x801432E0, "int M_DoDeath__Fi(int i)")
del_items(0x801434B0)
SetType(0x801434B0, "int M_DoSpStand__Fi(int i)")
del_items(0x80143554)
SetType(0x80143554, "int M_DoDelay__Fi(int i)")
del_items(0x80143644)
SetType(0x80143644, "int M_DoStone__Fi(int i)")
del_items(0x801436C8)
SetType(0x801436C8, "void M_WalkDir__Fii(int i, int md)")
del_items(0x801438F0)
SetType(0x801438F0, "void GroupUnity__Fi(int i)")
del_items(0x80143CDC)
SetType(0x80143CDC, "unsigned char M_CallWalk__Fii(int i, int md)")
del_items(0x80143EC8)
SetType(0x80143EC8, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())")
del_items(0x80143F8C)
SetType(0x80143F8C, "unsigned char M_CallWalk2__Fii(int i, int md)")
del_items(0x801440A0)
SetType(0x801440A0, "unsigned char M_DumbWalk__Fii(int i, int md)")
del_items(0x801440F4)
SetType(0x801440F4, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)")
del_items(0x80144294)
SetType(0x80144294, "void MAI_Zombie__Fi(int i)")
del_items(0x8014448C)
SetType(0x8014448C, "void MAI_SkelSd__Fi(int i)")
del_items(0x80144624)
SetType(0x80144624, "void MAI_Snake__Fi(int i)")
del_items(0x80144A08)
SetType(0x80144A08, "void MAI_Bat__Fi(int i)")
del_items(0x80144DC0)
SetType(0x80144DC0, "void MAI_SkelBow__Fi(int i)")
del_items(0x80144FA4)
SetType(0x80144FA4, "void MAI_Fat__Fi(int i)")
del_items(0x80145154)
SetType(0x80145154, "void MAI_Sneak__Fi(int i)")
del_items(0x80145540)
SetType(0x80145540, "void MAI_Fireman__Fi(int i)")
del_items(0x80145838)
SetType(0x80145838, "void MAI_Fallen__Fi(int i)")
del_items(0x80145B54)
SetType(0x80145B54, "void MAI_Cleaver__Fi(int i)")
del_items(0x80145C3C)
SetType(0x80145C3C, "void MAI_Round__FiUc(int i, unsigned char special)")
del_items(0x801460A8)
SetType(0x801460A8, "void MAI_GoatMc__Fi(int i)")
del_items(0x801460C8)
SetType(0x801460C8, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)")
del_items(0x801462E8)
SetType(0x801462E8, "void MAI_GoatBow__Fi(int i)")
del_items(0x8014630C)
SetType(0x8014630C, "void MAI_Succ__Fi(int i)")
del_items(0x80146330)
SetType(0x80146330, "void MAI_AcidUniq__Fi(int i)")
del_items(0x80146354)
SetType(0x80146354, "void MAI_Scav__Fi(int i)")
del_items(0x8014676C)
SetType(0x8014676C, "void MAI_Garg__Fi(int i)")
del_items(0x8014694C)
SetType(0x8014694C, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)")
del_items(0x80146E60)
SetType(0x80146E60, "void MAI_Magma__Fi(int i)")
del_items(0x80146E8C)
SetType(0x80146E8C, "void MAI_Storm__Fi(int i)")
del_items(0x80146EB8)
SetType(0x80146EB8, "void MAI_Acid__Fi(int i)")
del_items(0x80146EE8)
SetType(0x80146EE8, "void MAI_Diablo__Fi(int i)")
del_items(0x80146F14)
SetType(0x80146F14, "void MAI_RR2__Fiii(int i, int mistype, int dam)")
del_items(0x80147414)
SetType(0x80147414, "void MAI_Mega__Fi(int i)")
del_items(0x80147438)
SetType(0x80147438, "void MAI_SkelKing__Fi(int i)")
del_items(0x80147974)
SetType(0x80147974, "void MAI_Rhino__Fi(int i)")
del_items(0x80147E1C)
SetType(0x80147E1C, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)")
del_items(0x801482E8)
SetType(0x801482E8, "void MAI_Garbud__Fi(int i)")
del_items(0x801484F0)
SetType(0x801484F0, "void MAI_Zhar__Fi(int i)")
del_items(0x801486E8)
SetType(0x801486E8, "void MAI_SnotSpil__Fi(int i)")
del_items(0x80148934)
SetType(0x80148934, "void MAI_Lazurus__Fi(int i)")
del_items(0x80148BA8)
SetType(0x80148BA8, "void MAI_Lazhelp__Fi(int i)")
del_items(0x80148CC8)
SetType(0x80148CC8, "void MAI_Lachdanan__Fi(int i)")
del_items(0x80148E74)
SetType(0x80148E74, "void MAI_Warlord__Fi(int i)")
del_items(0x80148FC0)
SetType(0x80148FC0, "void DeleteMonsterList__Fv()")
del_items(0x801490DC)
SetType(0x801490DC, "void ProcessMonsters__Fv()")
del_items(0x8014966C)
SetType(0x8014966C, "unsigned char DirOK__Fii(int i, int mdir)")
del_items(0x80149A54)
SetType(0x80149A54, "unsigned char PosOkMissile__Fii(int x, int y)")
del_items(0x80149ABC)
SetType(0x80149ABC, "unsigned char CheckNoSolid__Fii(int x, int y)")
del_items(0x80149B00)
SetType(0x80149B00, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)")
del_items(0x80149D88)
SetType(0x80149D88, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80149DC8)
SetType(0x80149DC8, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)")
del_items(0x8014A05C)
SetType(0x8014A05C, "void M_FallenFear__Fii(int x, int y)")
del_items(0x8014A22C)
SetType(0x8014A22C, "void PrintMonstHistory__Fi(int mt)")
del_items(0x8014A4E0)
SetType(0x8014A4E0, "void PrintUniqueHistory__Fv()")
del_items(0x8014A604)
SetType(0x8014A604, "void MissToMonst__Fiii(int i, int x, int y)")
del_items(0x8014AA80)
SetType(0x8014AA80, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)")
del_items(0x8014AC9C)
SetType(0x8014AC9C, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)")
del_items(0x8014AF90)
SetType(0x8014AF90, "int M_SpawnSkel__Fiii(int x, int y, int dir)")
del_items(0x8014B0E8)
SetType(0x8014B0E8, "void TalktoMonster__Fi(int i)")
del_items(0x8014B214)
SetType(0x8014B214, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)")
del_items(0x8014B46C)
SetType(0x8014B46C, "unsigned char CanTalkToMonst__Fi(int m)")
del_items(0x8014B4A4)
SetType(0x8014B4A4, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)")
del_items(0x8014B570)
SetType(0x8014B570, "void MAI_Golum__Fi(int i)")
del_items(0x8014B8E4)
SetType(0x8014B8E4, "unsigned char MAI_Path__Fi(int i)")
del_items(0x8014BA48)
SetType(0x8014BA48, "void M_StartAttack__Fi(int i)")
del_items(0x8014BB30)
SetType(0x8014BB30, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)")
del_items(0x8014BC90)
SetType(0x8014BC90, "void FreeInvGFX__Fv()")
del_items(0x8014BC98)
SetType(0x8014BC98, "void InvDrawSlot__Fiii(int X, int Y, int Frame)")
del_items(0x8014BD1C)
SetType(0x8014BD1C, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)")
del_items(0x8014BF70)
SetType(0x8014BF70, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)")
del_items(0x8014C040)
SetType(0x8014C040, "void InvDrawSlots__Fv()")
del_items(0x8014C318)
SetType(0x8014C318, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)")
del_items(0x8014C3E4)
SetType(0x8014C3E4, "void DrawInvStats__Fv()")
del_items(0x8014CF00)
SetType(0x8014CF00, "void DrawInvBack__Fv()")
del_items(0x8014CF88)
SetType(0x8014CF88, "void DrawInvCursor__Fv()")
del_items(0x8014D464)
SetType(0x8014D464, "void DrawInvMsg__Fv()")
del_items(0x8014D62C)
SetType(0x8014D62C, "void DrawInvUnique__Fv()")
del_items(0x8014D750)
SetType(0x8014D750, "void DrawInv__Fv()")
del_items(0x8014D790)
SetType(0x8014D790, "void DrawInvTSK__FP4TASK(struct TASK *T)")
del_items(0x8014DAD4)
SetType(0x8014DAD4, "void DoThatDrawInv__Fv()")
del_items(0x8014E29C)
SetType(0x8014E29C, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x8014E5BC)
SetType(0x8014E5BC, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x8014E958)
SetType(0x8014E958, "unsigned char GoldAutoPlace__Fi(int pnum)")
del_items(0x8014EE28)
SetType(0x8014EE28, "unsigned char WeaponAutoPlace__Fi(int pnum)")
del_items(0x8014F0B4)
SetType(0x8014F0B4, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x8014F1B0)
SetType(0x8014F1B0, "void CheckInvPaste__Fiii(int pnum, int mx, int my)")
del_items(0x80150E9C)
SetType(0x80150E9C, "void CheckInvCut__Fiii(int pnum, int mx, int my)")
del_items(0x8015194C)
SetType(0x8015194C, "void RemoveInvItem__Fii(int pnum, int iv)")
del_items(0x80151BF4)
SetType(0x80151BF4, "void RemoveSpdBarItem__Fii(int pnum, int iv)")
del_items(0x80151CE8)
SetType(0x80151CE8, "void CheckInvScrn__Fv()")
del_items(0x80151D60)
SetType(0x80151D60, "void CheckItemStats__Fi(int pnum)")
del_items(0x80151DE4)
SetType(0x80151DE4, "void CheckBookLevel__Fi(int pnum)")
del_items(0x80151F18)
SetType(0x80151F18, "void CheckQuestItem__Fi(int pnum)")
del_items(0x80152394)
SetType(0x80152394, "void InvGetItem__Fii(int pnum, int ii)")
del_items(0x80152690)
SetType(0x80152690, "void AutoGetItem__Fii(int pnum, int ii)")
del_items(0x80153100)
SetType(0x80153100, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)")
del_items(0x8015328C)
SetType(0x8015328C, "unsigned char TryInvPut__Fv()")
del_items(0x80153454)
SetType(0x80153454, "int InvPutItem__Fiii(int pnum, int x, int y)")
del_items(0x801538FC)
SetType(0x801538FC, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)")
del_items(0x80153E58)
SetType(0x80153E58, "char CheckInvHLight__Fv()")
del_items(0x801541A0)
SetType(0x801541A0, "void RemoveScroll__Fi(int pnum)")
del_items(0x80154384)
SetType(0x80154384, "unsigned char UseScroll__Fv()")
del_items(0x801545EC)
SetType(0x801545EC, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x80154654)
SetType(0x80154654, "unsigned char UseStaff__Fv()")
del_items(0x80154714)
SetType(0x80154714, "void StartGoldDrop__Fv()")
del_items(0x80154810)
SetType(0x80154810, "unsigned char UseInvItem__Fii(int pnum, int cii)")
del_items(0x80154D34)
SetType(0x80154D34, "void DoTelekinesis__Fv()")
del_items(0x80154E5C)
SetType(0x80154E5C, "long CalculateGold__Fi(int pnum)")
del_items(0x80154F94)
SetType(0x80154F94, "unsigned char DropItemBeforeTrig__Fv()")
del_items(0x80154FEC)
SetType(0x80154FEC, "void ControlInv__Fv()")
del_items(0x801552F8)
SetType(0x801552F8, "void InvGetItemWH__Fi(int Pos)")
del_items(0x801553EC)
SetType(0x801553EC, "void InvAlignObject__Fv()")
del_items(0x801555A0)
SetType(0x801555A0, "void InvSetItemCurs__Fv()")
del_items(0x80155730)
SetType(0x80155730, "void InvMoveCursLeft__Fv()")
del_items(0x801558D8)
SetType(0x801558D8, "void InvMoveCursRight__Fv()")
del_items(0x80155B8C)
SetType(0x80155B8C, "void InvMoveCursUp__Fv()")
del_items(0x80155D84)
SetType(0x80155D84, "void InvMoveCursDown__Fv()")
del_items(0x8015608C)
SetType(0x8015608C, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x801560B4)
SetType(0x801560B4, "void Flush__4CPad(struct CPad *this)")
del_items(0x801560D8)
SetType(0x801560D8, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x801560F8)
SetType(0x801560F8, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80156100)
SetType(0x80156100, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x80156108)
SetType(0x80156108, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x80156114)
SetType(0x80156114, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8015613C)
SetType(0x8015613C, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x80156198)
SetType(0x80156198, "void StartAutomap__Fv()")
del_items(0x801561A8)
SetType(0x801561A8, "void AutomapUp__Fv()")
del_items(0x801561C8)
SetType(0x801561C8, "void AutomapDown__Fv()")
del_items(0x801561E8)
SetType(0x801561E8, "void AutomapLeft__Fv()")
del_items(0x80156208)
SetType(0x80156208, "void AutomapRight__Fv()")
del_items(0x80156228)
SetType(0x80156228, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)")
del_items(0x801562D4)
SetType(0x801562D4, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015633C)
SetType(0x8015633C, "void AmDrawPlayer__Fiiiii(int x0, int y0, int x1, int y1, int PNum)")
del_items(0x801563C4)
SetType(0x801563C4, "void DrawAutomapPlr__Fv()")
del_items(0x80156714)
SetType(0x80156714, "void DrawAutoMapVertWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x80156808)
SetType(0x80156808, "void DrawAutoMapHorzWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x801568FC)
SetType(0x801568FC, "void DrawAutoMapVertDoor__Fii(int X, int Y)")
del_items(0x80156AD0)
SetType(0x80156AD0, "void DrawAutoMapHorzDoor__Fii(int X, int Y)")
del_items(0x80156CA8)
SetType(0x80156CA8, "void DrawAutoMapVertGrate__Fii(int X, int Y)")
del_items(0x80156D5C)
SetType(0x80156D5C, "void DrawAutoMapHorzGrate__Fii(int X, int Y)")
del_items(0x80156E10)
SetType(0x80156E10, "void DrawAutoMapSquare__Fii(int X, int Y)")
del_items(0x80156F58)
SetType(0x80156F58, "void DrawAutoMapStairs__Fii(int X, int Y)")
del_items(0x80157158)
SetType(0x80157158, "void DrawAutomap__Fv()")
del_items(0x801575FC)
SetType(0x801575FC, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)")
| 0 | 0 | 0 |
53654b6927aa429eb4b1d14084bab5ab43f23388 | 7,762 | py | Python | ansible_collections/ctera/ctera/plugins/modules/ctera_portal_plan.py | ctera/ctera-ansible-collections | 0a3a664e271015a3a701349fc4fd1ad7df0acebd | [
"Apache-2.0"
] | null | null | null | ansible_collections/ctera/ctera/plugins/modules/ctera_portal_plan.py | ctera/ctera-ansible-collections | 0a3a664e271015a3a701349fc4fd1ad7df0acebd | [
"Apache-2.0"
] | 3 | 2020-04-29T20:30:05.000Z | 2020-08-04T22:51:11.000Z | ansible_collections/ctera/ctera/plugins/modules/ctera_portal_plan.py | ctera/ctera-ansible-collections | 0a3a664e271015a3a701349fc4fd1ad7df0acebd | [
"Apache-2.0"
] | 2 | 2020-04-28T17:01:53.000Z | 2021-03-01T19:23:14.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, CTERA Networks Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ctera_portal_plan
short_description: CTERA-Networks Portal Plan configuration and management
description:
- Create, modify and delete plans.
extends_documentation_fragment:
- ctera.ctera.vportal
author:
- Saimon Michelson (@saimonation)
- Ygal Blum (@ygalblum)
options:
state:
description:
- Whether the specified plan should exist or not.
type: str
choices: ['present', 'absent']
default: 'present'
name:
description: The name of the plan
required: True
type: str
retention:
description: The data retention policy
type: list
elements: dict
suboptions:
policy_name:
description: The name of the policy
type: str
required: True
choices:
- retainAll
- hourly
- daily
- weekly
- monthly
- quarterly
- yearly
- retainDeleted
duration:
description: The duration for the policy
type: int
required: True
quotas:
description: The items included in the plan and their respective quota
type: list
elements: dict
suboptions:
item_name:
description: The name of the plan item
type: str
required: True
choices:
- EV4
- EV8
- EV16
- EV32
- EV64
- EV128
- WA
- SA
- Share
- Connect
amount:
description: The quota's amount
type: int
required: True
'''
EXAMPLES = '''
- name: Portal Plan
ctera_portal_plan:
name: 'example'
retention:
- policy_name: retainAll
duration: 24
quotas:
- item_name: EV16
amount: 100
ctera_host: "{{ ctera_portal_hostname }}"
ctera_user: "{{ ctera_portal_user }}"
ctera_password: "{{ ctera_portal_password }}"
'''
RETURN = '''
name:
description: Name of the Plan
returned: when state is present
type: str
sample: example
'''
import ansible_collections.ctera.ctera.plugins.module_utils.ctera_common as ctera_common
from ansible_collections.ctera.ctera.plugins.module_utils.ctera_portal_base import CteraPortalBase
try:
from cterasdk import CTERAException
except ImportError: # pragma: no cover
pass # caught by ctera_common
if __name__ == '__main__': # pragma: no cover
main()
| 32.613445 | 135 | 0.583355 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, CTERA Networks Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ctera_portal_plan
short_description: CTERA-Networks Portal Plan configuration and management
description:
- Create, modify and delete plans.
extends_documentation_fragment:
- ctera.ctera.vportal
author:
- Saimon Michelson (@saimonation)
- Ygal Blum (@ygalblum)
options:
state:
description:
- Whether the specified plan should exist or not.
type: str
choices: ['present', 'absent']
default: 'present'
name:
description: The name of the plan
required: True
type: str
retention:
description: The data retention policy
type: list
elements: dict
suboptions:
policy_name:
description: The name of the policy
type: str
required: True
choices:
- retainAll
- hourly
- daily
- weekly
- monthly
- quarterly
- yearly
- retainDeleted
duration:
description: The duration for the policy
type: int
required: True
quotas:
description: The items included in the plan and their respective quota
type: list
elements: dict
suboptions:
item_name:
description: The name of the plan item
type: str
required: True
choices:
- EV4
- EV8
- EV16
- EV32
- EV64
- EV128
- WA
- SA
- Share
- Connect
amount:
description: The quota's amount
type: int
required: True
'''
EXAMPLES = '''
- name: Portal Plan
ctera_portal_plan:
name: 'example'
retention:
- policy_name: retainAll
duration: 24
quotas:
- item_name: EV16
amount: 100
ctera_host: "{{ ctera_portal_hostname }}"
ctera_user: "{{ ctera_portal_user }}"
ctera_password: "{{ ctera_portal_password }}"
'''
RETURN = '''
name:
description: Name of the Plan
returned: when state is present
type: str
sample: example
'''
import ansible_collections.ctera.ctera.plugins.module_utils.ctera_common as ctera_common
from ansible_collections.ctera.ctera.plugins.module_utils.ctera_portal_base import CteraPortalBase
try:
from cterasdk import CTERAException
except ImportError: # pragma: no cover
pass # caught by ctera_common
class CteraPortalPlan(CteraPortalBase):
    """Ansible module implementation that creates, modifies and deletes CTERA Portal plans."""

    # NOTE(review): appears copied from the user module and is unused in this
    # class; kept as-is in case external code references it.
    _create_params = ['name', 'email', 'first_name', 'last_name', 'password', 'role', 'company', 'comment', 'password_change']
    # Valid policy names accepted in the `retention` option.
    _retention_policy_names = ['retainAll', 'hourly', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly', 'retainDeleted']
    # Valid plan-item names accepted in the `quotas` option.
    _quotas_item_names = ['EV4', 'EV8', 'EV16', 'EV32', 'EV64', 'EV128', 'WA', 'SA', 'Share', 'Connect']

    def __init__(self):
        """Declare the module argument spec and delegate setup to the base class."""
        super().__init__(
            dict(
                state=dict(required=False, choices=['present', 'absent'], default='present'),
                name=dict(type='str', required=True),
                retention=dict(
                    type='list',
                    elements='dict',
                    options=dict(
                        policy_name=dict(type='str', required=True, choices=CteraPortalPlan._retention_policy_names),
                        duration=dict(type='int', required=True)
                    )
                ),
                quotas=dict(
                    type='list',
                    elements='dict',
                    options=dict(
                        item_name=dict(type='str', required=True, choices=CteraPortalPlan._quotas_item_names),
                        amount=dict(type='int', required=True)
                    )
                )
            )
        )

    @property
    def _generic_failure_message(self):  # pragma: no cover
        """Message reported when an unexpected exception aborts the module."""
        return 'Plan management failed'

    def _execute(self):
        """Dispatch to create/update or delete depending on the `state` option."""
        state = self.parameters.pop('state')
        plan = self._get_plan()
        if state == 'present':
            self._ensure_present(plan)
        else:
            self._ensure_absent(plan)

    def _get_plan(self):
        """Return the current plan as a comparable dict, or None if it does not exist."""
        plan = None
        try:
            plan = self._ctera_portal.plans.get(
                self.parameters['name'],
                include=[
                    'name',
                    'retentionPolicy',
                    'vGateways4',
                    'vGateways8',
                    'appliances',  # EV16
                    'vGateways32',
                    'vGateways64',
                    'vGateways128',
                    'workstationAgents',
                    'serverAgents',
                    'cloudDrives',
                    'cloudDrivesLite'
                ]
            )
        except CTERAException:
            pass  # plan not found: treat as absent
        return self._to_plan_dict(plan) if plan else None

    def _ensure_present(self, plan):
        """Create the plan, or modify it when its current attributes differ."""
        if plan:
            modified_attributes = ctera_common.get_modified_attributes(plan, self.parameters)
            if modified_attributes:
                self._ctera_portal.plans.modify(self.parameters['name'], **self._translate_params_obj(modified_attributes))
                self.ansible_module.ctera_return_value().changed().msg('Plan modified').put(name=self.parameters['name'])
            else:
                self.ansible_module.ctera_return_value().skipped().msg('Plan details did not change').put(name=self.parameters['name'])
        else:
            self._ctera_portal.plans.add(self.parameters['name'], **self._translate_params_obj(self.parameters))
            self.ansible_module.ctera_return_value().changed().msg('Plan created').put(name=self.parameters['name'])

    @staticmethod
    def _translate_params_obj(parameters):
        """Convert the module's list-of-dicts options into the {name: value} maps the SDK expects."""
        return dict(
            retention={i['policy_name']: i['duration'] for i in parameters['retention']} if parameters.get('retention') else None,
            quotas={i['item_name']: i['amount'] for i in parameters['quotas']} if parameters.get('quotas') else None
        )

    def _ensure_absent(self, plan):
        """Delete the plan if it exists; otherwise report a no-op."""
        if plan:
            self._ctera_portal.plans.delete(self.parameters['name'])
            self.ansible_module.ctera_return_value().changed().msg('Plan deleted').put(name=self.parameters['name'])
        else:
            self.ansible_module.ctera_return_value().skipped().msg('Plan already does not exist').put(name=self.parameters['name'])

    @staticmethod
    def _to_plan_dict(plan):
        """Flatten an SDK plan object into the dict shape used for comparison with module params."""
        return dict(
            name=plan.name,
            retention=[
                dict(policy_name=k, duration=v) for k, v in plan.retentionPolicy.__dict__.items() if not k.startswith("_")
            ],
            quotas=[
                dict(item_name='EV4', amount=plan.vGateways4.amount),
                dict(item_name='EV8', amount=plan.vGateways8.amount),
                dict(item_name='EV16', amount=plan.appliances.amount),
                # BUG FIX: EV32 was fetched via 'vGateways32' in _get_plan and is a
                # valid quota item, but was omitted here, so EV32 quotas could never
                # be compared idempotently against the module parameters.
                dict(item_name='EV32', amount=plan.vGateways32.amount),
                dict(item_name='EV64', amount=plan.vGateways64.amount),
                dict(item_name='EV128', amount=plan.vGateways128.amount),
                dict(item_name='WA', amount=plan.workstationAgents.amount),
                dict(item_name='SA', amount=plan.serverAgents.amount),
                dict(item_name='Share', amount=plan.cloudDrives.amount),
                dict(item_name='Connect', amount=plan.cloudDrivesLite.amount),
            ]
        )
def main():  # pragma: no cover
    """Module entry point: instantiate the plan manager and run the Ansible module."""
    CteraPortalPlan().run()


if __name__ == '__main__':  # pragma: no cover
    main()
| 4,313 | 640 | 46 |
d207914ecf9eca2833a7d7d4678a7431ddca2809 | 734 | py | Python | sensorCalibration.py | cbates8/CompostMonitoringSystem | 765fbf60e3d684c83c8fda6d9f18b5de2b7e03ab | [
"MIT"
] | null | null | null | sensorCalibration.py | cbates8/CompostMonitoringSystem | 765fbf60e3d684c83c8fda6d9f18b5de2b7e03ab | [
"MIT"
] | null | null | null | sensorCalibration.py | cbates8/CompostMonitoringSystem | 765fbf60e3d684c83c8fda6d9f18b5de2b7e03ab | [
"MIT"
] | null | null | null | import adafruit_mcp3xxx.mcp3008 as MCP
from multiprocessing import Process

from moistureSensor import MoistureSensor

# One moisture sensor per MCP3008 ADC channel (MCP imported above).
moisture_one = MoistureSensor(MCP.P0)
moisture_two = MoistureSensor(MCP.P1)
moisture_three = MoistureSensor(MCP.P3)

# Calibrates the sensors in parallel.
# BUG(review): `target=moisture_one.calibrate()` CALLS calibrate() right here in
# the parent process and passes its return value as the Process target, so the
# three calibrations actually run sequentially before start() is ever reached.
# NOTE(review): merely dropping the parentheses would not be a complete fix —
# attribute values set on the sensor objects inside a child process would not
# propagate back to the parent, so the CSV written below would record stale
# data. A correct fix needs threads or an explicit result channel; confirm
# whether calibrate() is interactive before parallelizing.
p1 = Process(target=moisture_one.calibrate())
p1.start()
p2 = Process(target=moisture_two.calibrate())
p2.start()
p3 = Process(target=moisture_three.calibrate())
p3.start()

p1.join()
p2.join()
p3.join()

# Persist the calibration endpoints (airVal / waterVal per sensor) as CSV.
with open("/home/pi/CompostMonitoringSystem/calibrationValues.csv", "w") as ofile:
    ofile.write("Sensor, AirVal, WaterVal\n")
    sensors = [moisture_one, moisture_two, moisture_three]
for s in sensors:
ofile.write(f"{s.pinNum},{s.airVal},{s.waterVal}\n") | 28.230769 | 82 | 0.77248 | import adafruit_mcp3xxx.mcp3008 as MCP
from multiprocessing import Process

from moistureSensor import MoistureSensor

# One moisture sensor per MCP3008 ADC channel (MCP imported above).
moisture_one = MoistureSensor(MCP.P0)
moisture_two = MoistureSensor(MCP.P1)
moisture_three = MoistureSensor(MCP.P3)

# Calibrates the sensors in parallel.
# BUG(review): `target=moisture_one.calibrate()` CALLS calibrate() right here in
# the parent process and passes its return value as the Process target, so the
# three calibrations actually run sequentially before start() is ever reached.
# NOTE(review): merely dropping the parentheses would not be a complete fix —
# attribute values set on the sensor objects inside a child process would not
# propagate back to the parent, so the CSV written below would record stale
# data. A correct fix needs threads or an explicit result channel; confirm
# whether calibrate() is interactive before parallelizing.
p1 = Process(target=moisture_one.calibrate())
p1.start()
p2 = Process(target=moisture_two.calibrate())
p2.start()
p3 = Process(target=moisture_three.calibrate())
p3.start()

p1.join()
p2.join()
p3.join()

# Persist the calibration endpoints (airVal / waterVal per sensor) as CSV.
with open("/home/pi/CompostMonitoringSystem/calibrationValues.csv", "w") as ofile:
    ofile.write("Sensor, AirVal, WaterVal\n")
    sensors = [moisture_one, moisture_two, moisture_three]
for s in sensors:
ofile.write(f"{s.pinNum},{s.airVal},{s.waterVal}\n") | 0 | 0 | 0 |
cfb14c38b8c02167d0a208d3b4653323a9779125 | 1,741 | py | Python | app.py | MostafaBalata/smart-monitor-system | 079b4e1cf2e2639a710e6854b628cb7ee820ab5b | [
"MIT"
] | 4 | 2017-03-01T21:20:37.000Z | 2018-07-17T08:29:08.000Z | app.py | MostafaBalata/smart-monitor-system | 079b4e1cf2e2639a710e6854b628cb7ee820ab5b | [
"MIT"
] | 1 | 2017-03-03T12:45:25.000Z | 2017-03-03T12:45:25.000Z | app.py | MostafaBalata/smart-monitor-system | 079b4e1cf2e2639a710e6854b628cb7ee820ab5b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from flask import Flask, render_template, Response
# emulated camera
#from camera import Camera
from camera_pi import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
from motion_tracker import get_frame
app = Flask(__name__)
@app.route('/')
def index():
    """Render the video-streaming home page (templates/index.html)."""
    return render_template('index.html')
def gen(camera):
    """Produce an endless multipart MJPEG byte stream from *camera*.

    Each yielded chunk is one JPEG frame, as returned by
    ``camera.get_frame()``, wrapped in the ``--frame`` multipart boundary
    expected by a ``multipart/x-mixed-replace`` response.
    """
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield part_header + camera.get_frame() + b'\r\n'
from StringIO import StringIO
@app.route('/video_feed')
def video_feed():
    """MJPEG streaming route; reference it from an <img src=...> attribute."""
    frame_stream = gen(Camera())
    return Response(frame_stream, mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/tracking')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, threaded=True)
| 26.784615 | 81 | 0.63297 | #!/usr/bin/env python
from flask import Flask, render_template, Response
# emulated camera
#from camera import Camera
from camera_pi import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
from motion_tracker import get_frame
app = Flask(__name__)
@app.route('/')
def index():
    """Render the video-streaming home page (templates/index.html)."""
    return render_template('index.html')
def gen(camera):
    """Produce an endless multipart MJPEG byte stream from *camera*.

    Each yielded chunk is one JPEG frame, as returned by
    ``camera.get_frame()``, wrapped in the ``--frame`` multipart boundary
    expected by a ``multipart/x-mixed-replace`` response.
    """
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield part_header + camera.get_frame() + b'\r\n'
from StringIO import StringIO
def gen2():
    """Yield a multipart MJPEG stream of motion-tracked frames.

    Pulls raw frames from ``motion_tracker.get_frame()``, JPEG-encodes each
    one with OpenCV, and wraps it in the multipart boundary used by the
    ``/tracking`` route.
    """
    import cv2  # imported lazily so the app can start without OpenCV installed

    while True:
        frame = get_frame()
        ok, buf = cv2.imencode('.jpg', frame)
        if not ok:
            # Skip frames that fail to encode instead of yielding garbage.
            continue
        # BUG FIX: ndarray.tostring() is deprecated and removed in modern
        # NumPy; tobytes() is the supported equivalent.
        jpeg = buf.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n')
@app.route('/video_feed')
def video_feed():
    """MJPEG streaming route; reference it from an <img src=...> attribute."""
    frame_stream = gen(Camera())
    return Response(frame_stream, mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/tracking')
def tracker():
    """Serve the motion-tracker frames produced by gen2() as an MJPEG stream."""
    # while True:
    #     frame = get_frame()
    #     yield (b'--frame\r\n'
    #         b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    return Response(gen2(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, threaded=True)
| 636 | 0 | 44 |
861af63dd167216ae5c049d5687c7fa9d4f851f5 | 279 | py | Python | jsub/config/error.py | xianghuzhao/jsub | fda16e9a0983410d33e454d9f1c4a94134e49d41 | [
"MIT"
] | 2 | 2017-05-26T07:17:34.000Z | 2019-04-08T05:53:35.000Z | jsub/config/error.py | xianghuzhao/jsub | fda16e9a0983410d33e454d9f1c4a94134e49d41 | [
"MIT"
] | null | null | null | jsub/config/error.py | xianghuzhao/jsub | fda16e9a0983410d33e454d9f1c4a94134e49d41 | [
"MIT"
] | 1 | 2019-04-08T06:52:46.000Z | 2019-04-08T06:52:46.000Z | from jsub.error import JsubError
| 16.411765 | 44 | 0.784946 | from jsub.error import JsubError
class ConfigError(JsubError):
    """Base class for jsub configuration errors."""
    pass
class SyntaxError(ConfigError):
    # NOTE(review): shadows the builtin SyntaxError; renaming would change the
    # public API (existing `except` clauses), so it is only flagged here.
    """Raised when a configuration file is syntactically invalid."""
    pass
class UnknownConfigFormatError(ConfigError):
    """Raised when a configuration file format is not recognised."""
    pass
class ConfigFileNotFoundError(ConfigError):
    """Raised when a configuration file cannot be located."""
    pass
class UnknownUpdateMethodError(ConfigError):
    """Raised when an unsupported configuration update method is requested."""
    pass
| 0 | 131 | 115 |
855310f6abf608e0738d7e82ba8e8976e6186a5e | 2,487 | py | Python | src/adventofcode/year_2021/day_05_2021.py | marcelblijleven/adventofcode | 94def5ec340fbb58eaa56e82b54b9495903eb9d9 | [
"MIT"
] | 13 | 2021-12-01T03:50:04.000Z | 2022-03-23T14:16:29.000Z | src/adventofcode/year_2021/day_05_2021.py | marcelblijleven/adventofcode | 94def5ec340fbb58eaa56e82b54b9495903eb9d9 | [
"MIT"
] | 7 | 2021-11-30T19:52:21.000Z | 2021-12-20T07:46:00.000Z | src/adventofcode/year_2021/day_05_2021.py | marcelblijleven/adventofcode | 94def5ec340fbb58eaa56e82b54b9495903eb9d9 | [
"MIT"
] | 6 | 2021-11-30T20:15:35.000Z | 2021-12-20T17:14:19.000Z | import re
from collections import defaultdict
from typing import List, DefaultDict, Generator
from adventofcode.util.exceptions import SolutionNotFoundException
from adventofcode.util.helpers import solution_timer
from adventofcode.util.input_helpers import get_input_for_day
Coord = tuple[int, int]
GridType = DefaultDict[Coord, int]
LinePositions = tuple[Coord, Coord]
Line = List[Coord]
line_pattern = re.compile(r'(\d+)')
# NOTE(review): two bare @solution_timer decorators sat here with no function
# beneath them (a SyntaxError); the decorated solver functions were evidently
# lost from this record, so the stray decorators are removed.
if __name__ == '__main__':
    data = get_input_for_day(2021, 5)
    part_one(data)
    part_two(data)
| 25.377551 | 92 | 0.685967 | import re
from collections import defaultdict
from typing import List, DefaultDict, Generator
from adventofcode.util.exceptions import SolutionNotFoundException
from adventofcode.util.helpers import solution_timer
from adventofcode.util.input_helpers import get_input_for_day
Coord = tuple[int, int]
GridType = DefaultDict[Coord, int]
LinePositions = tuple[Coord, Coord]
Line = List[Coord]
line_pattern = re.compile(r'(\d+)')
def is_horizontal(line: LinePositions) -> bool:
    """True when both endpoints of *line* share the same x coordinate."""
    (x1, _), (x2, _) = line
    return x1 == x2
def is_vertical(line: LinePositions) -> bool:
    """True when both endpoints of *line* share the same y coordinate."""
    (_, y1), (_, y2) = line
    return y1 == y2
def is_diagonal(line: LinePositions) -> bool:
    """True when *line* is aligned with neither axis."""
    return not (is_vertical(line) or is_horizontal(line))
def get_line(positions: LinePositions) -> Line:
    """Expand an endpoint pair into every integer grid point on the segment."""
    (x1, y1), (x2, y2) = positions
    # Per-axis unit step: -1, 0 or +1 depending on the direction of travel.
    step_x = (x2 > x1) - (x2 < x1)
    step_y = (y2 > y1) - (y2 < y1)
    steps = max(abs(x2 - x1), abs(y2 - y1))
    points = []
    for i in range(steps + 1):
        points.append((x1 + i * step_x, y1 + i * step_y))
    return points
def get_lines(positions_list: List[LinePositions]) -> Generator[Line, None, None]:
    """Lazily expand each endpoint pair in *positions_list* into a full line."""
    for pair in positions_list:
        yield get_line(pair)
def count_intersections(parsed_input: List[LinePositions]) -> int:
    """Count grid points covered by two or more of the given lines."""
    coverage = defaultdict(int)
    for line in get_lines(parsed_input):
        for point in line:
            coverage[point] += 1
    return sum(1 for hits in coverage.values() if hits > 1)
def parse_input(input_data: List[str], filter_diagonal: bool = True) -> List[LinePositions]:
    """Parse "x1,y1 -> x2,y2" rows into endpoint pairs, optionally dropping diagonals."""
    parsed: List[LinePositions] = []
    for row in input_data:
        a, b, c, d = (int(num) for num in line_pattern.findall(row))
        pair = ((a, b), (c, d))
        if not (filter_diagonal and is_diagonal(pair)):
            parsed.append(pair)
    return parsed
@solution_timer(2021, 5, 1)
def part_one(input_data: List[str]):
    """Solve part 1: overlaps counting only horizontal/vertical vent lines."""
    answer = count_intersections(parse_input(input_data))
    if answer:
        return answer
    raise SolutionNotFoundException(2021, 5, 1)
@solution_timer(2021, 5, 2)
def part_two(input_data: List[str]):
    """Solve part 2: overlaps including diagonal vent lines."""
    answer = count_intersections(parse_input(input_data, filter_diagonal=False))
    if answer:
        return answer
    raise SolutionNotFoundException(2021, 5, 2)
if __name__ == '__main__':
    # Fetch the puzzle input once and run both parts.
    data = get_input_for_day(2021, 5)
    part_one(data)
    part_two(data)
| 1,682 | 0 | 205 |
6a75bf2fba99e0f78b96884a2eac1f74834ce13c | 1,162 | py | Python | inferfuzzy/rule.py | leynier/inferfuzzy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | 3 | 2020-11-23T21:05:31.000Z | 2020-11-25T17:33:27.000Z | inferfuzzy/rule.py | leynier/fuzzpy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | null | null | null | inferfuzzy/rule.py | leynier/fuzzpy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | null | null | null | from typing import Any, List
from .base_set import BaseSet
from .base_var import VarSet
from .predicates import Predicate
| 27.023256 | 74 | 0.613597 | from typing import Any, List
from .base_set import BaseSet
from .base_var import VarSet
from .predicates import Predicate
class BaseRule:
    """Abstract fuzzy rule holding only its antecedent predicate.

    Concrete subclasses implement __call__ to evaluate the rule against a
    mapping of crisp input values.
    """

    def __init__(self, antecedent: Predicate):
        """Store the rule's antecedent predicate."""
        self.antecedent = antecedent

    def __call__(self, values: dict):
        """Evaluate the rule; subclasses must override this."""
        raise NotImplementedError()

    def __str__(self) -> str:
        """Human-readable summary of the rule's antecedent."""
        return f"Antecedent: {self.antecedent}"
class Rule(BaseRule):
    """Fuzzy rule pairing an antecedent with one or more consequence var/sets."""

    def __init__(self, antecedent: Predicate, consequences: List[VarSet]):
        """Store the antecedent (via the base class) and the consequences."""
        super().__init__(antecedent)
        self.consequences = consequences

    def aggregate(self, set: BaseSet, value: Any) -> BaseSet:
        """Combine a consequence set with the antecedent's truth value."""
        raise NotImplementedError()

    def __call__(self, values: dict):
        """Fire the rule: map each consequence variable name to its aggregated set."""
        degree = self.antecedent(values)
        fired = {}
        for consequence in self.consequences:
            fired[consequence.var.name] = self.aggregate(consequence.set, degree)
        return fired

    def __str__(self) -> str:
        """Human-readable summary of the antecedent and all consequences."""
        entries = [f" {x}" for x in self.consequences]
        return f"Antecedent: {self.antecedent}\n " + "Consequences:\n" + "\n".join(entries)
| 810 | -6 | 233 |
edf651a400730b62f2a934c8bd13d13b520aa848 | 741 | py | Python | wicked_historian/compat/mysql.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | null | null | null | wicked_historian/compat/mysql.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | null | null | null | wicked_historian/compat/mysql.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | 1 | 2022-03-15T07:29:58.000Z | 2022-03-15T07:29:58.000Z | import json
from django_mysql.models.fields import JSONField as MySQLJSONField
from wicked_historian.encoder import JSONEncoder
__all__ = (
'JSONField',
)
| 26.464286 | 71 | 0.68691 | import json
from django_mysql.models.fields import JSONField as MySQLJSONField
from wicked_historian.encoder import JSONEncoder
__all__ = (
'JSONField',
)
class JSONField(MySQLJSONField):
    """django-mysql JSONField that serialises with a configurable encoder.

    Defaults to wicked_historian's JSONEncoder so history payloads can contain
    values the plain json encoder cannot handle.
    """
    def __init__(self, *args, **kwargs):
        # Allow callers to override the encoder; fall back to the project's.
        self.encoder = kwargs.pop('encoder', JSONEncoder)
        super().__init__(*args, **kwargs)
    def get_prep_value(self, value):
        """Serialise *value* to a JSON string during query/save preparation.

        Strings pass through unchanged -- NOTE(review): presumably they are
        assumed to already be JSON; confirm against the callers.
        """
        if value is not None and not isinstance(value, str):
            return json.dumps(value, cls=self.encoder, allow_nan=False)
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Serialise *value* for the database unless it is already prepared."""
        if not prepared and value is not None:
            return json.dumps(value, cls=self.encoder, allow_nan=False)
        return value
| 463 | 11 | 104 |
5827ec9f745130abc2c1eddb42effc2086db230d | 6,023 | py | Python | others/old/Dataset.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | 4 | 2021-12-20T12:52:02.000Z | 2021-12-29T09:34:42.000Z | others/old/Dataset.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | null | null | null | others/old/Dataset.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | 1 | 2021-11-20T12:20:55.000Z | 2021-11-20T12:20:55.000Z | import numpy as np
import random
if __name__ == '__main__':
X = np.empty((10,5))
Y = np.linspace(0,9,10)
d = AssembleDataset(X,Y,5,seed = 0)
a,b = d.get_fold_data()
print(a,b)
a,b = d.get_res_data()
print(a,b)
| 32.556757 | 65 | 0.470032 | import numpy as np
import random
class Dataset:
    """K-fold splitter over a paired feature matrix X and label vector Y.

    A cursor (``self.idx``) selects the current validation fold;
    ``get_fold_data`` returns that fold and ``get_res_data`` returns the
    remaining folds.  ``step``/``reset`` move the cursor.  Samples beyond
    ``fold * step_len`` are absorbed into the last fold.
    """
    def __init__(self,X,Y,fold):
        # idx: cursor of the current fold; fold: number of folds.
        self.idx = 0
        self.fold = fold
        self.X = np.array(X)
        self.Y = np.array(Y)
        if self.X.shape[0] != self.Y.shape[0]:
            raise Exception('数据与标签维度不一致')
        self.length = self.Y.shape[0]
        self.step_len = self.length//self.fold
    def shuffle(self,seed = 0):
        """Shuffle X and Y in lockstep with a fixed seed (reproducible)."""
        random.seed(seed)
        index = list(range(self.length))
        random.shuffle(index)
        index = np.array(index)
        self.X = self.X[index,:]
        self.Y = self.Y[index]
    def get_fold_data(self,n = -1):
        """Return (X, Y) of fold ``n``; ``n == -1`` means the current cursor.

        Prints a warning and returns empty lists when ``n`` is out of range.
        The last fold also contains the remainder samples.
        """
        if n >= self.fold:
            print('游标超出范围,请设置在(0,{})范围内'.format(self.fold))
            res_X = []
            res_Y = []
        else:
            if n != -1:
                if n == self.fold - 1:
                    lidx = n * self.step_len
                    res_X = self.X[lidx:,:]
                    res_Y = self.Y[lidx:]
                else:
                    lidx = n * self.step_len
                    ridx = (n + 1) * self.step_len
                    res_X = self.X[lidx:ridx,:]
                    res_Y = self.Y[lidx:ridx]
            else:
                if self.idx == self.fold - 1:
                    lidx = self.idx * self.step_len
                    res_X = self.X[lidx:,:]
                    res_Y = self.Y[lidx:]
                else:
                    lidx = self.idx * self.step_len
                    ridx = (self.idx + 1) * self.step_len
                    res_X = self.X[lidx:ridx,:]
                    res_Y = self.Y[lidx:ridx]
        return res_X,res_Y
    def get_res_data(self):
        """Return (X, Y) of every fold EXCEPT the one under the cursor."""
        if self.idx == self.fold - 1:
            lidx = self.idx * self.step_len
            res_X = self.X[:lidx,:]
            res_Y = self.Y[:lidx]
        elif self.idx == 0:
            ridx = (self.idx + 1) * self.step_len
            res_X = self.X[ridx:,:]
            res_Y = self.Y[ridx:]
        else:
            # Middle fold: concatenate the parts before and after it.
            lidx = self.idx * self.step_len
            ridx = (self.idx + 1) * self.step_len
            res_X1 = self.X[:lidx,:]
            res_X2 = self.X[ridx:,:]
            res_Y1 = self.Y[:lidx]
            res_Y2 = self.Y[ridx:]
            res_X = np.vstack((res_X1,res_X2))
            res_Y = np.hstack((res_Y1,res_Y2))
        return res_X,res_Y
    def step(self,n = 1):
        """Advance the fold cursor by ``n`` (wraps around modulo fold)."""
        self.idx += n
        self.idx = self.idx % self.fold
        print('向前搜索{}步,现在输出第{}折数据'.format(n,self.idx))
    def reset(self,n = 0):
        """Set the fold cursor to ``n``; warns when out of range."""
        if n > self.fold:
            print('游标超出范围,请设置在(0,{})范围内'.format(self.fold))
        else:
            self.idx = n
            print('游标设置为{},现在输出第{}折数据'.format(self.idx,self.idx))
class AssembleDataset:
    """Holdout split plus k-fold cross-validation over the training portion.

    First splits (X, Y) into train/test by ``train_rate`` (optionally shuffled
    with ``seed``; ``seed == -1`` disables shuffling), then exposes the same
    fold-cursor interface as ``Dataset`` over the training portion only.
    """
    def __init__(self,X,Y,fold,train_rate = 0.8,seed = -1):
        X = np.array(X)
        Y = np.array(Y)
        if X.shape[0] != Y.shape[0]:
            raise Exception('数据与标签维度不一致')
        train_size = int(len(Y) * train_rate)
        if seed != -1:
            # Shuffle features and labels in lockstep before the split.
            random.seed(seed)
            index = list(range(len(Y)))
            random.shuffle(index)
            index = np.array(index)
            X = X[index,:]
            Y = Y[index]
        self.train_X = X[:train_size,:]
        self.train_Y = Y[:train_size]
        self.test_X = X[train_size:,:]
        self.test_Y = Y[train_size:]
        self.idx = 0
        self.fold = fold
        self.length = self.train_Y.shape[0]
        self.step_len = self.length//self.fold
    def get_test_set(self):
        """Return the held-out (X, Y) test split."""
        return self.test_X,self.test_Y
    def get_train_set(self):
        """Return the full (X, Y) training split."""
        return self.train_X,self.train_Y
    def get_fold_data(self,n = -1):
        """Return (X, Y) of training fold ``n``; ``-1`` means the cursor fold."""
        if n >= self.fold:
            print('游标超出范围,请设置在(0,{})范围内'.format(self.fold))
            res_X = []
            res_Y = []
        else:
            if n != -1:
                if n == self.fold - 1:
                    lidx = n * self.step_len
                    res_X = self.train_X[lidx:,:]
                    res_Y = self.train_Y[lidx:]
                else:
                    lidx = n * self.step_len
                    ridx = (n + 1) * self.step_len
                    res_X = self.train_X[lidx:ridx,:]
                    res_Y = self.train_Y[lidx:ridx]
            else:
                if self.idx == self.fold - 1:
                    lidx = self.idx * self.step_len
                    res_X = self.train_X[lidx:,:]
                    res_Y = self.train_Y[lidx:]
                else:
                    lidx = self.idx * self.step_len
                    ridx = (self.idx + 1) * self.step_len
                    res_X = self.train_X[lidx:ridx,:]
                    res_Y = self.train_Y[lidx:ridx]
        return res_X,res_Y
    def get_res_data(self):
        """Return (X, Y) of all training folds EXCEPT the cursor fold."""
        if self.idx == self.fold - 1:
            lidx = self.idx * self.step_len
            res_X = self.train_X[:lidx,:]
            res_Y = self.train_Y[:lidx]
        elif self.idx == 0:
            ridx = (self.idx + 1) * self.step_len
            res_X = self.train_X[ridx:,:]
            res_Y = self.train_Y[ridx:]
        else:
            # Middle fold: concatenate the parts before and after it.
            lidx = self.idx * self.step_len
            ridx = (self.idx + 1) * self.step_len
            res_X1 = self.train_X[:lidx,:]
            res_X2 = self.train_X[ridx:,:]
            res_Y1 = self.train_Y[:lidx]
            res_Y2 = self.train_Y[ridx:]
            res_X = np.vstack((res_X1,res_X2))
            res_Y = np.hstack((res_Y1,res_Y2))
        return res_X,res_Y
    def step(self,n = 1):
        """Advance the fold cursor by ``n`` (wraps around modulo fold)."""
        self.idx += n
        self.idx = self.idx % self.fold
        print('向前搜索{}步,现在输出第{}折数据'.format(n,self.idx))
    def reset(self,n = 0):
        """Set the fold cursor to ``n``; warns when out of range."""
        if n > self.fold:
            print('游标超出范围,请设置在(0,{})范围内'.format(self.fold))
        else:
            self.idx = n
            print('游标设置为{},现在输出第{}折数据'.format(self.idx,self.idx))
if __name__ == '__main__':
    # Smoke test: 10 samples x 5 features, 5 folds, fixed shuffle seed.
    X = np.empty((10,5))
    Y = np.linspace(0,9,10)
    d = AssembleDataset(X,Y,5,seed = 0)
    a,b = d.get_fold_data()
    print(a,b)
    a,b = d.get_res_data()
    print(a,b)
| 5,648 | -6 | 405 |
d227c6b7d42bf9b54733a1338de7b87c5ae514d8 | 6,236 | py | Python | open_spiel/python/algorithms/action_value_vs_best_response.py | texasmichelle/open_spiel | d9a9b8f9f1f44143867217fc3f6ff2db71b174b0 | [
"Apache-2.0"
] | 2 | 2020-09-05T07:17:08.000Z | 2021-05-02T21:10:28.000Z | open_spiel/python/algorithms/action_value_vs_best_response.py | texasmichelle/open_spiel | d9a9b8f9f1f44143867217fc3f6ff2db71b174b0 | [
"Apache-2.0"
] | 4 | 2020-11-13T18:58:27.000Z | 2022-02-10T01:58:38.000Z | open_spiel/python/algorithms/action_value_vs_best_response.py | texasmichelle/open_spiel | d9a9b8f9f1f44143867217fc3f6ff2db71b174b0 | [
"Apache-2.0"
] | 1 | 2020-06-02T17:52:48.000Z | 2020-06-02T17:52:48.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the value of action given a policy vs a best responder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_utils
import pyspiel
def _transitions(state, policies):
"""Returns a list of (action, prob) pairs from the specified state."""
if state.is_chance_node():
return state.chance_outcomes()
else:
pl = state.current_player()
return list(policies[pl].action_probabilities(state).items())
_CalculatorReturn = collections.namedtuple(
"_CalculatorReturn",
[
# The exploitability of the opponent strategy, i.e. the value of the
# best-responder player BR.
"exploitability",
# An array of shape `[len(info_states), game.num_distinct_actions()]`
# giving the value of each action vs the best response.
# Will be zero for invalid actions.
"values_vs_br",
# The player's counterfactual reach probability of this infostate when
# playing against the BR, as a list of shape [num_info_states].
"counterfactual_reach_probs_vs_br",
# The reach probability of the current player at the infostates when
# playing against the BR, as list shape [num_info_states].
# This is the product of the current player probs along *one* trajectory
# leading to this info-state (this number should be the same along
# any trajectory leading to this info-state because of perfect recall).
"player_reach_probs_vs_br",
])
class Calculator(object):
"""Class to orchestrate the calculation."""
def __call__(self, player, player_policy, info_states):
"""Computes action values per state for the player.
Args:
player: The id of the player (0 <= player < game.num_players()). This
player will play `player_policy`, while the opponent will play a best
response.
player_policy: A `policy.Policy` object.
info_states: A list of info state strings.
Returns:
A `_CalculatorReturn` nametuple. See its docstring for the documentation.
"""
self.player = player
opponent = 1 - player
# If the policy is a TabularPolicy, we can directly copy the infostate
# strings & values from the class. This is significantly faster than having
# to create the infostate strings.
if isinstance(player_policy, policy.TabularPolicy):
tabular_policy = {
key: _tuples_from_policy(player_policy.policy_for_key(key))
for key in player_policy.state_lookup
}
# Otherwise, we have to calculate all the infostate strings everytime. This
# is ~2x slower.
else:
# We cache these as they are expensive to compute & do not change.
if self._all_states is None:
self._all_states = get_all_states.get_all_states(
self.game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
self._state_to_information_state = {
state: self._all_states[state].information_state_string()
for state in self._all_states
}
tabular_policy = policy_utils.policy_to_dict(
player_policy, self.game, self._all_states,
self._state_to_information_state)
# When constructed, TabularBestResponse does a lot of work; we can save that
# work by caching it.
if self._best_responder[player] is None:
self._best_responder[player] = pyspiel.TabularBestResponse(
self.game, opponent, tabular_policy)
else:
self._best_responder[player].set_policy(tabular_policy)
# Computing the value at the root calculates best responses everywhere.
history = str(self.game.new_initial_state())
best_response_value = self._best_responder[player].value(history)
best_response_actions = self._best_responder[
player].get_best_response_actions()
# Compute action values
self._action_value_calculator.compute_all_states_action_values({
player:
player_policy,
opponent:
policy.tabular_policy_from_callable(
self.game, best_response_policy, [opponent]),
})
obj = self._action_value_calculator._get_tabular_statistics( # pylint: disable=protected-access
((player, s) for s in info_states))
# Return values
return _CalculatorReturn(
exploitability=best_response_value,
values_vs_br=obj.action_values,
counterfactual_reach_probs_vs_br=obj.counterfactual_reach_probs,
player_reach_probs_vs_br=obj.player_reach_probs)
| 38.732919 | 100 | 0.712155 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the value of action given a policy vs a best responder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_utils
import pyspiel
def _transitions(state, policies):
"""Returns a list of (action, prob) pairs from the specified state."""
if state.is_chance_node():
return state.chance_outcomes()
else:
pl = state.current_player()
return list(policies[pl].action_probabilities(state).items())
def _tuples_from_policy(policy_vector):
return [
(action, probability) for action, probability in enumerate(policy_vector)
]
_CalculatorReturn = collections.namedtuple(
"_CalculatorReturn",
[
# The exploitability of the opponent strategy, i.e. the value of the
# best-responder player BR.
"exploitability",
# An array of shape `[len(info_states), game.num_distinct_actions()]`
# giving the value of each action vs the best response.
# Will be zero for invalid actions.
"values_vs_br",
# The player's counterfactual reach probability of this infostate when
# playing against the BR, as a list of shape [num_info_states].
"counterfactual_reach_probs_vs_br",
# The reach probability of the current player at the infostates when
# playing against the BR, as list shape [num_info_states].
# This is the product of the current player probs along *one* trajectory
# leading to this info-state (this number should be the same along
# any trajectory leading to this info-state because of perfect recall).
"player_reach_probs_vs_br",
])
class Calculator(object):
  """Class to orchestrate the calculation."""

  def __init__(self, game):
    """Initializes the calculator for a two-player `game`.

    Raises:
      ValueError: if `game` does not have exactly two players.
    """
    if game.num_players() != 2:
      raise ValueError("Only supports 2-player games.")
    self.game = game
    self._num_players = game.num_players()
    self._num_actions = game.num_distinct_actions()
    self._action_value_calculator = action_value.TreeWalkCalculator(game)
    # best_responder[i] is a best response to the provided policy for player i.
    # It is therefore a policy for player (1-i).
    self._best_responder = {0: None, 1: None}
    self._all_states = None

  def __call__(self, player, player_policy, info_states):
    """Computes action values per state for the player.
    Args:
      player: The id of the player (0 <= player < game.num_players()). This
        player will play `player_policy`, while the opponent will play a best
        response.
      player_policy: A `policy.Policy` object.
      info_states: A list of info state strings.
    Returns:
      A `_CalculatorReturn` nametuple. See its docstring for the documentation.
    """
    self.player = player
    opponent = 1 - player
    # Closure over `best_response_actions`, which is only assigned further
    # below once the best response has been computed; the callable itself is
    # not invoked before then.
    def best_response_policy(state):
      infostate = state.information_state_string(opponent)
      action = best_response_actions[infostate]
      return [(action, 1.0)]
    # If the policy is a TabularPolicy, we can directly copy the infostate
    # strings & values from the class. This is significantly faster than having
    # to create the infostate strings.
    if isinstance(player_policy, policy.TabularPolicy):
      tabular_policy = {
          key: _tuples_from_policy(player_policy.policy_for_key(key))
          for key in player_policy.state_lookup
      }
    # Otherwise, we have to calculate all the infostate strings everytime. This
    # is ~2x slower.
    else:
      # We cache these as they are expensive to compute & do not change.
      if self._all_states is None:
        self._all_states = get_all_states.get_all_states(
            self.game,
            depth_limit=-1,
            include_terminals=False,
            include_chance_states=False)
        self._state_to_information_state = {
            state: self._all_states[state].information_state_string()
            for state in self._all_states
        }
      tabular_policy = policy_utils.policy_to_dict(
          player_policy, self.game, self._all_states,
          self._state_to_information_state)
    # When constructed, TabularBestResponse does a lot of work; we can save that
    # work by caching it.
    if self._best_responder[player] is None:
      self._best_responder[player] = pyspiel.TabularBestResponse(
          self.game, opponent, tabular_policy)
    else:
      self._best_responder[player].set_policy(tabular_policy)
    # Computing the value at the root calculates best responses everywhere.
    history = str(self.game.new_initial_state())
    best_response_value = self._best_responder[player].value(history)
    best_response_actions = self._best_responder[
        player].get_best_response_actions()
    # Compute action values
    self._action_value_calculator.compute_all_states_action_values({
        player:
            player_policy,
        opponent:
            policy.tabular_policy_from_callable(
                self.game, best_response_policy, [opponent]),
    })
    obj = self._action_value_calculator._get_tabular_statistics(  # pylint: disable=protected-access
        ((player, s) for s in info_states))
    # Return values
    return _CalculatorReturn(
        exploitability=best_response_value,
        values_vs_br=obj.action_values,
        counterfactual_reach_probs_vs_br=obj.counterfactual_reach_probs,
        player_reach_probs_vs_br=obj.player_reach_probs)
| 746 | 0 | 75 |
0080f062d8e05c5c6a29ce1eb5063d5916a0df3c | 1,427 | py | Python | fline/losses/research/component.py | asromahin/fline | a34243878093b3b883607557eeaf968ef4b8acf6 | [
"MIT"
] | 5 | 2021-07-01T08:19:51.000Z | 2022-03-28T06:09:55.000Z | fline/losses/research/component.py | asromahin/fline | a34243878093b3b883607557eeaf968ef4b8acf6 | [
"MIT"
] | 1 | 2022-03-21T02:42:44.000Z | 2022-03-28T06:10:57.000Z | fline/losses/research/component.py | asromahin/fline | a34243878093b3b883607557eeaf968ef4b8acf6 | [
"MIT"
] | null | null | null | import torch
from fline.losses.segmentation.dice import BCEDiceLoss
| 32.431818 | 101 | 0.531885 | import torch
from fline.losses.segmentation.dice import BCEDiceLoss
class ConnectLoss(torch.nn.Module):
def __init__(self, device):
super(ConnectLoss, self).__init__()
self.loss = BCEDiceLoss(activation=None)
self.device = device
def forward(
self,
pred_instance_mask: torch.Tensor,
target_mask: torch.Tensor,
):
res_loss = self.loss(
pred_instance_mask[:, 0:1, :, :].to(torch.float32),
(target_mask == 0).to(torch.float32),
)
k_keys = list(range(pred_instance_mask.shape[1]))
k_keys.remove(0)
n_keys = list(torch.unique(target_mask))
n_keys.remove(0)
for n in n_keys:
target_cur_mask = (target_mask == n)
cur_loss = None
select_k = None
for k in k_keys:
pred_cur_mask = pred_instance_mask[:, k:k+1, :, :]
tloss = self.loss(pred_cur_mask.to(torch.float32), target_cur_mask.to(torch.float32))
if cur_loss is None:
cur_loss = tloss
select_k = k
else:
if tloss < cur_loss:
cur_loss = tloss
select_k = k
if cur_loss is not None:
k_keys.remove(select_k)
res_loss += cur_loss
return res_loss/len(n_keys)
| 1,266 | 14 | 77 |
60f7add424ef1c9769b7a21c44abf24d17df3775 | 217 | py | Python | 10-Tecnologie_Web-Introduzione_a_Python/esempio06.py | matteocerullo/TW6 | 0219418576960ff8163fbe90f866c15f1fb1ad27 | [
"CC0-1.0"
] | 5 | 2021-05-07T08:56:46.000Z | 2022-02-08T20:58:56.000Z | 10-Tecnologie_Web-Introduzione_a_Python/esempio06.py | matteocerullo/TW6 | 0219418576960ff8163fbe90f866c15f1fb1ad27 | [
"CC0-1.0"
] | 1 | 2021-05-14T09:11:51.000Z | 2021-05-14T09:11:51.000Z | 10-Tecnologie_Web-Introduzione_a_Python/esempio06.py | matteocerullo/TW6 | 0219418576960ff8163fbe90f866c15f1fb1ad27 | [
"CC0-1.0"
] | 64 | 2021-05-07T08:53:30.000Z | 2022-02-10T17:02:41.000Z | # Cycles - For
import random
n = 10000
m = 0
l = 1000
for i in range(n):
count = 0
x = -1
while x != l:
x = random.randint(1, l)
count=count+1
m=m+count
print("Done! average:", m/n) | 12.764706 | 32 | 0.520737 | # Cycles - For
import random
n = 10000
m = 0
l = 1000
for i in range(n):
count = 0
x = -1
while x != l:
x = random.randint(1, l)
count=count+1
m=m+count
print("Done! average:", m/n) | 0 | 0 | 0 |
35b7aa855385ade468b728b5ddd2f4f8a05dd422 | 1,961 | py | Python | autoremovetorrents/torrent.py | drawwon/autoremove-torrents | 730fe989a6ffd53213b15e4f39b1c95f256e0a52 | [
"MIT"
] | null | null | null | autoremovetorrents/torrent.py | drawwon/autoremove-torrents | 730fe989a6ffd53213b15e4f39b1c95f256e0a52 | [
"MIT"
] | null | null | null | autoremovetorrents/torrent.py | drawwon/autoremove-torrents | 730fe989a6ffd53213b15e4f39b1c95f256e0a52 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import sys
import time
from .torrentstatus import TorrentStatus | 31.126984 | 109 | 0.54819 | #-*- coding:utf-8 -*-
import sys
import time
from .torrentstatus import TorrentStatus
class Torrent(object):
    """Immutable snapshot of a torrent's properties used by removal strategies.

    Attributes mirror the constructor arguments; static helpers format byte
    counts, durations and timestamps for display.
    """
    def __init__(self, hash_value, name, category, tracker, status, stalled, size, ratio,
            uploaded, create_time, seeding_time):
        # Save Properties
        self.hash = hash_value
        self.name = name
        self.category = category
        self.tracker = tracker
        self.status = status
        self.stalled = stalled
        self.size = size
        self.ratio = ratio
        self.uploaded = uploaded
        self.create_time = create_time
        self.seeding_time = seeding_time
    # Format torrent info
    def __str__(self):
        """Multi-line human-readable summary of this torrent."""
        return "%s\nSize:%s\tRatio:%.3f\tTotal Uploaded:%s\tSeeding Time:%s\tCategory:%s\nCreate Time:%s" % \
            (self.name,
            self._convert_bytes(self.size),
            self.ratio,
            self._convert_bytes(self.uploaded),
            self._convert_seconds(self.seeding_time),
            self.category,
            self._convert_timestamp(self.create_time)
            )
    # Convert Seconds
    @staticmethod
    def _convert_seconds(sec):
        """Format a duration in seconds as 'Nd HH:MM:SS'; -1 means unknown."""
        if sec == -1:
            return '(Not Provided)'
        else:
            m, s = divmod(sec, 60)
            h, m = divmod(m, 60)
            d, h = divmod(h, 24)
            return ('%dd %02d:%02d:%02d' % (d, h, m, s))
    # Convert Bytes
    @staticmethod
    def _convert_bytes(byte):
        """Format a byte count as a human-readable string with binary units.

        BUG FIX: the original list was missing a comma after 'ZiB', so the
        adjacent string literals fused into a single bogus 'ZiBYiB' unit and
        shifted every larger unit by one.
        """
        units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB',
            'YiB', 'BiB', 'NiB', 'DiB', 'CiB']
        for x in units:
            if divmod(byte, 1024)[0] == 0:
                break
            else:
                byte /= 1024
        return ('%.2lf%s' % (byte, x))
    # Convert Timestamp
    @staticmethod
    def _convert_timestamp(timestamp):
        """Format a Unix timestamp as local time; sys.maxsize means unknown."""
        return '(Not Provided)' if timestamp == sys.maxsize \
            else time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp))
90459e756ccbe912bed5ca83e56c654291f93de6 | 4,873 | py | Python | Lang/Python/py_base/data_structure/graph/graph.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | [
"MIT"
] | 5 | 2018-03-09T13:51:11.000Z | 2021-12-17T02:05:59.000Z | Lang/Python/py_base/data_structure/graph/graph.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | [
"MIT"
] | null | null | null | Lang/Python/py_base/data_structure/graph/graph.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | [
"MIT"
] | null | null | null |
from pydotplus import Dot, Node, Edge
import os
# 该图配置
graph = {'A': ['B', 'C', 'F'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F', 'D'],
'F': ['C']
}
def find_path(graph, start, end, path=[]):
"""
在图graph中找路径:
从顶点start到顶点end
走过的路径为path
"""
path = path + [start]
# 3.0 若当找到路径尾部,则返回该路径
if start == end:
return path
# 1.0 判断当前顶点是否在图内
if start not in graph.keys():
return None
for node in graph[start]:
if node not in path:
# 2.0 以当前顶点为起点,继续找路径
newpath = find_path(graph, node, end, path)
# 4.0 返回该路径
if newpath:
return newpath
# 这个没有什么用吗 ?
# return path
if __name__ == '__main__':
result = find_path(graph, 'A', 'D')
print("1. 路径查找结果:", result)
print('---------------------------------')
result = find_all_paths(graph, 'A', 'D')
print("2. 全路径查找结果:", result)
print("路径个数:", len(result))
i = 1
for path in result:
print('路径{0:2d}为:{1}'.format(i, path))
i += 1
print('---------------------------------')
result = find_short_path(graph, 'A', 'D')
print("3. 查找最短路径:", result)
print('---------------------------------')
# 生成图表
dotgraph(graph)
# 广度优先遍历
result = breadth_first_search(graph, 'A')
print(result)
# 深度优先遍历
result = depth_first_search(graph, 'A')
print(result)
| 26.922652 | 70 | 0.533963 |
from pydotplus import Dot, Node, Edge
import os
# 该图配置
graph = {'A': ['B', 'C', 'F'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F', 'D'],
'F': ['C']
}
def dotgraph(graph):
    """Render the adjacency-list dict ``graph`` with pydotplus.

    Writes demo.dot and demo.svg to the working directory (PNG output is
    commented out) and returns the pydotplus Dot object.
    """
    __graph = Dot(rankdir='TB', fontname="Fangsong",
                  fontcolor='blue', label="有向图的示例")
    __graph.set_type('digraph')
    __graph.set_name('digraph_demo')
    __graph.set_node_defaults(
        fontname="Fangsong", style='filled', fillcolor='yellow')
    __graph.set_edge_defaults(fontname="Fangsong", color='black')
    for key, value in graph.items():
        # Nodes need no explicit label or extra attributes here, so they are
        # not added by hand -- adding the edges below creates them implicitly
        # (direction included).
        # node = Node(key)
        # __graph.add_node(node)
        for v in value:
            edge = Edge(key, v)
            __graph.add_edge(edge)
    ret = __graph.write_raw("demo.dot")
    if ret is not True:
        print('生成demo.dot失败')
    ret = __graph.write_svg("demo.svg")
    if ret is not True:
        print('生成graph.svg失败')
    # ret = __graph.write_png("demo.png")
    # if ret is not True:
    #     print('生成graph.png失败')
    return __graph
def find_path(graph, start, end, path=None):
    """Return one path (list of vertices) from start to end, or None.

    Depth-first search over an adjacency-list dict.  ``path`` carries the
    vertices visited so far and is primarily for internal recursive use.

    BUG FIX: the mutable default argument ``path=[]`` is replaced with None;
    mutable defaults are shared between calls.
    """
    path = (path or []) + [start]
    # Reached the destination: the accumulated path is the answer.
    if start == end:
        return path
    # A vertex missing from the graph has no outgoing edges to explore.
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:  # avoid cycles
            newpath = find_path(graph, node, end, path)
            if newpath:
                return newpath
    return None
def find_all_paths(graph, start, end, path=None, paths=None):
    """Return every acyclic path from start to end as a list of lists, or None.

    BUG FIX: the mutable defaults ``path=[]``/``paths=[]`` were shared across
    *top-level* calls, so results silently accumulated between invocations.
    Both now default to None and the accumulator is threaded explicitly
    through the recursion.
    """
    if paths is None:
        paths = []
    path = (path or []) + [start]
    if start == end:
        paths.append(path)
        return path
    if start not in graph:
        return None
    for node in graph[start]:
        if node not in path:  # avoid cycles
            find_all_paths(graph, node, end, path, paths)
    if not paths:
        return None
    return paths
def find_short_path(graph, start, end, path=None):
    """Return a shortest path from start to end (list of vertices), or None.

    BUG FIX: the original recursed into find_path(), which returns the *first*
    path found from each neighbour, so the result was not guaranteed shortest.
    Recursing into find_short_path itself makes the search exhaustive.
    Also replaces the shared mutable default ``path=[]`` with None.
    """
    path = (path or []) + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:  # avoid cycles
            candidate = find_short_path(graph, node, end, path)
            if candidate and (shortest is None or len(candidate) < len(shortest)):
                shortest = candidate
    return shortest
def breadth_first_search(graph, start):
    """Breadth-first traversal of *graph* from *start*; returns visit order.

    Disconnected components are restarted at the alphabetically smallest
    unvisited vertex.

    Bug fix: after a restart, only the newly chosen vertex now seeds the
    next frontier.  Previously *every* unvisited vertex was placed on the
    frontier without being marked visited, so neighbours of unreached
    vertices could be recorded before the vertices themselves.

    Args:
        graph: mapping vertex -> list of adjacent vertices (every vertex
            that can be reached must itself be a key, as in the original).
        start: vertex the traversal begins at.

    Returns:
        list: vertices in the order they were visited.
    """
    travel = [start]          # visit order (also serves as "seen" record)
    frontier = [start]        # current BFS layer
    all_nodes = set(graph)
    while set(travel) < all_nodes:
        while frontier:
            next_frontier = []
            for node in frontier:
                for neighbour in graph[node]:
                    if neighbour not in travel:
                        travel.append(neighbour)
                        next_frontier.append(neighbour)
            frontier = next_frontier
        remaining = sorted(all_nodes - set(travel))
        if remaining:
            travel.append(remaining[0])
            frontier = [remaining[0]]  # seed only the restart vertex
    return travel
def depth_first_search(graph, start):
    """Iterative depth-first traversal of *graph* from *start*.

    Neighbours are pushed in adjacency-list order and popped from the top
    of the stack, so the last-listed neighbour is explored first.
    Disconnected components are restarted at the alphabetically smallest
    unvisited vertex.  Returns the vertices in visit order.
    """
    visited = []
    pending = [start]
    all_nodes = set(graph)
    while set(visited) < all_nodes:
        while pending:
            current = pending.pop()
            if current in visited:
                continue
            visited.append(current)
            pending.extend(n for n in graph[current] if n not in visited)
        remaining = sorted(all_nodes - set(visited))
        if remaining:
            pending.append(remaining[0])
    return visited
if __name__ == '__main__':
    # Demonstrate every search on the module-level sample graph.
    single = find_path(graph, 'A', 'D')
    print("1. 路径查找结果:", single)
    print('---------------------------------')
    every = find_all_paths(graph, 'A', 'D')
    print("2. 全路径查找结果:", every)
    print("路径个数:", len(every))
    for index, route in enumerate(every, start=1):
        print('路径{0:2d}为:{1}'.format(index, route))
    print('---------------------------------')
    shortest = find_short_path(graph, 'A', 'D')
    print("3. 查找最短路径:", shortest)
    print('---------------------------------')
    # Draw the graph to demo.dot / demo.svg.
    dotgraph(graph)
    # Breadth-first traversal.
    print(breadth_first_search(graph, 'A'))
    # Depth-first traversal.
    print(depth_first_search(graph, 'A'))
| 3,697 | 0 | 115 |
8790f34d0d7e3a06ae4b661ea64a167a9284b21f | 521 | py | Python | app/config_template.py | larrylx/email-bot | 0689ddb3a63dc0faa7d97a2aa2b72d9a428157da | [
"MIT"
] | null | null | null | app/config_template.py | larrylx/email-bot | 0689ddb3a63dc0faa7d97a2aa2b72d9a428157da | [
"MIT"
] | null | null | null | app/config_template.py | larrylx/email-bot | 0689ddb3a63dc0faa7d97a2aa2b72d9a428157da | [
"MIT"
] | null | null | null | # Auth
# NOTE(review): presumably the hosts permitted to call the bot — confirm
# against the app's auth check before relying on this.
ALLOW_HOST = []
# Bot Address
SEND_AS = ""  # address the bot sends mail as — TODO confirm exact semantics
# Google
GOOGLE_WORKSPACE_USER = ""  # Workspace account used with the credentials below
GOOGLE_WORKSPACE_SERVICE_ACCOUNT_CREDENTIALS = '''{
"type": "service_account",
"project_id": "",
"private_key_id": "",
"private_key": "",
"client_email": "",
"client_id": "",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": ""
}'''
| 24.809524 | 80 | 0.646833 | # Auth
# NOTE(review): presumably the hosts permitted to call the bot — confirm
# against the app's auth check before relying on this.
ALLOW_HOST = []
# Bot Address
SEND_AS = ""  # address the bot sends mail as — TODO confirm exact semantics
# Google
GOOGLE_WORKSPACE_USER = ""  # Workspace account used with the credentials below
GOOGLE_WORKSPACE_SERVICE_ACCOUNT_CREDENTIALS = '''{
"type": "service_account",
"project_id": "",
"private_key_id": "",
"private_key": "",
"client_email": "",
"client_id": "",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": ""
}'''
| 0 | 0 | 0 |
604d2c58889a799deb5a51136f866fdb18afcd8e | 756 | py | Python | pylearn2/datasets/tests/test_adult.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 2,045 | 2015-01-01T14:07:52.000Z | 2022-03-08T08:56:41.000Z | pylearn2/datasets/tests/test_adult.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 305 | 2015-01-02T13:18:24.000Z | 2021-08-20T18:03:28.000Z | pylearn2/datasets/tests/test_adult.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 976 | 2015-01-01T17:08:51.000Z | 2022-03-25T19:53:17.000Z | """
Test code for adult.py
=======
Testing class that simply checks to see if the adult dataset
is loadable
"""
import numpy
from pylearn2.datasets.adult import adult
from pylearn2.testing.skip import skip_if_no_data
def test_adult():
    """
    Tests if it will work correctly for train and test set.
    """
    skip_if_no_data()
    # Expected (X shape, y shape) for each split of the adult dataset.
    expected_shapes = {
        'train': ((30162, 104), (30162, 1)),
        'test': ((15060, 103), (15060, 1)),
    }
    for which_set, (x_shape, y_shape) in expected_shapes.items():
        dataset = adult(which_set=which_set)
        assert (dataset.X >= 0.).all()
        assert dataset.y.dtype == bool
        assert dataset.X.shape == x_shape
        assert dataset.y.shape == y_shape
| 27 | 60 | 0.678571 | """
Test code for adult.py
=======
Testing class that simply checks to see if the adult dataset
is loadable
"""
import numpy
from pylearn2.datasets.adult import adult
from pylearn2.testing.skip import skip_if_no_data
def test_adult():
    """
    Tests if it will work correctly for train and test set.
    """
    skip_if_no_data()
    # Expected (X shape, y shape) for each split of the adult dataset.
    expected_shapes = {
        'train': ((30162, 104), (30162, 1)),
        'test': ((15060, 103), (15060, 1)),
    }
    for which_set, (x_shape, y_shape) in expected_shapes.items():
        dataset = adult(which_set=which_set)
        assert (dataset.X >= 0.).all()
        assert dataset.y.dtype == bool
        assert dataset.X.shape == x_shape
        assert dataset.y.shape == y_shape
| 0 | 0 | 0 |
101e781d3dec1f78602616edae26f232874b490e | 1,789 | py | Python | multichoice/forms.py | Palombredun/django_quiz | 1565d251d54dfb54fdee83096b560876833275a2 | [
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null | multichoice/forms.py | Palombredun/django_quiz | 1565d251d54dfb54fdee83096b560876833275a2 | [
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null | multichoice/forms.py | Palombredun/django_quiz | 1565d251d54dfb54fdee83096b560876833275a2 | [
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null | from django import forms
from multichoice.models import MCQuestion
from quiz.forms import QuestionForm
class CreationMultiChoiceForm(QuestionForm):
    """
    Form dedicated to the creation of a MultiChoice Question.
    It inherits from QuestionForm and adds the fields
    answerN and answerN_correct.
    """
    # Candidate answers (French label "Réponse N"), each paired with a
    # checkbox ("Correcte") marking whether that answer is a correct one.
    answer1 = forms.CharField(max_length=1000, label="Réponse 1")
    answer1_correct = forms.BooleanField(required=False, label="Correcte")
    answer2 = forms.CharField(max_length=1000, label="Réponse 2")
    answer2_correct = forms.BooleanField(required=False, label="Correcte")
    answer3 = forms.CharField(max_length=1000, label="Réponse 3")
    answer3_correct = forms.BooleanField(required=False, label="Correcte")
class MultiChoiceForm(forms.Form):
    """
    Form used for the taking of a quiz.
    It is used for getting the student's answer to a multichoice question.
    This answer will be compared to the one decided by the creator of
    the quiz in order to decide if it is right or wrong.
    """
    # Each select box offers blank / "Vrai" (true) / "Faux" (false).
    CHOICES = ((None, ""), (True, "Vrai"), (False, "Faux"))
    answer1 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
    answer2 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
    answer3 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
qid = forms.IntegerField(widget=forms.HiddenInput()) | 35.78 | 86 | 0.65735 | from django import forms
from multichoice.models import MCQuestion
from quiz.forms import QuestionForm
class CreationMultiChoiceForm(QuestionForm):
    """
    Form dedicated to the creation of a MultiChoice Question.
    It inherits from QuestionForm and adds the fields
    answerN and answerN_correct.
    """
    # Candidate answers (French label "Réponse N"), each paired with a
    # checkbox ("Correcte") marking whether that answer is a correct one.
    answer1 = forms.CharField(max_length=1000, label="Réponse 1")
    answer1_correct = forms.BooleanField(required=False, label="Correcte")
    answer2 = forms.CharField(max_length=1000, label="Réponse 2")
    answer2_correct = forms.BooleanField(required=False, label="Correcte")
    answer3 = forms.CharField(max_length=1000, label="Réponse 3")
    answer3_correct = forms.BooleanField(required=False, label="Correcte")
    class Meta:
        # ModelForm binding: the model edited and the fields exposed to it.
        model = MCQuestion
        fields = (
            "content",
            "difficulty",
            "order",
            "theme1",
            "theme2",
            "theme3",
            "answer1",
            "answer1_correct",
            "answer2",
            "answer2_correct",
            "answer3",
            "answer3_correct",
        )
class MultiChoiceForm(forms.Form):
    """
    Form used for the taking of a quiz.
    It is used for getting the student's answer to a multichoice question.
    This answer will be compared to the one decided by the creator of
    the quiz in order to decide if it is right or wrong.
    """
    # Each select box offers blank / "Vrai" (true) / "Faux" (false).
    CHOICES = ((None, ""), (True, "Vrai"), (False, "Faux"))
    answer1 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
    answer2 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
    answer3 = forms.ChoiceField(choices=CHOICES, widget=forms.Select(), required=True)
qid = forms.IntegerField(widget=forms.HiddenInput()) | 0 | 344 | 27 |
58251a14d5f0a885f6169a7ae42e78076d256e6f | 401 | py | Python | tests/cases/fib_with_argparse.py | MiguelMarcelino/py2many | 9b040b2a157e265df9c053eaf3e5cd644d3e30d0 | [
"MIT"
] | 2 | 2022-02-02T11:37:53.000Z | 2022-03-30T18:19:06.000Z | tests/cases/fib_with_argparse.py | MiguelMarcelino/py2many | 9b040b2a157e265df9c053eaf3e5cd644d3e30d0 | [
"MIT"
] | 25 | 2022-02-28T21:19:11.000Z | 2022-03-23T21:26:20.000Z | tests/cases/fib_with_argparse.py | MiguelMarcelino/py2many | 9b040b2a157e265df9c053eaf3e5cd644d3e30d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from argparse_dataclass import dataclass
@dataclass
class Options:
    """Command-line options parsed via argparse_dataclass's parse_args()."""

    v: bool = False  # when set, main prints "args.v is true"
    n: int = 0       # Fibonacci index to print; 0 means "fall back to 5"


def fib(i: int) -> int:
    """Return the i-th Fibonacci number, with fib(0) == fib(1) == 1."""
    if i == 0 or i == 1:
        return 1
    return fib(i - 1) + fib(i - 2)


if __name__ == "__main__":
    # Defect fixed: this fragment had a bare @dataclass with no class body
    # (a SyntaxError) and used undefined names Options and fib; both are
    # restored here.
    args = Options.parse_args()
    if args.v:
        print("args.v is true")
    if args.n == 0:
        args.n = 5
    print(fib(args.n))
| 16.04 | 40 | 0.553616 | #!/usr/bin/env python3
from argparse_dataclass import dataclass
@dataclass
class Options:
    # Parsed from the command line via argparse_dataclass's parse_args().
    v: bool = False  # when set, __main__ prints "args.v is true"
    n: int = 0  # Fibonacci index to print; 0 makes __main__ fall back to 5
def fib(i: int) -> int:
    """Return the i-th Fibonacci number, with fib(0) == fib(1) == 1."""
    return 1 if i in (0, 1) else fib(i - 1) + fib(i - 2)
if __name__ == "__main__":
    # Parse CLI options, apply the default count, then print fib(n).
    args = Options.parse_args()
    if args.v:
        print("args.v is true")
    args.n = args.n or 5  # 0 (the field default) means "not supplied"
    print(fib(args.n))
| 79 | 28 | 45 |
396d1311738d5c4198813a6e64e8cda6bf7c0a43 | 3,821 | py | Python | src/util.py | Fluxticks/BountyOptimiser | 5ce96dd58045d540536ad4c31fd1b454461bd9f1 | [
"MIT"
] | null | null | null | src/util.py | Fluxticks/BountyOptimiser | 5ce96dd58045d540536ad4c31fd1b454461bd9f1 | [
"MIT"
] | null | null | null | src/util.py | Fluxticks/BountyOptimiser | 5ce96dd58045d540536ad4c31fd1b454461bd9f1 | [
"MIT"
] | null | null | null | import logging
import colorlog
from coloured_log import ColoredFormatter
DEBUG_TRACE_NUM = 9


def trace(self, message, *args, **kws):
    """Log *message* at the custom TRACE level (numeric level 9).

    Restored: this fragment assigned ``logging.Logger.trace = trace``
    without defining ``trace``, which raised NameError at import time.
    """
    if self.isEnabledFor(DEBUG_TRACE_NUM):
        self._log(DEBUG_TRACE_NUM, message, args, **kws)


# Expose trace() as a Logger method and register the level's display name.
logging.Logger.trace = trace
logging.addLevelName(9, 'TRACE')
class bcolours:
    """ANSI terminal escape codes used to colour console output."""
    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    ERROR = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def dprint(data, parent='data', level=0):
    """Pretty-print a nested dictionary with ANSI colours.

    Keys are printed in red; dict values recurse one tab level deeper,
    list values are rendered in yellow, ints in green and strings plain.

    Args:
        data (dict): The dictionary to be printed
        parent (str, optional): The key from the parent for nested dictionaries
        level (int, optional): How many nested dictionaries deep the recursion is
    """
    tabs = '\t' * level
    # Header line: the parent key, printed in blue.
    cprint('{}' + tabs + parent + '{}: ', bcolours.OKBLUE)
    tabs = '\t' * (level + 1)
    for key, value in data.items():
        if isinstance(value, dict):
            dprint(value, parent=key, level=level + 1)
        elif isinstance(value, list):
            value = [str(x) for x in value]
            cprint('{}' + tabs + key + '{}: {}{}{}', bcolours.ERROR, bcolours.WARNING, str(value), bcolours.ENDC)
        elif isinstance(value, int):
            # NOTE: bool is a subclass of int, so booleans land here too.
            cprint('{}' + tabs + key + '{}: {}{}{}', bcolours.ERROR, bcolours.OKGREEN, str(value), bcolours.ENDC)
        elif isinstance(value, str):
            cprint('{}' + tabs + key + '{}: {}', bcolours.ERROR, str(value))
def cprint(text, colour, *args):
    """Print *text* with colour codes substituted into its '{}' slots.

    The format string's first placeholder receives *colour*, the second
    receives the ANSI reset code, and any *args* fill the remaining slots.

    Args:
        text (str): The format string to be coloured and printed
        colour (bcolours.COLOR): The colour code for the text
        *args: Any extra values for the remaining placeholders
    """
    print(text.format(colour, bcolours.ENDC, *args))
return text.format(colour, bcolours.ENDC, *args) + '\n' | 35.055046 | 114 | 0.61921 | import logging
import colorlog
from coloured_log import ColoredFormatter
DEBUG_TRACE_NUM = 9


def trace(self, message, *args, **kws):
    """Emit *message* at the custom TRACE level when that level is enabled."""
    if not self.isEnabledFor(DEBUG_TRACE_NUM):
        return
    self._log(DEBUG_TRACE_NUM, message, args, **kws)


# Attach trace() to every Logger and register the level's display name.
logging.Logger.trace = trace
logging.addLevelName(9, 'TRACE')
def makeLogger(logName, logLevel=logging.INFO):
    """Create the logger named *logName* (upper-cased) with three handlers:
    a coloured console stream at *logLevel*, a plain DEBUG file
    (<logname>.log) and a TRACE-level file (<logname>_trace.log)."""
    logName = logName.upper()
    pad = ' ' * (10 - len(logName))
    console = logging.StreamHandler()
    console.setLevel(logLevel)
    console.setFormatter(ColoredFormatter(
        '[%(name)s]' + pad + '%(levelname)-15s | %(message)s (%(filename)s:%(lineno)d)'))
    plain_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    debug_file = logging.FileHandler(logName.lower() + '.log')
    debug_file.setLevel(logging.DEBUG)
    debug_file.setFormatter(plain_fmt)
    trace_file = logging.FileHandler(logName.lower() + '_trace.log')
    trace_file.setLevel(DEBUG_TRACE_NUM)
    trace_file.setFormatter(plain_fmt)
    logger = logging.getLogger(logName)
    logger.setLevel(logLevel)
    for handler in (console, debug_file, trace_file):
        logger.addHandler(handler)
    return logger
def makeColorLog(logName, logLevel=logging.INFO):
    """Create the logger named *logName* (upper-cased) with a colourised
    console handler plus a plain DEBUG-level file handler (<logname>.log)."""
    console = colorlog.StreamHandler()
    console.setFormatter(colorlog.ColoredFormatter(
        ' %(name)s : %(log_color)s%(levelname)-8s%(reset)s | %(message)s (%(filename)s:%(lineno)d)'))
    logfile = logging.FileHandler(logName.lower() + '.log')
    logfile.setLevel(logging.DEBUG)
    logfile.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(logName.upper())
    logger.setLevel(logLevel)
    logger.addHandler(console)
    logger.addHandler(logfile)
    return logger
def unHashToId(hashvalue):
    """Reinterpret *hashvalue* as a signed 32-bit integer: values with bit
    31 set become negative, everything else passes through unchanged."""
    value = int(hashvalue)
    return value - (1 << 32) if value & (1 << 31) else value
class bcolours:
    """ANSI terminal escape codes used to colour console output."""
    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    ERROR = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def dprint(data, parent='data', level=0):
    """Pretty-print a nested dictionary with ANSI colours.

    Keys are printed in red; dict values recurse one tab level deeper,
    list values are rendered in yellow, ints in green and strings plain.

    Args:
        data (dict): The dictionary to be printed
        parent (str, optional): The key from the parent for nested dictionaries
        level (int, optional): How many nested dictionaries deep the recursion is
    """
    tabs = '\t' * level
    # Header line: the parent key, printed in blue.
    cprint('{}' + tabs + parent + '{}: ', bcolours.OKBLUE)
    tabs = '\t' * (level + 1)
    for key, value in data.items():
        if isinstance(value, dict):
            dprint(value, parent=key, level=level + 1)
        elif isinstance(value, list):
            value = [str(x) for x in value]
            cprint('{}' + tabs + key + '{}: {}{}{}', bcolours.ERROR, bcolours.WARNING, str(value), bcolours.ENDC)
        elif isinstance(value, int):
            # NOTE: bool is a subclass of int, so booleans land here too.
            cprint('{}' + tabs + key + '{}: {}{}{}', bcolours.ERROR, bcolours.OKGREEN, str(value), bcolours.ENDC)
        elif isinstance(value, str):
            cprint('{}' + tabs + key + '{}: {}', bcolours.ERROR, str(value))
def cprint(text, colour, *args):
    """Print *text* with colour codes substituted into its '{}' slots.

    The format string's first placeholder receives *colour*, the second
    receives the ANSI reset code, and any *args* fill the remaining slots.

    Args:
        text (str): The format string to be coloured and printed
        colour (bcolours.COLOR): The colour code for the text
        *args: Any extra values for the remaining placeholders
    """
    print(text.format(colour, bcolours.ENDC, *args))
return text.format(colour, bcolours.ENDC, *args) + '\n' | 1,899 | 0 | 98 |
28702ad204922dd46aac6611c5b25848b5b1b25c | 5,505 | py | Python | src/train.py | jf20541/DNNHyperparameterTuning | ba741c8eaaa4b814407ebb063bddde9f7a51bcbd | [
"MIT"
] | 1 | 2021-08-17T02:01:19.000Z | 2021-08-17T02:01:19.000Z | src/train.py | jf20541/DNNHyperparameterTuning | ba741c8eaaa4b814407ebb063bddde9f7a51bcbd | [
"MIT"
] | null | null | null | src/train.py | jf20541/DNNHyperparameterTuning | ba741c8eaaa4b814407ebb063bddde9f7a51bcbd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import torch
import optuna
from dataset import HotelDataSet
from model import DeepNeuralNetwork
import config
from engine import Engine
from sklearn.metrics import roc_auc_score
import torch.optim as optim
def train(fold, params, save_model=False):
    """Train a DNN on one stratified fold and report its best eval ROC-AUC.

    Args:
        fold (int): which stratified K-fold split is held out for validation.
        params (dict): hyperparameters — optimizer class, num_layers,
            hidden_size, dropout and learning_rate.
        save_model (bool, optional): persist the weights of the epoch with
            the best eval ROC-AUC to ../models/model{fold}.bin.
            Defaults to False.

    Returns:
        float: the best validation ROC-AUC observed over config.EPOCHS epochs.
    """
    df = pd.read_csv(config.TRAINING_FOLDS)
    train_df = df[df.kfold != fold].reset_index(drop=True)
    valid_df = df[df.kfold == fold].reset_index(drop=True)
    # split the data into training and testing set (define features, target) values
    y_train = train_df[["is_canceled"]].values
    x_train = train_df.drop("is_canceled", axis=1).values
    y_test = valid_df[["is_canceled"]].values
    x_test = valid_df.drop("is_canceled", axis=1).values
    # feed the data into the custom Dataset
    train_dataset = HotelDataSet(x_train, y_train)
    test_dataset = HotelDataSet(x_test, y_test)
    # hand the datasets to DataLoaders
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=config.TEST_BATCH_SIZE
    )
    # instantiate the DNN with the sampled architecture hyperparameters
    model = DeepNeuralNetwork(
        n_features=x_train.shape[1],
        n_targets=y_train.shape[1],
        n_layers=params["num_layers"],
        hidden_size=params["hidden_size"],
        dropout=params["dropout"],
    )
    optimizer = params["optimizer"](model.parameters(), lr=params["learning_rate"])
    eng = Engine(model, optimizer)
    best_metric = 0
    for epochs in range(config.EPOCHS):
        # run one training pass and one evaluation pass
        train_targets, train_outputs = eng.train_fn(train_loader)
        eval_targets, eval_outputs = eng.eval_fn(test_loader)
        # BUG FIX: the train predictions were previously thresholded from
        # eval_outputs, so the reported train ROC-AUC was computed against
        # predictions from the wrong split.
        train_outputs = np.array(train_outputs) >= 0.5
        eval_outputs = np.array(eval_outputs) >= 0.5
        # calculate the roc-auc score for train & eval
        train_metric = roc_auc_score(train_targets, train_outputs)
        eval_metric = roc_auc_score(eval_targets, eval_outputs)
        print(
            f"Epoch:{epochs+1}/{config.EPOCHS}, Train ROC-AUC: {train_metric:.4f}, Eval ROC-AUC: {eval_metric:.4f}"
        )
        # track (and optionally save) the best eval metric so far
        if eval_metric > best_metric:
            best_metric = eval_metric
            if save_model:
                torch.save(model.state_dict(), f"../models/model{fold}.bin")
    return best_metric
if __name__ == "__main__":
    def objective(trial):
        """Optuna objective: sample one hyperparameter combination and
        return the mean best ROC-AUC over the 5 stratified folds.

        Args:
            trial: Optuna trial object used to sample the hyperparameters.
        Raises:
            optuna.exceptions.TrialPruned: if the trial is pruned, Optuna
                moves on to the next of the n_trials.
        Returns:
            float: the value Optuna maximizes (mean fold ROC-AUC).
        """
        params = {
            "optimizer": trial.suggest_categorical(
                "optimizer", [optim.SGD, optim.Adam, optim.AdamW]
            ),
            "num_layers": trial.suggest_int("num_layers", 1, 10),
            "hidden_size": trial.suggest_int("hidden_size", 2, 112),
            "dropout": trial.suggest_uniform("dropout", 0.1, 0.4),
            "learning_rate": trial.suggest_loguniform("learning_rate", 0.0001, 0.01),
        }
        # Cross-validate the sampled configuration over the 5 folds.
        all_metrics = []
        for i in range(5):
            temp_metric = train(i, params, save_model=False)
            all_metrics.append(temp_metric)
            if trial.should_prune():
                raise optuna.exceptions.TrialPruned()
        return np.mean(all_metrics)
    # study object contains information about the required parameter space
    # increase the return value of our optimization function
    study = optuna.create_study(
        sampler=optuna.samplers.TPESampler(), direction="maximize"
    )
    # initiate optimize with 10 trials
    study.optimize(objective, n_trials=10)
    # define number of pruned&completed trials (saves time and computing power)
    pruned_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED
    ]
    complete_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE
    ]
    # print metric and optimal combination of hyperparameters
    n_trial = study.best_trial
    print(f"Best Trial: {n_trial}, Value: {n_trial.values}")
    print(f"Best Parameters: {n_trial.params}")
    # Retrain with the best hyperparameters (fold 0 only) and save weights.
    scores = 0
    for j in range(1):
        scr = train(j, n_trial.params, save_model=True)
        scores += scr
    # plot param importance and contour
    fig = optuna.visualization.plot_param_importances(study)
    fig2 = optuna.visualization.plot_contour(
        study, params=["learning_rate", "optimizer"]
    )
    fig.show()
    fig2.show()
    df = study.trials_dataframe().drop(
        ["state", "datetime_start", "datetime_complete"], axis=1
    )
    print(f"SCORE: {scores}")
    print(f"Number of Finished Trials {len(study.trials)}")
    print(f"Number of Pruned Trials {len(pruned_trials)}")
    print(f"Number of Completed Trials {len(complete_trials)}")
    print(df)
| 36.456954 | 115 | 0.662125 | import pandas as pd
import numpy as np
import torch
import optuna
from dataset import HotelDataSet
from model import DeepNeuralNetwork
import config
from engine import Engine
from sklearn.metrics import roc_auc_score
import torch.optim as optim
def train(fold, params, save_model=False):
    """Train a DNN on one stratified fold and report its best eval ROC-AUC.

    Args:
        fold (int): which stratified K-fold split is held out for validation.
        params (dict): hyperparameters — optimizer class, num_layers,
            hidden_size, dropout and learning_rate.
        save_model (bool, optional): persist the weights of the epoch with
            the best eval ROC-AUC to ../models/model{fold}.bin.
            Defaults to False.

    Returns:
        float: the best validation ROC-AUC observed over config.EPOCHS epochs.
    """
    df = pd.read_csv(config.TRAINING_FOLDS)
    train_df = df[df.kfold != fold].reset_index(drop=True)
    valid_df = df[df.kfold == fold].reset_index(drop=True)
    # split the data into training and testing set (define features, target) values
    y_train = train_df[["is_canceled"]].values
    x_train = train_df.drop("is_canceled", axis=1).values
    y_test = valid_df[["is_canceled"]].values
    x_test = valid_df.drop("is_canceled", axis=1).values
    # feed the data into the custom Dataset
    train_dataset = HotelDataSet(x_train, y_train)
    test_dataset = HotelDataSet(x_test, y_test)
    # hand the datasets to DataLoaders
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=config.TEST_BATCH_SIZE
    )
    # instantiate the DNN with the sampled architecture hyperparameters
    model = DeepNeuralNetwork(
        n_features=x_train.shape[1],
        n_targets=y_train.shape[1],
        n_layers=params["num_layers"],
        hidden_size=params["hidden_size"],
        dropout=params["dropout"],
    )
    optimizer = params["optimizer"](model.parameters(), lr=params["learning_rate"])
    eng = Engine(model, optimizer)
    best_metric = 0
    for epochs in range(config.EPOCHS):
        # run one training pass and one evaluation pass
        train_targets, train_outputs = eng.train_fn(train_loader)
        eval_targets, eval_outputs = eng.eval_fn(test_loader)
        # BUG FIX: the train predictions were previously thresholded from
        # eval_outputs, so the reported train ROC-AUC was computed against
        # predictions from the wrong split.
        train_outputs = np.array(train_outputs) >= 0.5
        eval_outputs = np.array(eval_outputs) >= 0.5
        # calculate the roc-auc score for train & eval
        train_metric = roc_auc_score(train_targets, train_outputs)
        eval_metric = roc_auc_score(eval_targets, eval_outputs)
        print(
            f"Epoch:{epochs+1}/{config.EPOCHS}, Train ROC-AUC: {train_metric:.4f}, Eval ROC-AUC: {eval_metric:.4f}"
        )
        # track (and optionally save) the best eval metric so far
        if eval_metric > best_metric:
            best_metric = eval_metric
            if save_model:
                torch.save(model.state_dict(), f"../models/model{fold}.bin")
    return best_metric
if __name__ == "__main__":
    def objective(trial):
        """Optuna objective: sample one hyperparameter combination and
        return the mean best ROC-AUC over the 5 stratified folds.

        Args:
            trial: Optuna trial object used to sample the hyperparameters.
        Raises:
            optuna.exceptions.TrialPruned: if the trial is pruned, Optuna
                moves on to the next of the n_trials.
        Returns:
            float: the value Optuna maximizes (mean fold ROC-AUC).
        """
        params = {
            "optimizer": trial.suggest_categorical(
                "optimizer", [optim.SGD, optim.Adam, optim.AdamW]
            ),
            "num_layers": trial.suggest_int("num_layers", 1, 10),
            "hidden_size": trial.suggest_int("hidden_size", 2, 112),
            "dropout": trial.suggest_uniform("dropout", 0.1, 0.4),
            "learning_rate": trial.suggest_loguniform("learning_rate", 0.0001, 0.01),
        }
        # Cross-validate the sampled configuration over the 5 folds.
        all_metrics = []
        for i in range(5):
            temp_metric = train(i, params, save_model=False)
            all_metrics.append(temp_metric)
            if trial.should_prune():
                raise optuna.exceptions.TrialPruned()
        return np.mean(all_metrics)
    # study object contains information about the required parameter space
    # increase the return value of our optimization function
    study = optuna.create_study(
        sampler=optuna.samplers.TPESampler(), direction="maximize"
    )
    # initiate optimize with 10 trials
    study.optimize(objective, n_trials=10)
    # define number of pruned&completed trials (saves time and computing power)
    pruned_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED
    ]
    complete_trials = [
        t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE
    ]
    # print metric and optimal combination of hyperparameters
    n_trial = study.best_trial
    print(f"Best Trial: {n_trial}, Value: {n_trial.values}")
    print(f"Best Parameters: {n_trial.params}")
    # Retrain with the best hyperparameters (fold 0 only) and save weights.
    scores = 0
    for j in range(1):
        scr = train(j, n_trial.params, save_model=True)
        scores += scr
    # plot param importance and contour
    fig = optuna.visualization.plot_param_importances(study)
    fig2 = optuna.visualization.plot_contour(
        study, params=["learning_rate", "optimizer"]
    )
    fig.show()
    fig2.show()
    df = study.trials_dataframe().drop(
        ["state", "datetime_start", "datetime_complete"], axis=1
    )
    print(f"SCORE: {scores}")
    print(f"Number of Finished Trials {len(study.trials)}")
    print(f"Number of Pruned Trials {len(pruned_trials)}")
    print(f"Number of Completed Trials {len(complete_trials)}")
    print(df)
| 0 | 0 | 0 |
3d27ae549e9ad48016af98290dc2ad7096ed0215 | 9,638 | py | Python | src/serverattack_main.py | Ilcyb/Federated-Learning-PyTorch | 4830a89ffa1ac0ad0e52a4551338532cfb4ca210 | [
"MIT"
] | 1 | 2021-04-28T03:34:01.000Z | 2021-04-28T03:34:01.000Z | src/serverattack_main.py | Ilcyb/Federated-Learning-PyTorch | 4830a89ffa1ac0ad0e52a4551338532cfb4ca210 | [
"MIT"
] | null | null | null | src/serverattack_main.py | Ilcyb/Federated-Learning-PyTorch | 4830a89ffa1ac0ad0e52a4551338532cfb4ca210 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import os
import copy
import time
import pickle
import numpy as np
from tqdm import tqdm
import torch
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
from options import args_parser
from update import LocalUpdate, test_inference, AdversaryGanUpdateMnist, AdversaryGanUpdateCifar, AdversaryUpdate, AdversaryGanUpdateSVHN
from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar, DCGANDiscriminator_mnist, DCGANGenerator_mnist, DCGANDiscriminator_cifar10, DCGANGenerator_cifar10, DCGANDiscriminator_SVHN, DCGANGenerator_SVHN
from utils import get_dataset, average_weights, exp_details, get_dataset_ganattack, get_dataset_split_by_label, \
get_dataset_idxgroup_ganattack, get_experiment_result_location, save_grid, generate_gif_from_file, \
generate_gif_from_list,plot_loss_acc, compute_avgpsnr, plot_avg_psnr
if __name__ == '__main__':
    # Federated-learning training script.  When args.model == 'dcgan' the
    # server additionally trains a generator against the shared
    # discriminator each round (server-side GAN attack).
    start_time = time.time()
    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('./logs')
    args = args_parser()
    exp_details(args)
    if args.gpu:
        torch.cuda.set_device('cuda:{}'.format(args.gpu))
    device = 'cuda' if args.gpu else 'cpu'
    # load dataset and user groups
    if args.model == 'dcgan':
        train_dataset, test_dataset, user_groups = get_dataset(args)
        # _, _, user_groups = get_dataset(args)
        # train_dataset, test_dataset, label_indexs = get_dataset_split_by_label(args)
    else:
        train_dataset, test_dataset, user_groups = get_dataset(args)
    global_model = None
    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural netork
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            # global_model = DCGANDiscriminator_cifar10(args=args)
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer preceptron
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    elif args.model == 'dcgan':
        # deep convolutional generative adversarial networks
        if args.dataset == 'mnist':
            global_model = DCGANDiscriminator_mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = DCGANDiscriminator_cifar10(args=args)
        elif args.dataset == 'svhn':
            global_model = DCGANDiscriminator_SVHN(args=args)
        else:
            # TODO add datasets support
            exit('Error: unrecognized dataset')
    else:
        exit('Error: unrecognized model')
    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    # copy weights
    global_weights = global_model.state_dict()
    # Training
    train_loss, train_accuracy = [], []
    fake_images = []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    avg_psnrs = []
    print_every = 2
    val_loss_pre, counter = 0, 0
    save_location = get_experiment_result_location(args.model, args.dataset,
                                                   args.wanted_label_index,
                                                   {'ganlr': args.local_gan_lr,
                                                    'ganepoch': args.local_gan_epoch,
                                                    'optimizer': args.optimizer,
                                                    'localepoch':args.local_ep},
                                                   args.mode,
                                                   args.experiment_name)
    # adversary model
    # NOTE(review): false_label_index=10 looks like an extra "fake" class
    # appended after the 10 real classes — confirm against the model code.
    if args.model == 'dcgan' and args.dataset == 'mnist':
        generator_model = DCGANGenerator_mnist(args=args)
        adversary_gan_update = AdversaryGanUpdateMnist(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    elif args.model == 'dcgan' and args.dataset == 'cifar':
        generator_model = DCGANGenerator_cifar10(args=args)
        adversary_gan_update = AdversaryGanUpdateCifar(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    elif args.model == 'dcgan' and args.dataset == 'svhn':
        generator_model = DCGANGenerator_SVHN(args=args)
        # NOTE(review): the SVHN branch reuses AdversaryGanUpdateCifar even
        # though AdversaryGanUpdateSVHN is imported — confirm this is intended.
        adversary_gan_update = AdversaryGanUpdateCifar(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        # label_split = [[i] for i in range(10)]
        # idx_group = get_dataset_idxgroup_ganattack(args, label_split, label_indexs)
        print(f'\n | Global Training Round : {epoch+1} |\n')
        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        # randomly sample the m users that participate in this round
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        # if (len(idxs_users) != len(idx_group)):
        # raise ValueError('len(idx_users)!=len(idx_group)')
        data_idx = 0
        for idx in idxs_users:
            # TODO: avoid constructing a new LocalUpdate object every round
            global_model_copy = copy.deepcopy(global_model)
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, loss = local_model.update_weights(
                model=global_model_copy, global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))
            data_idx += 1
        # the server mounts its attack: train the generator, then submit a
        # crafted discriminator update alongside the honest client updates
        if args.model == 'dcgan':
            global_model_copy = copy.deepcopy(global_model)
            server_adversary = AdversaryUpdate(args=args, dataset=train_dataset,
                                               idxs=[], logger=logger,
                                               adversary_gan_update=adversary_gan_update,
                                               discriminator_model=global_model_copy)
            server_adversary.train_generator()
            w = server_adversary.update_weights(
                model=global_model_copy, global_round=epoch)
            local_weights.append(copy.deepcopy(w))
        # update global weights
        global_weights = average_weights(local_weights)
        # update global weights
        global_model.load_state_dict(global_weights)
        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)
        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        # print('test idx:{}'.format(idx))
        for c in range(args.num_users):
            # FIXME
            # (translated) earlier code used user_groups[idx] here; using
            # user_groups[c] evaluates each user's own shard, which looks right
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[c], logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc)/len(list_acc))
        # print global training loss after every 'i' rounds
        if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))
        # save generated fake images each epoch
        if args.model == 'dcgan':
            randz = torch.randn(1, 100, 1, 1, device=device)
            generated_fake_image = generator_model(randz).to('cpu').detach()
            vutils.save_image(
                generated_fake_image, os.path.join(save_location, os.path.join('fake_images', 'epoch_{}.png'.format(epoch))))
            fake_images.append(generated_fake_image[0])
            want_targets = (train_dataset.targets == args.wanted_label_index)
            want_targets = [i for i in range(len(want_targets)) if want_targets[i]==True]
            # sample 10 real images of the wanted label to compute the AVG PSNR
            random_image_idxs = np.random.choice(want_targets, 10, replace=False)
            batch_images = []
            for idx in random_image_idxs:
                batch_images.append(train_dataset.data[idx])
            avg_psnr = compute_avgpsnr(generated_fake_image, batch_images)
            avg_psnrs.append(avg_psnr)
    # Test inference after completion of training
    test_acc, test_loss = test_inference(args, global_model, test_dataset)
    # NOTE(review): everything below assumes the dcgan branch ran
    # (fake_images non-empty, fake_images/ directory populated) — confirm.
    plot_loss_acc(train_loss, train_accuracy, save_location)
    plot_avg_psnr(avg_psnrs, save_location)
    generate_gif_from_file(os.path.join(save_location, 'fake_images'), os.path.join(save_location, 'training.gif'))
    print('fake images shape:{}'.format(fake_images[0].shape))
    save_grid(fake_images, save_location)
    print(f' \n Results after {args.epochs} global rounds of training:')
    print("|---- Avg Train Accuracy: {:.2f}%".format(100*train_accuracy[-1]))
    print("|---- Test Accuracy: {:.2f}%".format(100*test_acc))
    print('\n Total Run Time: {0:0.4f}'.format(time.time()-start_time))
| 44.62037 | 206 | 0.618489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
#
# Federated-learning training driver with an optional server-side GAN attack:
# each round, a sampled set of clients trains the shared global model locally
# and the server averages their weights (FedAvg).  When args.model == 'dcgan',
# the server additionally acts as an adversary, training a generator against
# the shared discriminator to reconstruct images of a chosen target label,
# and tracks reconstruction quality via average PSNR against real images.
import os
import copy
import time
import pickle
import numpy as np
from tqdm import tqdm
import torch
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
from options import args_parser
from update import LocalUpdate, test_inference, AdversaryGanUpdateMnist, AdversaryGanUpdateCifar, AdversaryUpdate, AdversaryGanUpdateSVHN
from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar, DCGANDiscriminator_mnist, DCGANGenerator_mnist, DCGANDiscriminator_cifar10, DCGANGenerator_cifar10, DCGANDiscriminator_SVHN, DCGANGenerator_SVHN
from utils import get_dataset, average_weights, exp_details, get_dataset_ganattack, get_dataset_split_by_label, \
    get_dataset_idxgroup_ganattack, get_experiment_result_location, save_grid, generate_gif_from_file, \
    generate_gif_from_list,plot_loss_acc, compute_avgpsnr, plot_avg_psnr
if __name__ == '__main__':
    start_time = time.time()
    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('./logs')
    args = args_parser()
    exp_details(args)
    if args.gpu:
        torch.cuda.set_device('cuda:{}'.format(args.gpu))
    device = 'cuda' if args.gpu else 'cpu'
    # load dataset and user groups (user_groups maps user id -> sample indices)
    if args.model == 'dcgan':
        train_dataset, test_dataset, user_groups = get_dataset(args)
        # _, _, user_groups = get_dataset(args)
        # train_dataset, test_dataset, label_indexs = get_dataset_split_by_label(args)
    else:
        train_dataset, test_dataset, user_groups = get_dataset(args)
    global_model = None
    # BUILD MODEL
    if args.model == 'cnn':
        # Convolutional neural netork
        if args.dataset == 'mnist':
            global_model = CNNMnist(args=args)
        elif args.dataset == 'fmnist':
            global_model = CNNFashion_Mnist(args=args)
        elif args.dataset == 'cifar':
            # global_model = DCGANDiscriminator_cifar10(args=args)
            global_model = CNNCifar(args=args)
    elif args.model == 'mlp':
        # Multi-layer preceptron: input size is the flattened image size
        img_size = train_dataset[0][0].shape
        len_in = 1
        for x in img_size:
            len_in *= x
        global_model = MLP(dim_in=len_in, dim_hidden=64,
                           dim_out=args.num_classes)
    elif args.model == 'dcgan':
        # deep convolutional generative adversarial networks: the shared
        # global model is the discriminator; the generator is server-side only
        if args.dataset == 'mnist':
            global_model = DCGANDiscriminator_mnist(args=args)
        elif args.dataset == 'cifar':
            global_model = DCGANDiscriminator_cifar10(args=args)
        elif args.dataset == 'svhn':
            global_model = DCGANDiscriminator_SVHN(args=args)
        else:
            # TODO add datasets support
            exit('Error: unrecognized dataset')
    else:
        exit('Error: unrecognized model')
    # Set the model to train and send it to device.
    global_model.to(device)
    global_model.train()
    # copy weights
    global_weights = global_model.state_dict()
    # Training bookkeeping
    train_loss, train_accuracy = [], []
    fake_images = []
    val_acc_list, net_list = [], []
    cv_loss, cv_acc = [], []
    avg_psnrs = []
    print_every = 2
    val_loss_pre, counter = 0, 0
    save_location = get_experiment_result_location(args.model, args.dataset,
                                                   args.wanted_label_index,
                                                   {'ganlr': args.local_gan_lr,
                                                    'ganepoch': args.local_gan_epoch,
                                                    'optimizer': args.optimizer,
                                                    'localepoch':args.local_ep},
                                                   args.mode,
                                                   args.experiment_name)
    # adversary model: generator + dataset-specific GAN update helper.
    # NOTE(review): the svhn branch reuses AdversaryGanUpdateCifar even though
    # AdversaryGanUpdateSVHN is imported — presumably intentional (same image
    # shape), but worth confirming.
    if args.model == 'dcgan' and args.dataset == 'mnist':
        generator_model = DCGANGenerator_mnist(args=args)
        adversary_gan_update = AdversaryGanUpdateMnist(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    elif args.model == 'dcgan' and args.dataset == 'cifar':
        generator_model = DCGANGenerator_cifar10(args=args)
        adversary_gan_update = AdversaryGanUpdateCifar(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    elif args.model == 'dcgan' and args.dataset == 'svhn':
        generator_model = DCGANGenerator_SVHN(args=args)
        adversary_gan_update = AdversaryGanUpdateCifar(copy.deepcopy(global_model), generator_model,
                                                       args, logger, args.wanted_label_index, false_label_index=10)
    for epoch in tqdm(range(args.epochs)):
        local_weights, local_losses = [], []
        # label_split = [[i] for i in range(10)]
        # idx_group = get_dataset_idxgroup_ganattack(args, label_split, label_indexs)
        print(f'\n | Global Training Round : {epoch+1} |\n')
        global_model.train()
        m = max(int(args.frac * args.num_users), 1)
        # randomly sample the fraction of users participating in this round
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        # if (len(idxs_users) != len(idx_group)):
        #     raise ValueError('len(idx_users)!=len(idx_group)')
        data_idx = 0
        for idx in idxs_users:
            # TODO: an Update object should not be re-created every round
            global_model_copy = copy.deepcopy(global_model)
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w, loss = local_model.update_weights(
                model=global_model_copy, global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))
            data_idx += 1
        # the server performs the GAN attack: train the generator against the
        # current discriminator and contribute an adversarial weight update
        if args.model == 'dcgan':
            global_model_copy = copy.deepcopy(global_model)
            server_adversary = AdversaryUpdate(args=args, dataset=train_dataset,
                                              idxs=[], logger=logger,
                                              adversary_gan_update=adversary_gan_update,
                                              discriminator_model=global_model_copy)
            server_adversary.train_generator()
            w = server_adversary.update_weights(
                model=global_model_copy, global_round=epoch)
            local_weights.append(copy.deepcopy(w))
        # update global weights (FedAvg over all collected local weights)
        global_weights = average_weights(local_weights)
        # update global weights
        global_model.load_state_dict(global_weights)
        loss_avg = sum(local_losses) / len(local_losses)
        train_loss.append(loss_avg)
        # Calculate avg training accuracy over all users at every epoch
        list_acc, list_loss = [], []
        global_model.eval()
        # print('test idx:{}'.format(idx))
        for c in range(args.num_users):
            # FIXME
            # translated note: should user_groups[idx] here be user_groups[c]?
            # (the code below does use user_groups[c])
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[c], logger=logger)
            acc, loss = local_model.inference(model=global_model)
            list_acc.append(acc)
            list_loss.append(loss)
        train_accuracy.append(sum(list_acc)/len(list_acc))
        # print global training loss after every 'i' rounds
        if (epoch+1) % print_every == 0:
            print(f' \nAvg Training Stats after {epoch+1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')
            print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))
        # save generated fake images each epoch
        if args.model == 'dcgan':
            randz = torch.randn(1, 100, 1, 1, device=device)
            generated_fake_image = generator_model(randz).to('cpu').detach()
            vutils.save_image(
                generated_fake_image, os.path.join(save_location, os.path.join('fake_images', 'epoch_{}.png'.format(epoch))))
            fake_images.append(generated_fake_image[0])
            want_targets = (train_dataset.targets == args.wanted_label_index)
            want_targets = [i for i in range(len(want_targets)) if want_targets[i]==True]
            # randomly sample real images of the wanted label to compute the
            # average PSNR against the generated image
            random_image_idxs = np.random.choice(want_targets, 10, replace=False)
            batch_images = []
            for idx in random_image_idxs:
                batch_images.append(train_dataset.data[idx])
            avg_psnr = compute_avgpsnr(generated_fake_image, batch_images)
            avg_psnrs.append(avg_psnr)
    # Test inference after completion of training, then save plots/GIF/grid
    test_acc, test_loss = test_inference(args, global_model, test_dataset)
    plot_loss_acc(train_loss, train_accuracy, save_location)
    plot_avg_psnr(avg_psnrs, save_location)
    generate_gif_from_file(os.path.join(save_location, 'fake_images'), os.path.join(save_location, 'training.gif'))
    print('fake images shape:{}'.format(fake_images[0].shape))
    save_grid(fake_images, save_location)
    print(f' \n Results after {args.epochs} global rounds of training:')
    print("|---- Avg Train Accuracy: {:.2f}%".format(100*train_accuracy[-1]))
    print("|---- Test Accuracy: {:.2f}%".format(100*test_acc))
    print('\n Total Run Time: {0:0.4f}'.format(time.time()-start_time))
| 0 | 0 | 0 |
93c0cc9337e79fdca67a9b20fc3b1dcffb643e0e | 1,327 | py | Python | ODEs/RK_methods.py | Zettergren-Courses/EP501_python | dabaa584e5158eb35197a43f38920a9ed7cc02b8 | [
"MIT"
] | null | null | null | ODEs/RK_methods.py | Zettergren-Courses/EP501_python | dabaa584e5158eb35197a43f38920a9ed7cc02b8 | [
"MIT"
] | 1 | 2020-10-06T13:29:01.000Z | 2020-10-06T13:29:01.000Z | ODEs/RK_methods.py | Zettergren-Courses/EP501_python | dabaa584e5158eb35197a43f38920a9ed7cc02b8 | [
"MIT"
] | 6 | 2020-09-01T10:35:59.000Z | 2020-09-18T10:12:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 07:23:35 2020
Illustrate the use of Runge-Kutta methods to solve ODEs
@author: zettergm
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
# RHS of ODE for use with RK4.
# BUG FIX: fRK was referenced in the RK4 loop below but never defined,
# so the script raised NameError; restore its definition here.
def fRK(t, y, alpha):
    """Right-hand side of the linear test ODE dy/dt = -alpha*y."""
    return -alpha * y
# Time grid
N=15
tmin=0
tmax=6
t=np.linspace(tmin,tmax,num=N)
dt=t[1]-t[0]
# Analytical solution for comparison: y(t) = y0*exp(-alpha*t)
y0=1
alpha=2
ybar=y0*np.exp(-alpha*t)
# RK2 (midpoint method)
yRK2=np.zeros((N))
yRK2[0]=y0
for n in range(1,N):
    yhalf=yRK2[n-1]+dt/2*(-alpha*yRK2[n-1])
    yRK2[n]=yRK2[n-1]+dt*(-alpha*yhalf)
# RK4 (classical fourth-order Runge-Kutta)
yRK4=np.zeros((N))
yRK4[0]=y0
for n in range(1,N):
    dy1=dt*fRK(t[n-1],yRK4[n-1],alpha)
    dy2=dt*fRK(t[n-1]+dt/2,yRK4[n-1]+dy1/2,alpha)
    dy3=dt*fRK(t[n-1]+dt/2,yRK4[n-1]+dy2/2,alpha)
    dy4=dt*fRK(t[n-1]+dt,yRK4[n-1]+dy3,alpha)
    yRK4[n]=yRK4[n-1]+1/6*(dy1+2*dy2+2*dy3+dy4)
# Plot results
plt.figure()
plt.plot(t,ybar,"o-")
plt.xlabel("t")
plt.ylabel("y(t)")
plt.plot(t,yRK2,"--")
plt.plot(t,yRK4,"-.")
plt.legend(("exact","RK2","RK4"))
plt.show()
# RK2 stability plot: gain factor G = 1 - a*dt + (a*dt)^2/2 vs a*dt
adt=np.linspace(0.01,3,20)
ladt=adt.size
G=np.zeros((ladt))
for igain in range(0,ladt):
    G[igain]=(1-adt[igain]+1/2*adt[igain]**2)
plt.figure()
plt.plot(adt,G,"o")
plt.xlabel("a*dt")
plt.ylabel("gain factor")
plt.show() | 17.012821 | 55 | 0.635268 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 07:23:35 2020
Illustrate the use of Runge-Kutta methods to solve ODEs
@author: zettergm
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
# RHS of ODE for use with RK4
def fRK(t, y, alpha):
    """Right-hand side of the linear test ODE dy/dt = -alpha*y."""
    return -alpha * y
# Time grid: N uniform points on [tmin, tmax]
N=15
tmin=0
tmax=6
t=np.linspace(tmin,tmax,num=N)
dt=t[1]-t[0]
# Analytical solution for comparison: y(t) = y0*exp(-alpha*t)
y0=1
alpha=2
ybar=y0*np.exp(-alpha*t)
# RK2 (midpoint method): Euler half-step, then full step using the midpoint slope
yRK2=np.zeros((N))
yRK2[0]=y0
for n in range(1,N):
    yhalf=yRK2[n-1]+dt/2*(-alpha*yRK2[n-1])
    yRK2[n]=yRK2[n-1]+dt*(-alpha*yhalf)
# RK4 (classical fourth-order Runge-Kutta): weighted average of four slopes
yRK4=np.zeros((N))
yRK4[0]=y0
for n in range(1,N):
    dy1=dt*fRK(t[n-1],yRK4[n-1],alpha)
    dy2=dt*fRK(t[n-1]+dt/2,yRK4[n-1]+dy1/2,alpha)
    dy3=dt*fRK(t[n-1]+dt/2,yRK4[n-1]+dy2/2,alpha)
    dy4=dt*fRK(t[n-1]+dt,yRK4[n-1]+dy3,alpha)
    yRK4[n]=yRK4[n-1]+1/6*(dy1+2*dy2+2*dy3+dy4)
# Plot results: exact solution vs RK2 and RK4 approximations
plt.figure()
plt.plot(t,ybar,"o-")
plt.xlabel("t")
plt.ylabel("y(t)")
plt.plot(t,yRK2,"--")
plt.plot(t,yRK4,"-.")
plt.legend(("exact","RK2","RK4"))
plt.show()
# RK2 stability plot: gain factor G = 1 - a*dt + (a*dt)^2/2 as a function of a*dt
adt=np.linspace(0.01,3,20)
ladt=adt.size
G=np.zeros((ladt))
for igain in range(0,ladt):
    G[igain]=(1-adt[igain]+1/2*adt[igain]**2)
plt.figure()
plt.plot(adt,G,"o")
plt.xlabel("a*dt")
plt.ylabel("gain factor")
plt.show() | 32 | 0 | 22 |
fc148f3dfeed66f22fc7a3ed41f21e0da706a57c | 1,267 | py | Python | album_recommender/rec_system/migrations/0001_initial.py | LevUdaltsov/album_recommender | 86d5f225bab0f8f4d65fd8184abadafb6654155f | [
"MIT"
] | 1 | 2020-11-22T20:00:27.000Z | 2020-11-22T20:00:27.000Z | album_recommender/rec_system/migrations/0001_initial.py | LevUdaltsov/album_recommender | 86d5f225bab0f8f4d65fd8184abadafb6654155f | [
"MIT"
] | null | null | null | album_recommender/rec_system/migrations/0001_initial.py | LevUdaltsov/album_recommender | 86d5f225bab0f8f4d65fd8184abadafb6654155f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-04 19:12
from django.db import migrations, models
| 36.2 | 114 | 0.573007 | # Generated by Django 3.1.2 on 2020-11-04 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the rec_system app (auto-generated by Django's
    # makemigrations): creates the Album and Artist tables.

    # Marks this as the app's first migration (no prior state to build on).
    initial = True

    # No dependencies on other apps' migrations.
    dependencies = [
    ]

    operations = [
        # Album: one row per reviewed album, with review text and score.
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('pub_date', models.CharField(default=None, max_length=255)),
                ('artist', models.CharField(max_length=255, verbose_name='Artist Name')),
                ('content', models.TextField(default='')),
                ('url', models.CharField(default=None, max_length=255)),
                ('score', models.CharField(default=None, max_length=255)),
                ('best_new_music', models.CharField(default=None, max_length=255)),
            ],
        ),
        # Artist: name-only table.
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
        ),
    ]
| 0 | 1,153 | 23 |