hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a121293dfbdfc6387178bb484e2ea480fca7d027
| 2,504
|
py
|
Python
|
config_args.py
|
wenting-zhao/cs6741_replication
|
fbd8275793c5d2b097458c68bc3bba00144665aa
|
[
"MIT"
] | null | null | null |
config_args.py
|
wenting-zhao/cs6741_replication
|
fbd8275793c5d2b097458c68bc3bba00144665aa
|
[
"MIT"
] | null | null | null |
config_args.py
|
wenting-zhao/cs6741_replication
|
fbd8275793c5d2b097458c68bc3bba00144665aa
|
[
"MIT"
] | null | null | null |
import os.path as path
import os
def get_args(parser):
    """Attach every experiment CLI flag to *parser* and parse sys.argv.

    The parser is mutated in place; returns the argparse.Namespace from
    parser.parse_args().
    """
    # (flag, keyword arguments) in the order they should appear in --help.
    flag_specs = [
        ('--dataset', dict(type=str, required=True)),
        ('--data_dir', dict(type=str, required=True)),
        ('--output_dir', dict(type=str)),
        ('--encoder', dict(type=str, choices=['lstm', 'average'], required=True)),
        ('--attention', dict(type=str, choices=['tanh', 'frozen', 'pre-loaded'], required=False)),
        ('--epoch', dict(type=int, required=False, default=8)),
        ('--seed', dict(type=int, default=10)),
        ('--gold_label_dir', dict(type=str, required=False)),
        ('--hidden_size', dict(type=int, default=128)),
        ('--batch_size', dict(type=int, default=32)),
        ('--lmbda', dict(type=float, required=False)),
        ('--lr_decay', dict(type=float, default=0.5, required=False)),
        ('--lr', dict(type=float, default=0.001, required=False)),
        ('--lr_step_size', dict(type=int, default=4, required=False)),
        ('--adversarial', dict(action='store_const', required=False, const=True)),
        ('--use_attention', dict(action='store_true')),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def config_args(args):
    """Validate parsed CLI arguments and derive the attention-mode flags.

    Mutates *args* in place, setting:
      args.frozen_attn     -- True only for the (non-adversarial) 'frozen' mode
      args.pre_loaded_attn -- True only for the (non-adversarial) 'pre-loaded' mode

    Raises:
      Exception: missing gold-label dir, dataset/dir mismatch, or missing lambda.
      LookupError: neither adversarial nor a recognized attention mode.
    """
    # A directory of gold attention distributions is required whenever the
    # model consumes pre-computed attentions (pre-loaded or adversarial).
    if (args.attention == 'pre-loaded' or args.adversarial) and not args.gold_label_dir :
        raise Exception("You must specify a gold-label directory for attention distributions")
    # Guard against pointing at gold labels generated for a different dataset.
    if args.gold_label_dir and args.dataset.lower() not in args.gold_label_dir and args.dataset not in args.gold_label_dir :
        raise Exception("Gold-attention labels directory does not match specified dataset")
    # BUGFIX: compare against None rather than truthiness so an intentional
    # `--lmbda 0.0` is accepted for the adversarial model.
    if args.adversarial and args.lmbda is None :
        raise Exception("Must specify a lambda value for the adversarial model")
    if args.adversarial :
        args.frozen_attn = False
        args.pre_loaded_attn = False
    elif args.attention == 'frozen' :
        args.frozen_attn = True
        args.pre_loaded_attn = False
    elif args.attention == 'tanh' :
        args.frozen_attn = False
        args.pre_loaded_attn = False
    elif args.attention == 'pre-loaded': # not an adversarial model
        args.frozen_attn = False
        args.pre_loaded_attn = True
    else :
        raise LookupError("Attention not found ...")
| 45.527273
| 124
| 0.703674
|
b1abc9e7b1923d2e56f452c6891ddaa959d95b40
| 24,247
|
py
|
Python
|
src/main/python/apache/aurora/client/commands/core.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/apache/aurora/client/commands/core.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/apache/aurora/client/commands/core.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command-line client for managing jobs with the Aurora scheduler.
"""
from __future__ import print_function
import collections
from datetime import datetime
import json
import os
import pprint
import subprocess
import sys
import time
from tempfile import NamedTemporaryFile
from apache.aurora.client.base import (
check_and_log_response,
deprecation_warning,
die,
handle_open,
requires,
synthesize_url)
from apache.aurora.client.api.disambiguator import LiveJobDisambiguator
from apache.aurora.client.api.job_monitor import JobMonitor
from apache.aurora.client.api.quota_check import print_quota
from apache.aurora.client.api.updater_util import UpdaterConfig
from apache.aurora.client.config import get_config, GlobalHookRegistry
from apache.aurora.client.factory import make_client, make_client_factory
from apache.aurora.client.options import (
CLUSTER_CONFIG_OPTION,
CLUSTER_INVOKE_OPTION,
CLUSTER_NAME_OPTION,
DISABLE_HOOKS_OPTION,
ENV_CONFIG_OPTION,
ENVIRONMENT_BIND_OPTION,
FROM_JOBKEY_OPTION,
HEALTH_CHECK_INTERVAL_SECONDS_OPTION,
JSON_OPTION,
OPEN_BROWSER_OPTION,
SHARDS_OPTION,
WAIT_UNTIL_OPTION)
from apache.aurora.common.aurora_job_key import AuroraJobKey
from gen.apache.aurora.constants import ACTIVE_STATES, CURRENT_API_VERSION, AURORA_EXECUTOR_NAME
from gen.apache.aurora.ttypes import ExecutorConfig, ResponseCode, ScheduleStatus
from twitter.common import app, log
from twitter.common.python.pex import PexInfo
from twitter.common.python.dirwrapper import PythonDirectoryWrapper
def get_job_config(job_spec, config_file, options):
  """Load the job configuration named by *job_spec* from *config_file*.

  Accepts either the canonical CLUSTER/ROLE/ENV/NAME job path or (with a
  deprecation warning) a bare job name, in which case cluster/env come from
  the command-line options.  Missing --json / bindings options default to
  False / ().
  """
  try:
    key = AuroraJobKey.from_path(job_spec)
    select_cluster = key.cluster
    select_env = key.env
    select_role = key.role
    jobname = key.name
  except AuroraJobKey.Error:
    deprecation_warning('Please refer to your job in CLUSTER/ROLE/ENV/NAME format.')
    select_cluster = options.cluster if options.cluster else None
    select_env = options.env
    select_role = None
    jobname = job_spec
  # Not every subcommand registers these options; fall back to safe defaults.
  json_option = getattr(options, 'json', False)
  bindings = getattr(options, 'bindings', ())
  return get_config(
      jobname,
      config_file,
      json_option,
      bindings,
      select_cluster=select_cluster,
      select_role=select_role,
      select_env=select_env)
@app.command
def version(args):
  """usage: version

  Prints information about the version of the aurora client being run.
  """
  # Build metadata lives in the PEX this client was launched from; it may be
  # unavailable (e.g. when running from a source checkout).
  try:
    build_info = PexInfo.from_pex(PythonDirectoryWrapper.get(sys.argv[0])).build_properties
    print("Aurora client build info:")
    print("\tsha: %s" % build_info['sha'])
    print("\tdate: %s" % build_info['date'])
  except (IOError, PythonDirectoryWrapper.Error):
    print("Aurora client build info not available")
  print("Aurora API version: %s" % CURRENT_API_VERSION)
def maybe_disable_hooks(options):
  """Disable all client hooks when the user supplied a disable reason.

  This could be done with a callback in the option, but this is better for
  the way that we test clientv1.
  """
  reason = options.disable_all_hooks_reason
  if reason is None:
    return
  GlobalHookRegistry.disable_hooks()
  log.info('Client hooks disabled; reason given by user: %s' % reason)
@app.command
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(WAIT_UNTIL_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def create(job_spec, config_file):
  """usage: create cluster/role/env/job config

  Creates a job based on a configuration file.
  """
  options = app.get_options()
  # Hooks may be globally disabled via --disable-all-hooks-reason.
  maybe_disable_hooks(options)
  try:
    config = get_job_config(job_spec, config_file, options)
  except ValueError as v:
    # A bad configuration is a user error: report it and exit non-zero.
    print("Error: %s" % v)
    sys.exit(1)
  api = make_client(config.cluster())
  monitor = JobMonitor(api, config.role(), config.environment(), config.name())
  resp = api.create_job(config)
  check_and_log_response(resp)
  # Optionally open the scheduler page for the new job in a browser (--open_browser).
  handle_open(api.scheduler_proxy.scheduler_client().url, config.role(), config.environment(),
      config.name())
  # Optionally block until the job reaches the requested state (--wait_until).
  if options.wait_until == 'RUNNING':
    monitor.wait_until(monitor.running_or_finished)
  elif options.wait_until == 'FINISHED':
    monitor.wait_until(monitor.terminal)
@app.command
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(FROM_JOBKEY_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def diff(job_spec, config_file):
  """usage: diff cluster/role/env/job config

  Compares a job configuration against a running job.
  By default the diff will be displayed using 'diff', though you may choose an alternate
  diff program by specifying the DIFF_VIEWER environment variable."""
  options = app.get_options()
  config = get_job_config(job_spec, config_file, options)
  # --from lets the user diff against a job running under a different key.
  if options.rename_from:
    cluster, role, env, name = options.rename_from
  else:
    cluster = config.cluster()
    role = config.role()
    env = config.environment()
    name = config.name()
  api = make_client(cluster)
  # "Remote" side of the diff: active tasks the scheduler currently knows.
  resp = api.query(api.build_query(role, name, statuses=ACTIVE_STATES, env=env))
  if resp.responseCode != ResponseCode.OK:
    die('Request failed, server responded with "%s"' % resp.message)
  remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]
  # "Local" side: tasks the scheduler would create from the local config.
  resp = api.populate_job_config(config)
  if resp.responseCode != ResponseCode.OK:
    die('Request failed, server responded with "%s"' % resp.message)
  local_tasks = resp.result.populateJobResult.populated
  pp = pprint.PrettyPrinter(indent=2)
  def pretty_print_task(task):
    # The raw configuration is not interesting - we only care about what gets parsed.
    task.configuration = None
    task.executorConfig = ExecutorConfig(
        name=AURORA_EXECUTOR_NAME,
        data=json.loads(task.executorConfig.data))
    return pp.pformat(vars(task))
  def pretty_print_tasks(tasks):
    return ',\n'.join([pretty_print_task(t) for t in tasks])
  def dump_tasks(tasks, out_file):
    out_file.write(pretty_print_tasks(tasks))
    out_file.write('\n')
    out_file.flush()
  # Render both sides to temp files and hand them to an external diff tool.
  diff_program = os.environ.get('DIFF_VIEWER', 'diff')
  with NamedTemporaryFile() as local:
    dump_tasks(local_tasks, local)
    with NamedTemporaryFile() as remote:
      dump_tasks(remote_tasks, remote)
      result = subprocess.call([diff_program, remote.name, local.name])
      # Unlike most commands, diff doesn't return zero on success; it returns
      # 1 when a successful diff is non-empty.
      if result != 0 and result != 1:
        return result
      else:
        return 0
@app.command(name='open')
def do_open(args, _):
  """usage: open cluster[/role[/env/job]]

  Opens the scheduler page for a cluster, role or job in the default web browser.
  """
  parts = args[0].split("/")
  cluster_name = parts[0] if len(parts) > 0 else None
  role = parts[1] if len(parts) > 1 else None
  env = job = None
  if len(parts) > 2:
    env = parts[2]
    if len(parts) > 3:
      job = parts[3]
    else:
      # TODO(ksweeney): Remove this after MESOS-2945 is completed.
      die('env scheduler pages are not yet implemented, please specify job')
  if not cluster_name:
    die('cluster is required')
  api = make_client(cluster_name)
  import webbrowser
  webbrowser.open_new_tab(
      synthesize_url(api.scheduler_proxy.scheduler_client().url, role, env, job))
@app.command
@app.command_option('--local', dest='local', default=False, action='store_true',
    help='Inspect the configuration as would be created by the "spawn" command.')
@app.command_option('--raw', dest='raw', default=False, action='store_true',
    help='Show the raw configuration.')
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def inspect(job_spec, config_file):
  """usage: inspect cluster/role/env/job config

  Verifies that a job can be parsed from a configuration file, and displays
  the parsed configuration.
  """
  options = app.get_options()
  config = get_job_config(job_spec, config_file, options)
  if options.raw:
    print('Parsed job config: %s' % config.job())
    return
  # BUGFIX: config.job() was previously assigned to job_thrift twice in a row;
  # evaluate it once and reuse the result.
  job_thrift = config.job()
  job = config.raw()
  print('Job level information')
  print(' name: %s' % job.name())
  print(' role: %s' % job.role())
  print(' contact: %s' % job.contact())
  print(' cluster: %s' % job.cluster())
  print(' instances: %s' % job.instances())
  if job.has_cron_schedule():
    print(' cron:')
    print(' schedule: %s' % job.cron_schedule())
    print(' policy: %s' % job.cron_collision_policy())
  if job.has_constraints():
    print(' constraints:')
    for constraint, value in job.constraints().get().items():
      print(' %s: %s' % (constraint, value))
  print(' service: %s' % job_thrift.taskConfig.isService)
  print(' production: %s' % bool(job.production().get()))
  print()
  task = job.task()
  print('Task level information')
  print(' name: %s' % task.name())
  if len(task.constraints().get()) > 0:
    print(' constraints:')
    for constraint in task.constraints():
      # A constraint is an ordering over its sub-tasks.
      print(' %s' % (' < '.join(st.get() for st in constraint.order())))
  print()
  processes = task.processes()
  for process in processes:
    print('Process %s:' % process.name())
    if process.daemon().get():
      print(' daemon')
    if process.ephemeral().get():
      print(' ephemeral')
    if process.final().get():
      print(' final')
    print(' cmdline:')
    for line in process.cmdline().get().splitlines():
      print(' ' + line)
    print()
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
def start_cron(args, options):
  """usage: start_cron cluster/role/env/job

  Invokes a cron job immediately, out of its normal cron cycle.
  This does not affect the cron cycle in any way.
  """
  maybe_disable_hooks(options)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  # When a config file was supplied, load it so client hooks can inspect it.
  config = None
  if config_file:
    config = get_job_config(job_key.to_path(), config_file, options)
  resp = api.start_cronjob(job_key, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(
    '--pretty',
    dest='pretty',
    default=False,
    action='store_true',
    help='Show job information in prettyprinted format')
@app.command_option(
    '--show-cron',
    '-c',
    dest='show_cron_schedule',
    default=False,
    action='store_true',
    help='List jobs registered with the Aurora scheduler')
@requires.exactly('cluster/role')
def list_jobs(cluster_and_role):
  """usage: list_jobs [--show-cron] cluster/role/env/job

  Shows all jobs that match the job-spec known by the scheduler.
  If --show-cron is specified, then also shows the registered cron schedule.
  """
  # NOTE: both closures below read `options` and `cluster`, which are assigned
  # later in this function (after the defs, before any call) -- do not reorder.
  def show_job_simple(job):
    if options.show_cron_schedule:
      print(('{0}/{1.key.role}/{1.key.environment}/{1.key.name}' +
          '\t\'{1.cronSchedule}\'\t{1.cronCollisionPolicy}').format(cluster, job))
    else:
      print('{0}/{1.key.role}/{1.key.environment}/{1.key.name}'.format(cluster, job))
  def show_job_pretty(job):
    print("Job %s/%s/%s/%s:" %
        (cluster, job.key.role, job.key.environment, job.key.name))
    print('\tcron schedule: %s' % job.cronSchedule)
    print('\tcron policy: %s' % job.cronCollisionPolicy)
  options = app.get_options()
  # Pretty formatting only differs when cron info was requested too.
  if options.show_cron_schedule and options.pretty:
    print_fn = show_job_pretty
  else:
    print_fn = show_job_simple
  # Take the cluster_and_role parameter, and split it into its two components.
  if cluster_and_role.count('/') != 1:
    die('list_jobs parameter must be in cluster/role format')
  (cluster,role) = cluster_and_role.split('/')
  api = make_client(cluster)
  resp = api.get_jobs(role)
  check_and_log_response(resp)
  for job in resp.result.getJobsResult.configs:
    print_fn(job)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(SHARDS_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
def kill(args, options):
  """usage: kill --shards=shardspec cluster/role/env/job

  Kills a group of tasks in a running job, blocking until all specified tasks have terminated.
  """
  maybe_disable_hooks(options)
  # Require an explicit shard list so a bare `kill` cannot take down a whole job.
  if options.shards is None:
    print('Shards option is required for kill; use killall to kill all shards', file=sys.stderr)
    exit(1)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  # NOTE(review): this re-fetch shadows the `options` parameter and looks
  # redundant -- presumably it returns the same object; confirm before removing.
  options = app.get_options()
  # When a config file was supplied, load it so client hooks can inspect it.
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.kill_job(job_key, options.shards, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
def killall(args, options):
  """usage: killall cluster/role/env/job

  Kills all tasks in a running job, blocking until all specified tasks have been terminated.
  """
  maybe_disable_hooks(options)
  job_key = AuroraJobKey.from_path(args[0])
  # An optional second argument names a config file, used only to drive hooks.
  config_file = args[1] if len(args) > 1 else None
  if config_file:
    config = get_job_config(job_key.to_path(), config_file, options)
  else:
    config = None
  api = make_client(job_key.cluster)
  # shards=None means "every shard in the job".
  resp = api.kill_job(job_key, None, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
def status(args, options):
  """usage: status cluster/role/env/job

  Fetches and prints information about the active tasks in a job.
  """
  def is_active(task):
    return task.status in ACTIVE_STATES
  def print_task(scheduled_task):
    # Render one task: resources, ports, failure count, events, metadata.
    assigned_task = scheduled_task.assignedTask
    taskInfo = assigned_task.task
    taskString = ''
    if taskInfo:
      taskString += '''cpus: %s, ram: %s MB, disk: %s MB''' % (taskInfo.numCpus,
          taskInfo.ramMb,
          taskInfo.diskMb)
    if assigned_task.assignedPorts:
      taskString += '\n\tports: %s' % assigned_task.assignedPorts
    taskString += '\n\tfailure count: %s (max %s)' % (scheduled_task.failureCount,
        taskInfo.maxTaskFailures)
    taskString += '\n\tevents:'
    # Event timestamps arrive in epoch milliseconds.
    for event in scheduled_task.taskEvents:
      taskString += '\n\t\t %s %s: %s' % (datetime.fromtimestamp(event.timestamp / 1000),
          ScheduleStatus._VALUES_TO_NAMES[event.status],
          event.message)
    taskString += '\n\tmetadata:'
    if assigned_task.task.metadata is not None:
      for md in assigned_task.task.metadata:
        taskString += ('\n\t\t%s: %s' % (md.key, md.value))
    return taskString
  def print_tasks(tasks):
    for task in tasks:
      taskString = print_task(task)
      log.info('role: %s, env: %s, name: %s, shard: %s, status: %s on %s\n%s' %
          (task.assignedTask.task.owner.role,
          task.assignedTask.task.environment,
          task.assignedTask.task.jobName,
          task.assignedTask.instanceId,
          ScheduleStatus._VALUES_TO_NAMES[task.status],
          task.assignedTask.slaveHost,
          taskString))
  api, job_key, _ = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  resp = api.check_status(job_key)
  check_and_log_response(resp)
  tasks = resp.result.scheduleStatusResult.tasks
  if tasks:
    # Python 2 `filter` returns a list, so len() below is valid.
    active_tasks = filter(is_active, tasks)
    log.info('Active Tasks (%s)' % len(active_tasks))
    print_tasks(active_tasks)
    inactive_tasks = filter(lambda x: not is_active(x), tasks)
    log.info('Inactive Tasks (%s)' % len(inactive_tasks))
    print_tasks(inactive_tasks)
  else:
    log.info('No tasks found.')
@app.command
@app.command_option(SHARDS_OPTION)
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)
@app.command_option(DISABLE_HOOKS_OPTION)
@app.command_option(
    '--force',
    dest='force',
    default=True, # TODO(maximk): Temporary bandaid for MESOS-4310 until a better fix is available.
    action='store_true',
    help='Turn off warning message that the update looks large enough to be disruptive.')
@requires.exactly('cluster/role/env/job', 'config')
def update(job_spec, config_file):
  """usage: update cluster/role/env/job config

  Performs a rolling upgrade on a running job, using the update configuration
  within the config file as a control for update velocity and failure tolerance.

  Updates are fully controlled client-side, so aborting an update halts the
  update and leaves the job in a 'locked' state on the scheduler.
  Subsequent update attempts will fail until the update is 'unlocked' using the
  'cancel_update' command.

  The updater only takes action on shards in a job that have changed, meaning
  that changing a single shard will only induce a restart on the changed shard.

  You may want to consider using the 'diff' subcommand before updating,
  to preview what changes will take effect.
  """
  def warn_if_dangerous_change(api, job_spec, config):
    # Get the current job status, so that we can check if there's anything
    # dangerous about this update.
    job_key = AuroraJobKey(config.cluster(), config.role(), config.environment(), config.name())
    resp = api.query(api.build_query(config.role(), config.name(),
        statuses=ACTIVE_STATES, env=config.environment()))
    if resp.responseCode != ResponseCode.OK:
      die('Could not get job status from server for comparison: %s' % resp.message)
    remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]
    resp = api.populate_job_config(config)
    if resp.responseCode != ResponseCode.OK:
      die('Server could not populate job config for comparison: %s' % resp.message)
    local_task_count = len(resp.result.populateJobResult.populated)
    remote_task_count = len(remote_tasks)
    # BUGFIX: the second clause previously read
    # `local_task_count <= 4 * remote_task_count`, which together with the
    # first clause covers every possible value -- the warning always fired.
    # Warn only when the task count grows or shrinks by 4x, or drops to zero.
    if (local_task_count >= 4 * remote_task_count
        or local_task_count <= remote_task_count / 4
        or local_task_count == 0):
      print('Warning: this update is a large change. Press ^c within 5 seconds to abort')
      time.sleep(5)
  options = app.get_options()
  maybe_disable_hooks(options)
  config = get_job_config(job_spec, config_file, options)
  api = make_client(config.cluster())
  if not options.force:
    warn_if_dangerous_change(api, job_spec, config)
  resp = api.update_job(config, options.health_check_interval_seconds, options.shards)
  check_and_log_response(resp)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)
@app.command_option(OPEN_BROWSER_OPTION)
@app.command_option(SHARDS_OPTION)
@app.command_option(
    '--batch_size',
    dest='batch_size',
    type=int,
    default=1,
    help='Number of shards to be restarted in one iteration.')
@app.command_option(
    '--max_per_shard_failures',
    dest='max_per_shard_failures',
    type=int,
    default=0,
    help='Maximum number of restarts per shard during restart. Increments total failure count when '
        'this limit is exceeded.')
@app.command_option(
    '--max_total_failures',
    dest='max_total_failures',
    type=int,
    default=0,
    help='Maximum number of shard failures to be tolerated in total during restart.')
@app.command_option(
    '--restart_threshold',
    dest='restart_threshold',
    type=int,
    default=60,
    help='Maximum number of seconds before a shard must move into the RUNNING state before '
        'considered a failure.')
@app.command_option(
    '--watch_secs',
    dest='watch_secs',
    type=int,
    default=30,
    help='Minimum number of seconds a shard must remain in RUNNING state before considered a '
        'success.')
@app.command_option(DISABLE_HOOKS_OPTION)
def restart(args, options):
  """usage: restart cluster/role/env/job
               [--shards=SHARDS]
               [--batch_size=INT]
               [--updater_health_check_interval_seconds=SECONDS]
               [--max_per_shard_failures=INT]
               [--max_total_failures=INT]
               [--restart_threshold=INT]
               [--watch_secs=SECONDS]

  Performs a rolling restart of shards within a job.

  Restarts are fully controlled client-side, so aborting halts the restart.
  """
  maybe_disable_hooks(options)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  # When a config file was supplied, load it so client hooks can inspect it.
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  # Velocity/failure-tolerance knobs for the client-side restarter.
  updater_config = UpdaterConfig(
      options.batch_size,
      options.restart_threshold,
      options.watch_secs,
      options.max_per_shard_failures,
      options.max_total_failures)
  resp = api.restart(job_key, options.shards, updater_config,
      options.health_check_interval_seconds, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
@app.command
@app.command_option(CLUSTER_INVOKE_OPTION)
def cancel_update(args, options):
  """usage: cancel_update cluster/role/env/job

  Unlocks a job for updates.
  A job may be locked if a client's update session terminated abnormally,
  or if another user is actively updating the job. This command should only
  be used when the user is confident that they are not conflicting with another user.
  """
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  # The config, when supplied, exists solely so client hooks can see it.
  config = None
  if config_file:
    config = get_job_config(job_key.to_path(), config_file, options)
  check_and_log_response(api.cancel_update(job_key, config=config))
@app.command
@app.command_option(CLUSTER_NAME_OPTION)
@requires.exactly('role')
def get_quota(role):
  """usage: get_quota --cluster=CLUSTER role

  Prints the production quota that has been allocated to a user.
  """
  options = app.get_options()
  quota_result = make_client(options.cluster).get_quota(role).result.getQuotaResult
  print_quota(quota_result.quota, 'Total allocated quota', role)
  # Consumption figures are optional in the response; print only when present.
  if quota_result.prodConsumption:
    print_quota(quota_result.prodConsumption,
        'Resources consumed by production jobs',
        role)
  if quota_result.nonProdConsumption:
    print_quota(quota_result.nonProdConsumption,
        'Resources consumed by non-production jobs',
        role)
| 36.6823
| 100
| 0.717037
|
71bd1bdce05637c200f5ac627256ed7614659638
| 2,044
|
py
|
Python
|
fugue_analytics/metrics/sources/slack.py
|
kvnkho/fugue-analytics
|
7a8aee2cadd9d91291a71b78de3d367f936ea2ba
|
[
"Apache-2.0"
] | null | null | null |
fugue_analytics/metrics/sources/slack.py
|
kvnkho/fugue-analytics
|
7a8aee2cadd9d91291a71b78de3d367f936ea2ba
|
[
"Apache-2.0"
] | null | null | null |
fugue_analytics/metrics/sources/slack.py
|
kvnkho/fugue-analytics
|
7a8aee2cadd9d91291a71b78de3d367f936ea2ba
|
[
"Apache-2.0"
] | null | null | null |
from prefect import get_run_logger, task
import os
import requests as re
from fugue_analytics.utilities.postgres import execute_query
import pandas as pd
import json
from datetime import date, datetime
@task(retries = 3)
def get_slack_member_count() -> pd.DataFrame:
    """
    Slack doesn't seem to have an API to get workspace members so we
    need to use Orbit to get the join events and then add it to the
    previous total retrieves from the database

    This is the sole reason the metrics table needs datetime resolution

    Returns a one-row DataFrame (datetime, source, value) with the new
    cumulative join count, or an empty DataFrame when there is nothing new.
    """
    token = os.environ["ORBIT_TOKEN"]  # Orbit API bearer token (required)
    current_datetime = datetime.now()
    current_date = date.today().strftime("%Y-%m-%d")
    # Orbit activity feed filtered to slack channel-join events since midnight.
    url = f"https://app.orbit.love/api/v1/fugue/activities?activity_type=slack%3Achannel%3Ajoined&start_date={current_date}"
    headers = {"Accept": "application/json", "Authorization": f"Bearer {token}"}
    response = re.get(url, headers=headers)
    df = pd.json_normalize(json.loads(response.text)["data"])
    print(df.head())
    # Pull latest record from database
    query = """SELECT source, MAX(datetime), MAX(value)
    FROM metrics_over_time
    WHERE source = 'Slack Channel Joins'
    GROUP BY source
    """
    latest = execute_query(query)
    print(latest)
    latest_time = latest[0][1]  # NOTE(review): unused -- see TO-DO below
    latest_val = latest[0][2]   # previous cumulative join count
    # Filtering records already counted
    # TO-DO: account for datetime
    if df.shape[0] > 0:
        # Orbit time comes in UTC
        # NOTE(review): the left side is tz-aware (US/Eastern) while
        # current_datetime is naive; pandas raises on aware-vs-naive
        # comparison -- confirm the intended timezone handling.
        filtered_df = df.loc[pd.to_datetime(df["attributes.created_at"]).dt.tz_convert('US/Eastern') >= current_datetime]
        # NOTE(review): `> 1` skips the single-join case -- presumably `> 0`
        # was intended; confirm before changing.
        if filtered_df.shape[0] > 1:
            res = pd.DataFrame({"datetime": pd.to_datetime([current_datetime]),
                                "source": ["Slack Channel Joins"],
                                "value": [latest_val + filtered_df.shape[0]]})
            return res
    # Nothing new to append to the metrics table.
    return pd.DataFrame()
if __name__ == "__main__":
    # BUGFIX: `flow` was referenced without being imported, so running this
    # module directly raised NameError.  Import it at the entry point.
    from prefect import flow

    @flow()
    def github_slack_count():
        res = get_slack_member_count()

    github_slack_count()
| 36.5
| 124
| 0.655088
|
fdf2d3e57c7678254175e02eef8f7afa1e8a6946
| 6,971
|
py
|
Python
|
Lib/idlelib/idle_test/test_text.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | 42
|
2018-12-12T01:00:59.000Z
|
2022-03-27T07:32:29.000Z
|
Lib/idlelib/idle_test/test_text.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | 13
|
2020-11-06T13:50:45.000Z
|
2022-01-25T07:17:37.000Z
|
Lib/idlelib/idle_test/test_text.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | 8
|
2020-11-14T04:30:26.000Z
|
2021-01-16T17:55:19.000Z
|
# Test mock_tk.Text class against tkinter.Text class by running same tests with both.
import unittest
from test.test_support import requires
from _tkinter import TclError
class TextTest(object):
    # Shared test-case mixin: concrete subclasses set `Text` to either the
    # mock Text class or tkinter.Text so the same assertions run against both.
    hw = 'hello\nworld' # usual initial insert after initialization
    hwn = hw+'\n' # \n present at initialization, before insert
    Text = None  # injected by the concrete subclass
    def setUp(self):
        # Instantiate whichever Text implementation the subclass provides.
        self.text = self.Text()
    def test_init(self):
        # A fresh widget contains exactly one newline and nothing after 'end'.
        self.assertEqual(self.text.get('1.0'), '\n')
        self.assertEqual(self.text.get('end'), '')
def test_index_empty(self):
index = self.text.index
for dex in (-1.0, 0.3, '1.-1', '1.0', '1.0 lineend', '1.end', '1.33',
'insert'):
self.assertEqual(index(dex), '1.0')
for dex in 'end', 2.0, '2.1', '33.44':
self.assertEqual(index(dex), '2.0')
def test_index_data(self):
index = self.text.index
self.text.insert('1.0', self.hw)
for dex in -1.0, 0.3, '1.-1', '1.0':
self.assertEqual(index(dex), '1.0')
for dex in '1.0 lineend', '1.end', '1.33':
self.assertEqual(index(dex), '1.5')
for dex in 'end', '33.44':
self.assertEqual(index(dex), '3.0')
def test_get(self):
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
Equal(get('end'), '')
Equal(get('end', 'end'), '')
Equal(get('1.0'), 'h')
Equal(get('1.0', '1.1'), 'h')
Equal(get('1.0', '1.3'), 'hel')
Equal(get('1.1', '1.3'), 'el')
Equal(get('1.0', '1.0 lineend'), 'hello')
Equal(get('1.0', '1.10'), 'hello')
Equal(get('1.0 lineend'), '\n')
Equal(get('1.1', '2.3'), 'ello\nwor')
Equal(get('1.0', '2.5'), self.hw)
Equal(get('1.0', 'end'), self.hwn)
Equal(get('0.0', '5.0'), self.hwn)
def test_insert(self):
insert = self.text.insert
get = self.text.get
Equal = self.assertEqual
insert('1.0', self.hw)
Equal(get('1.0', 'end'), self.hwn)
insert('1.0', '') # nothing
Equal(get('1.0', 'end'), self.hwn)
insert('1.0', '*')
Equal(get('1.0', 'end'), '*hello\nworld\n')
insert('1.0 lineend', '*')
Equal(get('1.0', 'end'), '*hello*\nworld\n')
insert('2.3', '*')
Equal(get('1.0', 'end'), '*hello*\nwor*ld\n')
insert('end', 'x')
Equal(get('1.0', 'end'), '*hello*\nwor*ldx\n')
insert('1.4', 'x\n')
Equal(get('1.0', 'end'), '*helx\nlo*\nwor*ldx\n')
def test_no_delete(self):
# if index1 == 'insert' or 'end' or >= end, there is no deletion
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('insert')
Equal(get('1.0', 'end'), self.hwn)
delete('end')
Equal(get('1.0', 'end'), self.hwn)
delete('insert', 'end')
Equal(get('1.0', 'end'), self.hwn)
delete('insert', '5.5')
Equal(get('1.0', 'end'), self.hwn)
delete('1.4', '1.0')
Equal(get('1.0', 'end'), self.hwn)
delete('1.4', '1.4')
Equal(get('1.0', 'end'), self.hwn)
def test_delete_char(self):
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('1.0')
Equal(get('1.0', '1.end'), 'ello')
delete('1.0', '1.1')
Equal(get('1.0', '1.end'), 'llo')
# delete \n and combine 2 lines into 1
delete('1.end')
Equal(get('1.0', '1.end'), 'lloworld')
self.text.insert('1.3', '\n')
delete('1.10')
Equal(get('1.0', '1.end'), 'lloworld')
self.text.insert('1.3', '\n')
delete('1.3', '2.0')
Equal(get('1.0', '1.end'), 'lloworld')
def test_delete_slice(self):
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('1.0', '1.0 lineend')
Equal(get('1.0', 'end'), '\nworld\n')
delete('1.0', 'end')
Equal(get('1.0', 'end'), '\n')
self.text.insert('1.0', self.hw)
delete('1.0', '2.0')
Equal(get('1.0', 'end'), 'world\n')
delete('1.0', 'end')
Equal(get('1.0', 'end'), '\n')
self.text.insert('1.0', self.hw)
delete('1.2', '2.3')
Equal(get('1.0', 'end'), 'held\n')
def test_multiple_lines(self): # insert and delete
self.text.insert('1.0', 'hello')
self.text.insert('1.3', '1\n2\n3\n4\n5')
self.assertEqual(self.text.get('1.0', 'end'), 'hel1\n2\n3\n4\n5lo\n')
self.text.delete('1.3', '5.1')
self.assertEqual(self.text.get('1.0', 'end'), 'hello\n')
def test_compare(self):
compare = self.text.compare
Equal = self.assertEqual
# need data so indexes not squished to 1,0
self.text.insert('1.0', 'First\nSecond\nThird\n')
self.assertRaises(TclError, compare, '2.2', 'op', '2.2')
for op, less1, less0, equal, greater0, greater1 in (
('<', True, True, False, False, False),
('<=', True, True, True, False, False),
('>', False, False, False, True, True),
('>=', False, False, True, True, True),
('==', False, False, True, False, False),
('!=', True, True, False, True, True),
):
Equal(compare('1.1', op, '2.2'), less1, op)
Equal(compare('2.1', op, '2.2'), less0, op)
Equal(compare('2.2', op, '2.2'), equal, op)
Equal(compare('2.3', op, '2.2'), greater0, op)
Equal(compare('3.3', op, '2.2'), greater1, op)
class MockTextTest(TextTest, unittest.TestCase):
    """Run the shared TextTest suite against the mock Text widget.

    Uses idlelib's pure-Python mock, so no GUI is required.
    """

    @classmethod
    def setUpClass(cls):
        from idlelib.idle_test.mock_tk import Text
        cls.Text = Text

    def test_decode(self):
        # test endflags (-1, 0) not tested by test_index (which uses +1)
        # _decode is mock-only, so this test lives here rather than in
        # the shared TextTest mixin.
        decode = self.text._decode
        Equal = self.assertEqual
        self.text.insert('1.0', self.hw)
        Equal(decode('end', -1), (2, 5))
        Equal(decode('3.1', -1), (2, 5))
        Equal(decode('end', 0), (2, 6))
        Equal(decode('3.1', 0), (2, 6))
class TkTextTest(TextTest, unittest.TestCase):
    """Run the shared TextTest suite against the real Tkinter Text widget.

    Skipped (via requires('gui')) when no display is available.
    """

    @classmethod
    def setUpClass(cls):
        requires('gui')
        from Tkinter import Tk, Text
        cls.Text = Text
        # Keep one Tk root alive for all tests in this class.
        cls.root = Tk()

    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
        del cls.root
if __name__ == '__main__':
    # exit=False prevents unittest from raising SystemExit after the run.
    unittest.main(verbosity=2, exit=False)
| 30.574561
| 86
| 0.491178
|
740b83b2de4a7ef2eabbb70f29acc28011adf3a7
| 1,902
|
py
|
Python
|
car_autohome/car_autohome/pipelines.py
|
aircov/Scrapy_ImagePipeline_Spider
|
d0bbf78ddda8031da836c0b9eb8a487f7151c1be
|
[
"MIT"
] | 4
|
2018-12-15T01:51:25.000Z
|
2019-02-13T14:18:36.000Z
|
car_autohome/car_autohome/pipelines.py
|
aircov/Scrapy_ImagePipeline_Spider
|
d0bbf78ddda8031da836c0b9eb8a487f7151c1be
|
[
"MIT"
] | null | null | null |
car_autohome/car_autohome/pipelines.py
|
aircov/Scrapy_ImagePipeline_Spider
|
d0bbf78ddda8031da836c0b9eb8a487f7151c1be
|
[
"MIT"
] | 1
|
2019-09-07T17:49:45.000Z
|
2019-09-07T17:49:45.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
import logging
logger = logging.getLogger("CarAutohomeImagePipeline")
class CarAutohomePipeline(object):
    """Default no-op item pipeline.

    Returns every item unchanged so that subsequent pipeline stages
    (e.g. the image pipeline below) still receive it.
    """

    def process_item(self, item, spider):
        """Pass *item* through untouched."""
        return item
# Custom ImagesPipeline subclass: per-category folders, drop failed items.
class CarAutohomeImagePipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        """
        Called before each download request is sent; this method actually
        emits the image download requests.
        :param item: the item returned from spider.py
        :param info:
        :return:
        """
        for url in item["image_urls"]:
            # The data in meta comes from the spider and is forwarded via
            # meta to the file_path method below.
            yield Request(url, meta={"item": item})

    def file_path(self, request, response=None, info=None):
        """
        Save each image under a per-category directory.
        :param request: one image download request
        :param response:
        :param info:
        :return: the per-category relative path for the image
        strip: removes characters that are illegal in Windows folder
        names, so directory creation cannot fail
        """
        # Receive the item passed through meta above.
        item = request.meta["item"]
        folder = item["category"]
        # Strip characters that are illegal in Windows folder names;
        # without this step names can be garbled or the download fails.
        folder_strip = re.sub(r"[?\\*|“<>:/]", "", str(folder))
        # Use the last URL path segment as the image file name.
        image_guid = request.url.split("/")[-1]
        filename = u'{0}/{1}'.format(folder_strip, image_guid)
        return filename

    def item_completed(self, results, item, info):
        """
        Called when the downloads finish with a list `results` of
        (success, info) tuples: the first element is a bool indicating
        whether the request succeeded, the second the downloaded resource.
        NOTE(review): only results[0][0] is checked, i.e. the first
        download decides whether the item is kept -- confirm intended.
        """
        if not results[0][0]:
            # On failure, raise to drop this item; dropped items are not
            # processed by any later pipeline component.
            raise DropItem('下载失败')
        # Log success ("image downloaded successfully").
        logger.debug('下载图片成功')
        return item
| 27.565217
| 65
| 0.625657
|
ba99479a817b83768bf349a92215c38680824b47
| 1,546
|
py
|
Python
|
src/models_trajGRU/utils.py
|
inoue0406/adversarial-nowcasting
|
431f6bc4b7d731e85ca52f1bf81638b31c4be17e
|
[
"MIT"
] | 2
|
2021-03-09T06:15:07.000Z
|
2022-03-03T15:40:18.000Z
|
src/models_trajGRU/utils.py
|
inoue0406/regional-analysis-paper
|
672b16d3a3178e258a6057ebf2d89ed7cf4311c1
|
[
"MIT"
] | null | null | null |
src/models_trajGRU/utils.py
|
inoue0406/regional-analysis-paper
|
672b16d3a3178e258a6057ebf2d89ed7cf4311c1
|
[
"MIT"
] | null | null | null |
import numpy as np
from torch import nn
from collections import OrderedDict
def make_layers(block):
    """Build an ``nn.Sequential`` from an ordered layer specification.

    Parameters
    ----------
    block: collections.OrderedDict
        Maps layer names to parameter lists.  The name selects the layer
        type by substring:

        * ``'pool'``   -> ``nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])``
        * ``'deconv'`` -> ``nn.ConvTranspose2d(v[0], v[1], kernel_size=v[2], stride=v[3], padding=v[4])``
        * ``'conv'``   -> ``nn.Conv2d(v[0], v[1], kernel_size=v[2], stride=v[3], padding=v[4])``

        A ``'relu'`` or ``'leaky'`` substring in a (de)conv name appends a
        ReLU or LeakyReLU(0.2) activation directly after that layer.

    Returns
    -------
    torch.nn.Sequential
        The layers in specification order.

    Raises
    ------
    NotImplementedError
        If a layer name matches none of the known layer types.
    """
    layers = []
    for layer_name, v in block.items():
        if 'pool' in layer_name:
            layers.append((layer_name,
                           nn.MaxPool2d(kernel_size=v[0], stride=v[1],
                                        padding=v[2])))
        # NOTE: 'deconv' must be tested before 'conv' because the
        # substring 'conv' is contained in 'deconv'.
        elif 'deconv' in layer_name:
            layers.append((layer_name,
                           nn.ConvTranspose2d(in_channels=v[0],
                                              out_channels=v[1],
                                              kernel_size=v[2], stride=v[3],
                                              padding=v[4])))
            _append_activation(layers, layer_name)
        elif 'conv' in layer_name:
            layers.append((layer_name,
                           nn.Conv2d(in_channels=v[0], out_channels=v[1],
                                     kernel_size=v[2], stride=v[3],
                                     padding=v[4])))
            _append_activation(layers, layer_name)
        else:
            raise NotImplementedError
    return nn.Sequential(OrderedDict(layers))


def _append_activation(layers, layer_name):
    """Append a ReLU/LeakyReLU entry if the layer name requests one."""
    if 'relu' in layer_name:
        layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
    elif 'leaky' in layer_name:
        layers.append(('leaky_' + layer_name,
                       nn.LeakyReLU(negative_slope=0.2, inplace=True)))
| 44.171429
| 102
| 0.539457
|
dee048cc52e4d3b1b410257d880daf38902e105f
| 4,273
|
py
|
Python
|
azure-quantum/tests/unit/test_authentication.py
|
microsoft/qdk-python
|
d91a3a670174de5270f6ac11f9dc51af7270bb4e
|
[
"MIT"
] | 53
|
2021-01-21T23:38:09.000Z
|
2022-03-29T16:34:42.000Z
|
azure-quantum/tests/unit/test_authentication.py
|
microsoft/qdk-python
|
d91a3a670174de5270f6ac11f9dc51af7270bb4e
|
[
"MIT"
] | 152
|
2021-01-23T07:01:49.000Z
|
2022-03-31T19:43:21.000Z
|
azure-quantum/tests/unit/test_authentication.py
|
microsoft/qdk-python
|
d91a3a670174de5270f6ac11f9dc51af7270bb4e
|
[
"MIT"
] | 47
|
2021-01-30T20:15:46.000Z
|
2022-03-25T23:35:28.000Z
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_authentication.py: Checks correctness of azure.quantum._authentication module.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import json
import os
from pathlib import Path
import time
import pytest
from unittest.mock import patch
from azure.identity import CredentialUnavailableError
from azure.quantum._authentication import _TokenFileCredential
from .common import QuantumTestBase
_AZURE_QUANTUM_SCOPE = "https://quantum.microsoft.com/.default"
class TestWorkspace(QuantumTestBase):
    """Tests for _TokenFileCredential, which reads an access token from a
    JSON file whose path is given by the AZURE_QUANTUM_TOKEN_FILE
    environment variable."""

    def test_azure_quantum_token_credential_file_not_set(self):
        # No AZURE_QUANTUM_TOKEN_FILE in the environment at all.
        credential = _TokenFileCredential()
        with pytest.raises(CredentialUnavailableError) as exception:
            credential.get_token(_AZURE_QUANTUM_SCOPE)
        assert "Token file location not set." in str(exception.value)

    def test_azure_quantum_token_credential_file_not_exists(self):
        # Variable set but pointing at a path that does not exist.
        with patch.dict(os.environ, { "AZURE_QUANTUM_TOKEN_FILE": "fake_file_path" }, clear=True):
            with patch('os.path.isfile') as mock_isfile:
                mock_isfile.return_value = False
                credential = _TokenFileCredential()
                with pytest.raises(CredentialUnavailableError) as exception:
                    credential.get_token(_AZURE_QUANTUM_SCOPE)
                assert "Token file at fake_file_path does not exist." in str(exception.value)

    def test_azure_quantum_token_credential_file_invalid_json(self):
        # File exists but does not contain valid JSON.
        tmpdir = self.create_temp_dir()
        file = Path(tmpdir) / "token.json"
        file.write_text("not a json")
        with patch.dict(os.environ, { "AZURE_QUANTUM_TOKEN_FILE": str(file.resolve()) }, clear=True):
            credential = _TokenFileCredential()
            with pytest.raises(CredentialUnavailableError) as exception:
                credential.get_token(_AZURE_QUANTUM_SCOPE)
            assert "Failed to parse token file: Invalid JSON." in str(exception.value)

    def test_azure_quantum_token_credential_file_missing_expires_on(self):
        # Valid JSON but without the required 'expires_on' key.
        content = {
            "access_token": "fake_token",
        }
        tmpdir = self.create_temp_dir()
        file = Path(tmpdir) / "token.json"
        file.write_text(json.dumps(content))
        with patch.dict(os.environ, { "AZURE_QUANTUM_TOKEN_FILE": str(file.resolve()) }, clear=True):
            credential = _TokenFileCredential()
            with pytest.raises(CredentialUnavailableError) as exception:
                credential.get_token(_AZURE_QUANTUM_SCOPE)
            assert "Failed to parse token file: Missing expected value: 'expires_on'" in str(exception.value)

    def test_azure_quantum_token_credential_file_token_expired(self):
        content = {
            "access_token": "fake_token",
            "expires_on": 1628543125086  # Matches timestamp in error message below
        }
        tmpdir = self.create_temp_dir()
        file = Path(tmpdir) / "token.json"
        file.write_text(json.dumps(content))
        with patch.dict(os.environ, { "AZURE_QUANTUM_TOKEN_FILE": str(file.resolve()) }, clear=True):
            credential = _TokenFileCredential()
            with pytest.raises(CredentialUnavailableError) as exception:
                credential.get_token(_AZURE_QUANTUM_SCOPE)
            assert "Token already expired at Mon Aug  9 21:05:25 2021" in str(exception.value)

    def test_azure_quantum_token_credential_file_valid_token(self):
        # Happy path: unexpired token is returned as-is.
        one_hour_ahead = time.time() + 60*60
        content = {
            "access_token": "fake_token",
            "expires_on": one_hour_ahead * 1000  # Convert to milliseconds
        }
        tmpdir = self.create_temp_dir()
        file = Path(tmpdir) / "token.json"
        file.write_text(json.dumps(content))
        with patch.dict(os.environ, { "AZURE_QUANTUM_TOKEN_FILE": str(file.resolve()) }, clear=True):
            credential = _TokenFileCredential()
            token = credential.get_token(_AZURE_QUANTUM_SCOPE)
            assert token.token == "fake_token"
            assert token.expires_on == pytest.approx(one_hour_ahead)
| 43.161616
| 109
| 0.662766
|
39d9d5240a8bf8a65d9ee7421d6f9c740e4ce6c6
| 5,837
|
py
|
Python
|
torchkge/evaluation/triplet_classification.py
|
MacOS/torchkge
|
89ed724368f3a5279c0f79c6ba1f948ed2a5696f
|
[
"BSD-3-Clause"
] | 248
|
2019-04-03T10:04:43.000Z
|
2022-03-30T13:01:51.000Z
|
torchkge/evaluation/triplet_classification.py
|
MacOS/torchkge
|
89ed724368f3a5279c0f79c6ba1f948ed2a5696f
|
[
"BSD-3-Clause"
] | 52
|
2019-04-04T05:54:35.000Z
|
2022-03-02T17:18:15.000Z
|
torchkge/evaluation/triplet_classification.py
|
MacOS/torchkge
|
89ed724368f3a5279c0f79c6ba1f948ed2a5696f
|
[
"BSD-3-Clause"
] | 41
|
2019-06-05T08:08:00.000Z
|
2022-03-26T09:18:05.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright TorchKGE developers
@author: Armand Boschin <aboschin@enst.fr>
"""
from torch import zeros, cat
from ..data_structures import SmallKG
from ..sampling import PositionalNegativeSampler
from ..utils import DataLoader
class TripletClassificationEvaluator(object):
    """Evaluate performance of given embedding using triplet classification
    method.

    References
    ----------
    * Richard Socher, Danqi Chen, Christopher D Manning, and Andrew Ng.
      Reasoning With Neural Tensor Networks for Knowledge Base Completion.
      In Advances in Neural Information Processing Systems 26, pages 926-934.
      2013.
      https://nlp.stanford.edu/pubs/SocherChenManningNg_NIPS2013.pdf

    Parameters
    ----------
    model: torchkge.models.interfaces.Model
        Embedding model inheriting from the right interface.
    kg_val: torchkge.data_structures.KnowledgeGraph
        Knowledge graph on which the validation thresholds will be computed.
    kg_test: torchkge.data_structures.KnowledgeGraph
        Knowledge graph on which the testing evaluation will be done.

    Attributes
    ----------
    model: torchkge.models.interfaces.Model
        Embedding model inheriting from the right interface.
    kg_val: torchkge.data_structures.KnowledgeGraph
        Knowledge graph on which the validation thresholds will be computed.
    kg_test: torchkge.data_structures.KnowledgeGraph
        Knowledge graph on which the evaluation will be done.
    evaluated: bool
        Indicate whether the `evaluate` function has been called.
    thresholds: float
        Float value of the thresholds for the scoring function to consider a
        triplet as true. It is defined by calling the `evaluate` method.
    sampler: torchkge.sampling.NegativeSampler
        Negative sampler.
    """

    def __init__(self, model, kg_val, kg_test):
        self.model = model
        self.kg_val = kg_val
        self.kg_test = kg_test
        # Infer device placement from the model's own parameters.
        self.is_cuda = next(self.model.parameters()).is_cuda
        self.evaluated = False
        self.thresholds = None
        self.sampler = PositionalNegativeSampler(self.kg_val,
                                                 kg_test=self.kg_test)

    def get_scores(self, heads, tails, relations, batch_size):
        """With head, tail and relation indexes, compute the value of the
        scoring function of the model.

        Parameters
        ----------
        heads: torch.Tensor, dtype: torch.long, shape: n_facts
            List of heads indices.
        tails: torch.Tensor, dtype: torch.long, shape: n_facts
            List of tails indices.
        relations: torch.Tensor, dtype: torch.long, shape: n_facts
            List of relation indices.
        batch_size: int

        Returns
        -------
        scores: torch.Tensor, dtype: torch.float, shape: n_facts
            List of scores of each triplet.
        """
        scores = []
        # Wrap the raw index tensors so they can be batched by DataLoader.
        small_kg = SmallKG(heads, tails, relations)
        if self.is_cuda:
            dataloader = DataLoader(small_kg, batch_size=batch_size,
                                    use_cuda='batch')
        else:
            dataloader = DataLoader(small_kg, batch_size=batch_size)
        for i, batch in enumerate(dataloader):
            h_idx, t_idx, r_idx = batch[0], batch[1], batch[2]
            scores.append(self.model.scoring_function(h_idx, t_idx, r_idx))
        # Concatenate per-batch scores back into one flat tensor.
        return cat(scores, dim=0)

    def evaluate(self, b_size):
        """Find relation thresholds using the validation set. As described in
        the paper by Socher et al., for a relation, the threshold is a value t
        such that if the score of a triplet is larger than t, the fact is true.
        If a relation is not present in any fact of the validation set, then
        the largest value score of all negative samples is used as threshold.

        Parameters
        ----------
        b_size: int
            Batch size.
        """
        r_idx = self.kg_val.relations

        # Score negative triplets built from the validation set.
        neg_heads, neg_tails = self.sampler.corrupt_kg(b_size, self.is_cuda,
                                                       which='main')
        neg_scores = self.get_scores(neg_heads, neg_tails, r_idx, b_size)

        self.thresholds = zeros(self.kg_val.n_rel)

        for i in range(self.kg_val.n_rel):
            mask = (r_idx == i).bool()
            if mask.sum() > 0:
                # Threshold = max negative score among this relation's facts.
                self.thresholds[i] = neg_scores[mask].max()
            else:
                # Relation unseen in validation: fall back to global max.
                self.thresholds[i] = neg_scores.max()

        self.evaluated = True
        # Detach so thresholds are treated as constants, not graph nodes.
        self.thresholds.detach_()

    def accuracy(self, b_size):
        """
        Parameters
        ----------
        b_size: int
            Batch size.

        Returns
        -------
        acc: float
            Share of all triplets (true and negatively sampled ones) that where
            correctly classified using the thresholds learned from the
            validation set.
        """
        # Lazily compute validation thresholds on first use.
        if not self.evaluated:
            self.evaluate(b_size)

        r_idx = self.kg_test.relations

        neg_heads, neg_tails = self.sampler.corrupt_kg(b_size,
                                                       self.is_cuda,
                                                       which='test')
        scores = self.get_scores(self.kg_test.head_idx,
                                 self.kg_test.tail_idx,
                                 r_idx,
                                 b_size)
        neg_scores = self.get_scores(neg_heads, neg_tails, r_idx, b_size)

        if self.is_cuda:
            self.thresholds = self.thresholds.cuda()

        # Positive facts are correct when above threshold, negatives when
        # strictly below it.
        scores = (scores > self.thresholds[r_idx])
        neg_scores = (neg_scores < self.thresholds[r_idx])

        return (scores.sum().item() +
                neg_scores.sum().item()) / (2 * self.kg_test.n_facts)
| 34.952096
| 79
| 0.602707
|
429b64ec389521d82f48774d374dde223009d732
| 4,514
|
py
|
Python
|
docs/source/conf.py
|
harrywang/oauthenticator
|
ae4b260c9bbcc66040fd55ff1b14c8f2fc9346e4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-17T07:21:29.000Z
|
2021-04-17T07:21:29.000Z
|
docs/source/conf.py
|
harrywang/oauthenticator
|
ae4b260c9bbcc66040fd55ff1b14c8f2fc9346e4
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T19:13:12.000Z
|
2022-02-21T19:13:12.000Z
|
docs/source/conf.py
|
harrywang/oauthenticator
|
ae4b260c9bbcc66040fd55ff1b14c8f2fc9346e4
|
[
"BSD-3-Clause"
] | 2
|
2021-04-12T15:10:41.000Z
|
2021-04-23T13:42:41.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from subprocess import check_call

# Absolute directory containing this conf.py; used below to find templates.
source = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath('..'))

# -- Project information -----------------------------------------------------

project = 'OAuthenticator'
copyright = 'Jupyter Contributors'
author = 'Jupyter Contributors'
master_doc = 'index'

import oauthenticator

# The short X.Y version.
version = '%i.%i' % oauthenticator.version_info[:2]
# The full version, including alpha/beta/rc tags.
release = oauthenticator.__version__

# -- generate autodoc classes from entrypoints

from collections import defaultdict

import entrypoints
import jinja2
def render_autodoc_modules():
    """Generate per-module autodoc .rst files for all authenticators.

    Collects oauthenticator modules registered under the
    ``jupyterhub.authenticators`` entrypoint group, renders
    ``api/authenticator.rst.tpl`` for each module into
    ``api/gen/<module>.rst``, and renders ``api/index.rst`` listing them.
    """
    authenticator_entrypoints = entrypoints.get_group_named(
        "jupyterhub.authenticators"
    ).values()

    api = os.path.join(source, "api")
    api_gen = os.path.join(api, "gen")

    # modules is a dict of dicts of lists
    # { '$module': { 'classes': [...], 'configurables': [...] } }
    modules = defaultdict(lambda : defaultdict(list))

    # pre-load base classes
    modules['oauthenticator.oauth2'] = {
        'classes': [
            'OAuthLoginHandler',
            'OAuthCallbackHandler',
        ],
        'configurables': [
            'OAuthenticator',
        ],
    }

    # load Authenticator classes from entrypoints
    for ep in authenticator_entrypoints:
        if ep.module_name and ep.module_name.startswith('oauthenticator.'):
            modules[ep.module_name]['configurables'].append(ep.object_name)

    with open(os.path.join(api, "authenticator.rst.tpl")) as f:
        tpl = jinja2.Template(f.read())

    # Create the output directory if it is not already there.
    try:
        os.makedirs(os.path.join(api_gen))
    except FileExistsError:
        pass

    for mod, mod_content in modules.items():
        dest = os.path.join(api_gen, mod + ".rst")
        print(
            "Autogenerating module documentation in {} with classes: {}".format(
                dest, mod_content
            )
        )
        with open(dest, "w") as f:
            f.write(tpl.render(module=mod, **mod_content))

    # render the module index
    with open(os.path.join(api, "index.rst.tpl")) as f:
        index_tpl = jinja2.Template(f.read())

    with open(os.path.join(api, "index.rst"), "w") as f:
        f.write(index_tpl.render(modules=modules))
# Run the generation at import time so the files exist before Sphinx reads
# the source tree.
render_autodoc_modules()

# Mock heavy/optional imports so autodoc can run without them installed.
autodoc_mock_imports = ["tornado", "jwt", "mwoauth", "globus_sdk"]

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'autodoc_traits',
    'sphinx_copybutton',
    'recommonmark',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

import recommonmark
from recommonmark.transform import AutoStructify
def setup(app):
    """Sphinx extension hook: enable eval_rst in Markdown and add custom CSS."""
    app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)
    # NOTE(review): app.add_stylesheet() is the legacy spelling (removed in
    # Sphinx 6 in favor of app.add_css_file) -- confirm the pinned version.
    app.add_stylesheet('custom.css')
    app.add_transform(AutoStructify)
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'

# Project logo and favicon shown in the rendered docs.
html_logo = '_static/images/logo/logo.png'
html_favicon = '_static/images/logo/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 29.89404
| 80
| 0.659947
|
d776ab07b30f03032fed349062c3c68dbb8fcf07
| 453,350
|
py
|
Python
|
test/onnx/test_pytorch_onnx_onnxruntime.py
|
xuhancn/pytorch
|
5c7d916c3d287f6c86f4d59ca1e2b8cc4cd9cd3e
|
[
"Intel"
] | null | null | null |
test/onnx/test_pytorch_onnx_onnxruntime.py
|
xuhancn/pytorch
|
5c7d916c3d287f6c86f4d59ca1e2b8cc4cd9cd3e
|
[
"Intel"
] | null | null | null |
test/onnx/test_pytorch_onnx_onnxruntime.py
|
xuhancn/pytorch
|
5c7d916c3d287f6c86f4d59ca1e2b8cc4cd9cd3e
|
[
"Intel"
] | null | null | null |
# Owner(s): ["module: onnx"]
import copy
import io
import itertools
import os
import random
import unittest
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
import model_defs.word_language_model as word_language_model
import numpy as np
import onnx
import onnxruntime
import torchvision
from model_defs.lstm_flattening_result import (
LstmFlatteningResultWithoutSeqLength,
LstmFlatteningResultWithSeqLength,
)
from model_defs.rnn_model_with_packed_sequence import (
RnnModelWithPackedSequence,
RnnModelWithPackedSequenceWithoutState,
RnnModelWithPackedSequenceWithState,
)
from test_pytorch_common import (
BATCH_SIZE,
RNN_BATCH_SIZE,
RNN_HIDDEN_SIZE,
RNN_INPUT_SIZE,
RNN_SEQUENCE_LENGTH,
skipIfNoLapack,
skipIfUnsupportedMaxOpsetVersion,
skipIfUnsupportedMinOpsetVersion,
skipIfUnsupportedOpsetVersion,
skipScriptTest,
)
from torchvision import ops
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
from torchvision.models.detection.image_list import ImageList
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.rpn import (
AnchorGenerator,
RegionProposalNetwork,
RPNHead,
)
from torchvision.models.detection.transform import GeneralizedRCNNTransform
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn.utils import rnn as rnn_utils
from torch.nn.utils.rnn import PackedSequence
from torch.onnx import (
CheckerError,
register_custom_op_symbolic,
unregister_custom_op_symbolic,
)
from torch.onnx.symbolic_helper import _unimplemented
from torch.onnx.utils import unpack_quantized_tensor
_ORT_PROVIDERS = ["CPUExecutionProvider"]
def flatten_tuples(elem):
    """Recursively flatten nested tuples inside *elem* into a flat list.

    Only tuples are expanded; lists and other containers are kept as-is.
    """
    out = []
    for entry in elem:
        out.extend(flatten_tuples(entry) if isinstance(entry, tuple) else [entry])
    return out
def to_numpy(elem):
    """Convert tensors, containers and scalars to numpy for ORT comparison.

    Tensors become ndarrays (detached first if they require grad), lists
    and tuples are converted element-wise, bool/int/float scalars become
    0-d arrays, and dicts are flattened into an alternating
    [key, value, key, value, ...] list.  Anything else passes through.
    """
    if isinstance(elem, Tensor):
        tensor = elem.detach() if elem.requires_grad else elem
        return tensor.cpu().numpy()
    if isinstance(elem, (list, tuple)):
        return [to_numpy(entry) for entry in elem]
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(elem, bool):
        return np.array(elem, dtype=bool)
    if isinstance(elem, int):
        return np.array(elem, dtype=int)
    if isinstance(elem, float):
        return np.array(elem, dtype=float)
    if isinstance(elem, dict):
        flat = []
        for key, value in elem.items():
            flat.append(to_numpy(key))
            flat.append(to_numpy(value))
        return flat
    return elem
def convert_to_onnx(
    model,
    input=None,
    opset_version=9,
    do_constant_folding=True,
    keep_initializers_as_inputs=True,
    dynamic_axes=None,
    input_names=None,
    output_names=None,
    fixed_batch_size=False,
    training=None,
    verbose=False,
):
    """Export *model* to ONNX in-memory and return an ORT inference session.

    The input is deep-copied before export because in-place ops inside the
    model could otherwise mutate the caller's tensors.  All keyword
    arguments are forwarded to ``torch.onnx._export``.
    """
    f = io.BytesIO()
    input_copy = copy.deepcopy(input)
    torch.onnx._export(
        model,
        input_copy,
        f,
        opset_version=opset_version,
        do_constant_folding=do_constant_folding,
        keep_initializers_as_inputs=keep_initializers_as_inputs,
        dynamic_axes=dynamic_axes,
        input_names=input_names,
        output_names=output_names,
        fixed_batch_size=fixed_batch_size,
        training=training,
        verbose=verbose,
    )

    # compute onnxruntime output prediction
    so = onnxruntime.SessionOptions()
    # suppress ort warnings.
    # 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2.
    so.log_severity_level = 3
    ort_sess = onnxruntime.InferenceSession(f.getvalue(), so, providers=_ORT_PROVIDERS)
    return ort_sess
def inline_flatten_list(inputs, res_list):
    """Recursively append the leaves of nested lists/tuples to *res_list*.

    Mutates *res_list* in place and also returns it, so it can be used
    both as an accumulator and as an expression.
    """
    # The original used a conditional expression purely for its side
    # effects; an explicit if/else is the idiomatic, readable form.
    for item in inputs:
        if isinstance(item, (list, tuple)):
            inline_flatten_list(item, res_list)
        else:
            res_list.append(item)
    return res_list
def unpack_to_numpy(values):
    """Expand any quantized tensors in *values*, then convert all to numpy."""
    unpacked = itertools.chain.from_iterable(
        unpack_quantized_tensor(value) for value in values
    )
    return [to_numpy(v) for v in unpacked]
def run_ort(ort_sess, inputs):
    """Run an ONNX Runtime session on *inputs* and return flattened outputs.

    If the last element of *inputs* is a dict, it is treated as keyword
    inputs keyed by ONNX input name; the remaining positional inputs are
    matched to the session's inputs in declaration order.
    """
    kw_inputs = {}
    if inputs and isinstance(inputs[-1], dict):
        kw_inputs = inputs[-1]
        inputs = inputs[:-1]
    inputs = unpack_to_numpy(flatten_tuples(inputs))
    ort_inputs = {}
    for input_name, input in kw_inputs.items():
        ort_inputs[input_name] = to_numpy(input)
    inputs = to_numpy(inputs)
    ort_sess_inputs = ort_sess.get_inputs()
    for i, input in enumerate(inputs):
        # Fail fast if positional inputs exceed the session's inputs or
        # collide with a name already supplied as a keyword input.
        if i == len(ort_sess_inputs) or ort_sess_inputs[i].name in ort_inputs:
            raise ValueError(
                f"got too many positional inputs. inputs: {inputs}. kw_inputs: {kw_inputs}"
            )
        ort_inputs[ort_sess_inputs[i].name] = input
    ort_outs = ort_sess.run(None, ort_inputs)
    return inline_flatten_list(ort_outs, [])
def ort_compare_with_pytorch(ort_outs, output, rtol, atol):
    """Assert that ONNX Runtime outputs match the PyTorch outputs.

    Parameters
    ----------
    ort_outs : flat list of numpy arrays produced by ONNX Runtime.
    output : PyTorch model output (tensor or nested structure); it is
        flattened and converted to numpy before comparison.
    rtol, atol : relative/absolute tolerances for the comparison.
    """
    output, _ = torch.jit._flatten(output)
    outputs = unpack_to_numpy(output)

    # compare onnxruntime and PyTorch results
    assert len(outputs) == len(ort_outs), "number of outputs differ"

    # Use a plain loop: the original list comprehension was evaluated only
    # for its assertion side effects and allocated a throwaway list.
    for out, ort_out in zip(outputs, ort_outs):
        np.testing.assert_allclose(out, ort_out, rtol=rtol, atol=atol)
def run_model_test(
    self,
    model,
    batch_size=2,
    state_dict=None,
    input=None,
    use_gpu=True,
    rtol=0.001,
    atol=1e-7,
    do_constant_folding=True,
    dynamic_axes=None,
    test_with_inputs=None,
    input_names=None,
    output_names=None,
    fixed_batch_size=False,
    dict_check=True,
    training=None,
    remained_onnx_input_idx=None,
    flatten=True,
    verbose=False,
):
    """Export *model* to ONNX, run it under ONNX Runtime, and compare the
    results against a direct PyTorch forward pass.

    *remained_onnx_input_idx* selects which PyTorch inputs survive in the
    exported graph (constant folding can drop shape-only inputs).  If
    *test_with_inputs* is given, the already-exported session is re-run on
    each extra input set and compared again.
    """
    if training is not None and training == torch.onnx.TrainingMode.TRAINING:
        model.train()
    elif training is None or training == torch.onnx.TrainingMode.EVAL:
        model.eval()
    if input is None:
        input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
    with torch.no_grad():
        if isinstance(input, (Tensor, dict)):
            input = (input,)
        # In-place operators will update input tensor data as well.
        # Thus inputs are replicated before every forward call.
        input_args = copy.deepcopy(input)
        input_kwargs = {}
        if dict_check and isinstance(input_args[-1], dict):
            input_kwargs = input_args[-1]
            input_args = input_args[:-1]
        # Prefer running a copy so the original model's state is untouched;
        # fall back to the model itself when it cannot be deep-copied.
        try:
            model_copy = copy.deepcopy(model)
            output = model_copy(*input_args, **input_kwargs)
        except Exception:
            output = model(*input_args, **input_kwargs)
        if isinstance(output, Tensor):
            output = (output,)

        if not dict_check and isinstance(input[-1], dict):
            input = input + ({},)

        ort_sess = convert_to_onnx(
            model,
            input=input,
            opset_version=self.opset_version,
            do_constant_folding=do_constant_folding,
            keep_initializers_as_inputs=self.keep_initializers_as_inputs,
            dynamic_axes=dynamic_axes,
            input_names=input_names,
            output_names=output_names,
            fixed_batch_size=fixed_batch_size,
            training=training,
            verbose=verbose,
        )
        # compute onnxruntime output prediction
        if remained_onnx_input_idx is not None:
            input_onnx = []
            for idx in remained_onnx_input_idx:
                input_onnx.append(input[idx])
            input = input_onnx
        input_copy = copy.deepcopy(input)
        if flatten:
            input_copy, _ = torch.jit._flatten(input_copy)
        elif input_copy and input_copy[-1] == {}:
            # Handle empty kwargs (normally removed by flatten).
            input_copy = input_copy[:-1]
        ort_outs = run_ort(ort_sess, input_copy)
        ort_compare_with_pytorch(ort_outs, output, rtol, atol)

        # if additional test inputs are provided run the onnx
        # model with these inputs and check the outputs
        if test_with_inputs is not None:
            for test_input in test_with_inputs:
                if isinstance(test_input, Tensor):
                    test_input = (test_input,)
                test_input_copy = copy.deepcopy(test_input)
                output = model(*test_input_copy)
                if isinstance(output, Tensor):
                    output = (output,)
                if remained_onnx_input_idx is not None:
                    test_input_onnx = []
                    for idx in remained_onnx_input_idx:
                        test_input_onnx.append(test_input[idx])
                    test_input = test_input_onnx
                if flatten:
                    test_input, _ = torch.jit._flatten(test_input)
                ort_outs = run_ort(ort_sess, test_input)
                ort_compare_with_pytorch(ort_outs, output, rtol, atol)
def _init_test_generalized_rcnn_transform():
    """Build a small GeneralizedRCNNTransform fixture (ImageNet mean/std)."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    return GeneralizedRCNNTransform(100, 200, mean, std)
def _init_test_rpn():
    """Build a RegionProposalNetwork fixture with standard FPN anchor sizes.

    Values mirror common Faster R-CNN defaults; the network is returned
    unfitted, for export/inference tests only.
    """
    anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
    aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
    rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
    out_channels = 256
    rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
    rpn_fg_iou_thresh = 0.7
    rpn_bg_iou_thresh = 0.3
    rpn_batch_size_per_image = 256
    rpn_positive_fraction = 0.5
    # Separate proposal budgets for train/eval mode.
    rpn_pre_nms_top_n = dict(training=2000, testing=1000)
    rpn_post_nms_top_n = dict(training=2000, testing=1000)
    rpn_nms_thresh = 0.7
    rpn_score_thresh = 0.0
    rpn = RegionProposalNetwork(
        rpn_anchor_generator,
        rpn_head,
        rpn_fg_iou_thresh,
        rpn_bg_iou_thresh,
        rpn_batch_size_per_image,
        rpn_positive_fraction,
        rpn_pre_nms_top_n,
        rpn_post_nms_top_n,
        rpn_nms_thresh,
        score_thresh=rpn_score_thresh,
    )
    return rpn
def _init_test_roi_heads_faster_rcnn():
    """Build a RoIHeads fixture matching torchvision's Faster R-CNN box head.

    91 classes corresponds to the COCO label set used by the pretrained
    detection models.
    """
    out_channels = 256
    num_classes = 91

    box_fg_iou_thresh = 0.5
    box_bg_iou_thresh = 0.5
    box_batch_size_per_image = 512
    box_positive_fraction = 0.25
    bbox_reg_weights = None
    box_score_thresh = 0.05
    box_nms_thresh = 0.5
    box_detections_per_img = 100

    # Pool RoIs from all four FPN levels into 7x7 features.
    box_roi_pool = ops.MultiScaleRoIAlign(
        featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2
    )

    resolution = box_roi_pool.output_size[0]
    representation_size = 1024
    box_head = TwoMLPHead(out_channels * resolution**2, representation_size)

    representation_size = 1024
    box_predictor = FastRCNNPredictor(representation_size, num_classes)

    roi_heads = RoIHeads(
        box_roi_pool,
        box_head,
        box_predictor,
        box_fg_iou_thresh,
        box_bg_iou_thresh,
        box_batch_size_per_image,
        box_positive_fraction,
        bbox_reg_weights,
        box_score_thresh,
        box_nms_thresh,
        box_detections_per_img,
    )
    return roi_heads
def _construct_tensor_for_quantization_test(
shape: Tuple[int, ...],
offset: Optional[Union[int, float]] = None,
max_val: Optional[Union[int, float]] = None,
) -> torch.Tensor:
"""Helper function to generate weights and test inputs in a deterministic way.
Due to difference in implementation details between PyTorch and ONNXRuntime, randomly generated
test data for quantization tests can be flaky. To help stablize the test, this helper function is
used to generate weights and test inputs in a deterministic way.
Args:
shape (Tuple[int]): Shape for tensor to construct.
offset (Optional[Union[int, float]]): Offset to be added to the generated tensor.
max_val (Optional[Union[int, float]]): If any element within tensor has a larger absolute value than
max_val, the tensor will be scaled by max_val / tensor.abs().max(). This step is done after
applying offset.
"""
tensor = torch.arange(np.prod(shape), dtype=torch.float).view(shape)
if offset is not None:
tensor = tensor + offset
if max_val is not None and tensor.abs().max() > max_val:
tensor = tensor * max_val / tensor.abs().max()
return tensor
def set_rng_seed(seed):
    """Seed torch, Python's ``random``, and NumPy RNGs for reproducible tests."""
    for seeder in (torch.manual_seed, random.seed, np.random.seed):
        seeder(seed)
class _TestONNXRuntime:
    """Abstract base class for ONNX Runtime export-parity test cases.

    Intentionally not a sub-class of unittest.TestCase so that unittest / pytest
    don't run it directly. unittest.TestCase is mixed in as another base class when
    creating concrete sub-types. See MakeTestCase().
    """

    opset_version = -1  # Sub-classes must override with a concrete ONNX opset.
    keep_initializers_as_inputs = True  # For IR version 3 type export.

    def setUp(self):
        # Seed every RNG source so model weights and inputs are deterministic
        # across the PyTorch and ONNX Runtime sides of each comparison.
        torch.manual_seed(0)
        onnxruntime.set_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        np.random.seed(seed=0)
        # "0" disables the released-opset-only restriction during export.
        os.environ["ALLOW_RELEASED_ONNX_OPSET_ONLY"] = "0"
        self.is_script_test_enabled = True

    # The exported ONNX model may have less inputs than the pytorch model because of const folding.
    # This mostly happens in unit test, where we widely use torch.size or torch.shape.
    # So the output is only dependent on the input shape, not value.
    # remained_onnx_input_idx is used to indicate which pytorch model input idx is remained in ONNX model.
    def run_test(
        self,
        model,
        input,
        rtol=1e-3,
        atol=1e-7,
        do_constant_folding=True,
        batch_size=2,
        use_gpu=True,
        dynamic_axes=None,
        test_with_inputs=None,
        input_names=None,
        output_names=None,
        fixed_batch_size=False,
        dict_check=True,
        training=None,
        remained_onnx_input_idx=None,
        verbose=False,
    ):
        """Export ``model`` to ONNX and compare ONNX Runtime output with PyTorch.

        Delegates the actual export/run/compare to ``run_model_test``.  When
        scripting tests are enabled, the model is exercised via
        ``torch.jit.script`` (or as-is if it is already a ScriptModule), and,
        when the model was not already scripted, additionally via tracing.

        ``remained_onnx_input_idx`` may be a single value used for both paths,
        or a dict with separate ``"scripting"`` and ``"tracing"`` entries,
        since constant folding can remove different inputs per path.
        """

        def _run_test(m, remained_onnx_input_idx, flatten=True):
            # Thin closure over run_model_test so both paths share kwargs.
            return run_model_test(
                self,
                m,
                batch_size=batch_size,
                input=input,
                use_gpu=use_gpu,
                rtol=rtol,
                atol=atol,
                do_constant_folding=do_constant_folding,
                dynamic_axes=dynamic_axes,
                test_with_inputs=test_with_inputs,
                input_names=input_names,
                output_names=output_names,
                fixed_batch_size=fixed_batch_size,
                dict_check=dict_check,
                training=training,
                remained_onnx_input_idx=remained_onnx_input_idx,
                flatten=flatten,
                verbose=verbose,
            )

        # Allow per-path overrides of which ONNX inputs survive const folding.
        if isinstance(remained_onnx_input_idx, dict):
            scripting_remained_onnx_input_idx = remained_onnx_input_idx["scripting"]
            tracing_remained_onnx_input_idx = remained_onnx_input_idx["tracing"]
        else:
            scripting_remained_onnx_input_idx = remained_onnx_input_idx
            tracing_remained_onnx_input_idx = remained_onnx_input_idx

        is_script = isinstance(
            model, (torch.jit.ScriptModule, torch.jit.ScriptFunction)
        )

        if self.is_script_test_enabled:
            script_model = model if is_script else torch.jit.script(model)
            # Scripted models keep their structured inputs, so no flattening.
            _run_test(script_model, scripting_remained_onnx_input_idx, flatten=False)

        if not is_script:
            _run_test(model, tracing_remained_onnx_input_idx)
    def run_model_test_with_external_data(
        self,
        model,
        input,
        rtol=0.001,
        atol=1e-7,
        do_constant_folding=True,
        dynamic_axes=None,
        input_names=None,
        output_names=None,
        ort_optim_on=True,
        training=None,
    ):
        """Export ``model`` to a file (enabling the ONNX external-data format
        for large initializers), run it through ONNX Runtime, and compare the
        result against eager PyTorch output.

        Unlike ``run_test``, the model is written to a real temporary file
        because external data must live next to the .onnx file on disk.
        """
        import os
        import tempfile

        # Put the model into the requested train/eval mode before capturing
        # the reference output.
        if training is not None and training == torch.onnx.TrainingMode.TRAINING:
            model.train()
        elif training is None or training == torch.onnx.TrainingMode.EVAL:
            model.eval()
        with torch.no_grad():
            if isinstance(input, Tensor):
                input = (input,)
            # In-place operators will update input tensor data as well.
            # Thus inputs are replicated before every forward call.
            input_copy = copy.deepcopy(input)
            output = model(*input_copy)
            if isinstance(output, Tensor):
                output = (output,)

            # export the model to ONNX
            with tempfile.TemporaryDirectory() as tmpdirname:
                model_file_name = os.path.join(tmpdirname, "model.onnx")
                input_copy = copy.deepcopy(input)
                torch.onnx.export(
                    model,
                    input_copy,
                    model_file_name,
                    opset_version=self.opset_version,
                    verbose=False,
                    do_constant_folding=do_constant_folding,
                    keep_initializers_as_inputs=self.keep_initializers_as_inputs,
                    dynamic_axes=dynamic_axes,
                    input_names=input_names,
                    output_names=output_names,
                )
                # compute onnxruntime output prediction
                ort_sess_opt = onnxruntime.SessionOptions()
                ort_sess_opt.graph_optimization_level = (
                    onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
                    if ort_optim_on
                    else onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
                )
                # suppress ort warnings.
                # 0:Verbose, 1:Info, 2:Warning. 3:Error, 4:Fatal. Default is 2.
                ort_sess_opt.log_severity_level = 3
                ort_sess = onnxruntime.InferenceSession(
                    model_file_name, sess_options=ort_sess_opt, providers=_ORT_PROVIDERS
                )
                input_copy = copy.deepcopy(input)
                ort_outs = run_ort(ort_sess, input_copy)
                ort_compare_with_pytorch(ort_outs, output, rtol, atol)
    @skipIfUnsupportedMinOpsetVersion(
        9
    )  # Because external data format was released with Opset 9.
    def test_embedding_model_with_external_data(self):
        # Small embedding+linear model exercised through the external-data
        # export path.
        class LargeModel(torch.nn.Module):
            def __init__(self):
                super(LargeModel, self).__init__()
                dim = 15
                n = 4 * 100
                self.emb = torch.nn.Embedding(n, dim)
                self.lin1 = torch.nn.Linear(dim, 1)
                self.seq = torch.nn.Sequential(
                    self.emb,
                    self.lin1,
                )

            def forward(self, input):
                return self.seq(input)

        model = LargeModel()
        x = torch.tensor([2], dtype=torch.long)
        self.run_model_test_with_external_data(model, x)

    @skipIfUnsupportedMinOpsetVersion(
        9
    )  # Because external data format was released with Opset 9.
    def test_large_model_with_external_data(self):
        # Embedding table large enough (40M x 5 floats) to force weights into
        # ONNX external data files.
        class LargeModel(torch.nn.Module):
            def __init__(self):
                super(LargeModel, self).__init__()
                dim = 5
                n = 40 * 4 * 10**6
                self.emb = torch.nn.Embedding(n, dim)
                self.lin1 = torch.nn.Linear(dim, 1)
                self.seq = torch.nn.Sequential(
                    self.emb,
                    self.lin1,
                )

            def forward(self, input):
                return self.seq(input)

        x = torch.tensor([2], dtype=torch.long)
        self.run_model_test_with_external_data(LargeModel(), x)

    @skipIfUnsupportedMinOpsetVersion(
        9
    )  # Because external data format was released with Opset 9.
    def test_large_model_with_non_str_file(self):
        # Exporting a >2GiB model to an in-memory buffer must fail with a
        # clear error, since external data requires a real file path.
        class LargeModel(torch.nn.Module):
            def __init__(self):
                super(LargeModel, self).__init__()
                dim = 5
                n = 40 * 4 * 10**6
                self.emb = torch.nn.Embedding(n, dim)
                self.lin1 = torch.nn.Linear(dim, 1)
                self.seq = torch.nn.Sequential(
                    self.emb,
                    self.lin1,
                )

            def forward(self, input):
                return self.seq(input)

        x = torch.tensor([2], dtype=torch.long)
        f = io.BytesIO()
        err_msg = (
            "The serialized model is larger than the 2GiB limit imposed by the protobuf library. "
            "Therefore the output file must be a file path, so that the ONNX external data can be written to "
            "the same directory. Please specify the output file name."
        )
        with self.assertRaisesRegex(RuntimeError, err_msg):
            torch.onnx.export(LargeModel(), x, f)
    # Conv+BatchNorm fusion tests: the exporter folds BN into the preceding
    # conv in eval mode; outputs must still match eager PyTorch.
    def test_fuse_conv_bn1d(self):
        class Fuse(torch.nn.Module):
            def __init__(self):
                super(Fuse, self).__init__()
                self.conv = torch.nn.Conv1d(16, 33, 3, stride=2)
                self.bn = torch.nn.BatchNorm1d(33)

            def forward(self, x):
                out = self.conv(x)
                return self.bn(out)

        model = Fuse()
        x = torch.randn(20, 16, 50, requires_grad=True)
        self.run_test(model, (x,))

    def test_fuse_conv_bn2d(self):
        class Fuse(torch.nn.Module):
            def __init__(self):
                super(Fuse, self).__init__()
                self.conv = torch.nn.Conv2d(
                    3, 2, kernel_size=1, stride=2, padding=3, bias=False
                )
                self.bn = torch.nn.BatchNorm2d(2)

            def forward(self, x):
                out = self.conv(x)
                return self.bn(out)

        model = Fuse()
        x = torch.randn(2, 3, 2, 2, requires_grad=True)
        self.run_test(model, (x,))

    def test_fuse_conv_bn3d(self):
        class Fuse(torch.nn.Module):
            def __init__(self):
                super(Fuse, self).__init__()
                self.conv = torch.nn.Conv3d(
                    3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False
                )
                self.bn = torch.nn.BatchNorm3d(2)

            def forward(self, x):
                out = self.conv(x)
                return self.bn(out)

        model = Fuse()
        x = torch.randn(2, 3, 10, 50, 100, requires_grad=True)
        self.run_test(model, (x,), rtol=1e-3, atol=1e-6)

    # Conv+BN inside data-dependent control flow (scripted): fusion must not
    # fire across a conditional branch.
    def test_fuse_conv_in_block(self):
        class Fuse(torch.nn.Module):
            def __init__(self):
                super(Fuse, self).__init__()
                self.conv = torch.nn.Conv1d(
                    in_channels=5,
                    out_channels=5,
                    kernel_size=3,
                    stride=1,
                    padding=2,
                    dilation=1,
                )
                self.bn = torch.nn.BatchNorm1d(5)

            def forward(self, x):
                results_available = True
                if x.sum() > -1:
                    results_available = False
                if results_available:
                    x = self.conv(x)
                    x = self.bn(x)
                return x

        model = Fuse()
        x = torch.randn(2, 5, 9, requires_grad=True)
        self.run_test(
            torch.jit.script(model),
            (x,),
            input_names=["x"],
            dynamic_axes={"x": [0, 2]},
            rtol=1e-3,
            atol=1e-6,
        )
    # Export of the time-batch-channel convolution primitive torch.conv_tbc.
    def test_conv_tbc(self):
        from torch.nn.modules.utils import _single

        class ConvTBC(torch.nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, padding=0):
                super(ConvTBC, self).__init__()
                self.in_channels = in_channels
                self.out_channels = out_channels
                self.kernel_size = _single(kernel_size)
                self.padding = _single(padding)
                self.weight = torch.nn.Parameter(
                    Tensor(self.kernel_size[0], in_channels, out_channels)
                )
                self.bias = torch.nn.Parameter(Tensor(out_channels))
                self.reset_parameters()

            def reset_parameters(self):
                torch.nn.init.xavier_normal_(self.weight)
                torch.nn.init.zeros_(self.bias)

            def conv_tbc(self, input):
                return torch.conv_tbc(
                    input.contiguous(), self.weight, self.bias, self.padding[0]
                )

            def forward(self, input):
                return self.conv_tbc(input)

        in_channels = 3
        out_channels = 5
        kernel_size = 5
        model = ConvTBC(in_channels, out_channels, kernel_size, padding=0)
        x = torch.randn(10, 7, in_channels, requires_grad=True)
        self.run_test(model, (x,), atol=1e-5)

    # A reshape of a registered buffer should be constant-folded at export.
    def test_reshape_constant_fold(self):
        class Reshape(torch.nn.Module):
            def __init__(
                self,
            ):
                super(Reshape, self).__init__()
                self.register_buffer("weight", torch.ones(5))

            def forward(self, x):
                scale_1 = self.weight.reshape(1, -1, 1, 1)
                return x * scale_1

        x = torch.randn(4, 5)
        self.run_test(Reshape(), (x,), rtol=1e-3, atol=1e-5)
    def run_word_language_model(self, model_name):
        """Build the word_language_model RNN variant named ``model_name``
        (e.g. "GRU", "LSTM", "RNN_TANH", "RNN_RELU") and run it through
        ``run_test`` with a deterministic token input and the model's initial
        hidden state."""
        ntokens = 50
        emsize = 5
        nhid = 5
        nlayers = 5
        dropout = 0.2
        tied = False
        batchsize = 5
        # GRU hidden state is a plain tensor; LSTM's is a (h, c) tuple, so the
        # helper models wrap them differently for export.
        if model_name == "GRU":
            model = word_language_model.RNNModelWithTensorHidden(
                model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
            )
        elif model_name == "LSTM":
            model = word_language_model.RNNModelWithTupleHidden(
                model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
            )
        else:
            model = word_language_model.RNNModel(
                model_name, ntokens, emsize, nhid, nlayers, dropout, tied, batchsize
            )
        x = torch.arange(0, ntokens).long().view(-1, batchsize)
        # Only support CPU version, since tracer is not working in GPU RNN.
        self.run_test(model, (x, model.hidden))
def get_image(self, rel_path: str, size: Tuple[int, int]) -> Tensor:
import os
from PIL import Image
from torchvision import transforms
data_dir = os.path.join(os.path.dirname(__file__), "assets")
path = os.path.join(data_dir, *rel_path.split("/"))
image = Image.open(path).convert("RGB").resize(size, Image.BILINEAR)
return transforms.ToTensor()(image)
def get_test_images(self) -> Tuple[List[Tensor], List[Tensor]]:
return (
[self.get_image("grace_hopper_517x606.jpg", (100, 320))],
[self.get_image("rgb_pytorch.png", (250, 380))],
)
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()  # Faster RCNN model is not scriptable
    def test_faster_rcnn(self):
        # End-to-end export of torchvision Faster R-CNN, including dynamic
        # image sizes and re-running the exported model on differently-shaped
        # inputs via test_with_inputs.
        model = torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(
            pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
        )
        model.eval()
        x1 = torch.randn(3, 200, 300, requires_grad=True)
        x2 = torch.randn(3, 200, 300, requires_grad=True)
        self.run_test(model, ([x1, x2],), rtol=1e-3, atol=1e-5)
        self.run_test(
            model,
            ([x1, x2],),
            input_names=["images_tensors"],
            output_names=["outputs"],
            dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
            rtol=1e-3,
            atol=1e-5,
        )
        dummy_image = [torch.ones(3, 100, 100) * 0.3]
        images, test_images = self.get_test_images()
        self.run_test(
            model,
            (images,),
            test_with_inputs=[(images,), (test_images,), (dummy_image,)],
            input_names=["images_tensors"],
            output_names=["outputs"],
            dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
            rtol=1e-3,
            atol=1e-5,
        )
        self.run_test(
            model,
            (dummy_image,),
            test_with_inputs=[(dummy_image,), (images,)],
            input_names=["images_tensors"],
            output_names=["outputs"],
            dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]},
            rtol=1e-3,
            atol=1e-5,
        )
def test_paste_mask_in_image(self):
masks = torch.rand(10, 1, 26, 26)
boxes = torch.rand(10, 4)
boxes[:, 2:] += torch.rand(10, 2)
boxes *= 50
o_im_s = (100, 100)
from torchvision.models.detection.roi_heads import paste_masks_in_image
out = paste_masks_in_image(masks, boxes, o_im_s)
jit_trace = torch.jit.trace(
paste_masks_in_image,
(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])]),
)
out_trace = jit_trace(
masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])]
)
assert torch.all(out.eq(out_trace))
masks2 = torch.rand(20, 1, 26, 26)
boxes2 = torch.rand(20, 4)
boxes2[:, 2:] += torch.rand(20, 2)
boxes2 *= 100
o_im_s2 = (200, 200)
from torchvision.models.detection.roi_heads import paste_masks_in_image
out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)
out_trace2 = jit_trace(
masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])]
)
assert torch.all(out2.eq(out_trace2))
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_mask_rcnn(self):
        # End-to-end export of torchvision Mask R-CNN with named, dynamic
        # detection outputs (boxes/labels/scores/masks).
        model = torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(
            pretrained=False, pretrained_backbone=True, min_size=200, max_size=300
        )
        images, test_images = self.get_test_images()
        self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
        self.run_test(
            model,
            (images,),
            input_names=["images_tensors"],
            output_names=["boxes", "labels", "scores", "masks"],
            dynamic_axes={
                "images_tensors": [0, 1, 2],
                "boxes": [0, 1],
                "labels": [0],
                "scores": [0],
                "masks": [0, 1, 2],
            },
            rtol=1e-3,
            atol=1e-5,
        )
        dummy_image = [torch.ones(3, 100, 100) * 0.3]
        self.run_test(
            model,
            (images,),
            test_with_inputs=[(images,), (test_images,), (dummy_image,)],
            input_names=["images_tensors"],
            output_names=["boxes", "labels", "scores", "masks"],
            dynamic_axes={
                "images_tensors": [0, 1, 2],
                "boxes": [0, 1],
                "labels": [0],
                "scores": [0],
                "masks": [0, 1, 2],
            },
            rtol=1e-3,
            atol=1e-5,
        )
        self.run_test(
            model,
            (dummy_image,),
            test_with_inputs=[(dummy_image,), (images,)],
            input_names=["images_tensors"],
            output_names=["boxes", "labels", "scores", "masks"],
            dynamic_axes={
                "images_tensors": [0, 1, 2],
                "boxes": [0, 1],
                "labels": [0],
                "scores": [0],
                "masks": [0, 1, 2],
            },
            rtol=1e-3,
            atol=1e-5,
        )
def test_heatmaps_to_keypoints(self):
maps = torch.rand(10, 1, 26, 26)
rois = torch.rand(10, 4)
from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
out = heatmaps_to_keypoints(maps, rois)
jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))
out_trace = jit_trace(maps, rois)
assert torch.all(out[0].eq(out_trace[0]))
assert torch.all(out[1].eq(out_trace[1]))
maps2 = torch.rand(20, 2, 21, 21)
rois2 = torch.rand(20, 4)
from torchvision.models.detection.roi_heads import heatmaps_to_keypoints
out2 = heatmaps_to_keypoints(maps2, rois2)
out_trace2 = jit_trace(maps2, rois2)
assert torch.all(out2[0].eq(out_trace2[0]))
assert torch.all(out2[1].eq(out_trace2[1]))
    @unittest.skip("Failing, see https://github.com/pytorch/pytorch/issues/66528")
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_keypoint_rcnn(self):
        # End-to-end export of torchvision Keypoint R-CNN with dynamic image
        # sizes (currently skipped; see linked issue).
        model = torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(
            pretrained=False, pretrained_backbone=False, min_size=200, max_size=300
        )
        images, test_images = self.get_test_images()
        self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
        self.run_test(
            model,
            (images,),
            input_names=["images_tensors"],
            output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
            dynamic_axes={"images_tensors": [0, 1, 2]},
            rtol=1e-3,
            atol=1e-5,
        )
        dummy_images = [torch.ones(3, 100, 100) * 0.3]
        self.run_test(
            model,
            (images,),
            test_with_inputs=[(images,), (test_images,), (dummy_images,)],
            input_names=["images_tensors"],
            output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
            dynamic_axes={"images_tensors": [0, 1, 2]},
            rtol=5e-3,
            atol=1e-5,
        )
        self.run_test(
            model,
            (dummy_images,),
            test_with_inputs=[(dummy_images,), (test_images,)],
            input_names=["images_tensors"],
            output_names=["outputs1", "outputs2", "outputs3", "outputs4"],
            dynamic_axes={"images_tensors": [0, 1, 2]},
            rtol=5e-3,
            atol=1e-5,
        )
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest()
def test_shufflenet_v2_dynamic_axes(self):
model = torchvision.models.shufflenet_v2_x0_5(pretrained=False)
dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True)
self.run_test(
model,
(dummy_input,),
test_with_inputs=[(dummy_input,), (test_inputs,)],
input_names=["input_images"],
output_names=["outputs"],
dynamic_axes={
"input_images": {0: "batch_size"},
"output": {0: "batch_size"},
},
rtol=1e-3,
atol=1e-5,
)
    @skipScriptTest()
    def test_mobilenet_v3(self):
        # Plain (non-quantized weights) export of the quantization-ready
        # MobileNetV3 architecture.
        model = torchvision.models.quantization.mobilenet_v3_large(pretrained=False)
        dummy_input = torch.randn(1, 3, 224, 224)
        self.run_test(model, (dummy_input,))

    @unittest.skip(
        "Unstable loading pretrained quantized mobilenet v3: https://github.com/pytorch/vision/issues/5303"
    )
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipScriptTest()
    def test_mobilenet_v3_quant(self):
        # Quantized MobileNetV3 export on a real image; only the top-1 class
        # id is compared because quantization error perturbs raw scores.
        model = torchvision.models.quantization.mobilenet_v3_large(
            pretrained=True, quantize=True
        )
        from PIL import Image
        from torchvision import transforms

        data_dir = os.path.join(os.path.dirname(__file__), "assets")
        path = os.path.join(data_dir, "grace_hopper_517x606.jpg")
        input_image = Image.open(path)
        # Based on example from https://pytorch.org/hub/pytorch_vision_resnet/
        preprocess = transforms.Compose(
            [
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        )
        input_tensor = preprocess(input_image).unsqueeze(0)

        # Due to precision error from quantization, check only that the top prediction matches.
        class TopPredictor(torch.nn.Module):
            def __init__(self, mobilenet):
                super().__init__()
                self.mobilenet = mobilenet

            def forward(self, x):
                x = self.mobilenet(x)
                _, topk_catid = torch.topk(x[0], 1)
                return topk_catid

        # Currently, we need convert the model to ScriptModule before export.
        # The reason is that PackedParams contains int (not tensor).
        # Then it fails when the exporter calls _trace_and_get_graph_from_model().
        # TODO: https://msdata.visualstudio.com/Vienna/_workitems/edit/1547858
        model = torch.jit.trace(TopPredictor(model), input_tensor)
        self.run_test(model, (input_tensor,))
    # Thin wrappers that exercise run_word_language_model per RNN flavor.
    @skipScriptTest()
    def test_word_language_model_RNN_TANH(self):
        self.run_word_language_model("RNN_TANH")

    @skipScriptTest()
    def test_word_language_model_RNN_RELU(self):
        self.run_word_language_model("RNN_RELU")

    @skipScriptTest()  # scripting prim::unchecked_cast prim::setattr
    def test_word_language_model_LSTM(self):
        self.run_word_language_model("LSTM")

    def test_word_language_model_GRU(self):
        self.run_word_language_model("GRU")
    # Basic indexing/slicing export tests on a 5-D tensor.
    def test_index_1d(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[0]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)

    def test_index_2d_1dimslice(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[0:1, :]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)

    def test_index_2d_sliceint(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[1, :]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)

    def test_index_2d_neg_slice(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[0:-1, :]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)
    # Boolean/uint8 mask indexing along the first dimension.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_mask(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[torch.tensor([0, 1, 0], dtype=torch.uint8)]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)

        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[torch.tensor([0, 1, 0], dtype=torch.bool)]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)

    # Output depends only on the input's shape (via .data.size()), so the
    # ONNX graph keeps no inputs after constant folding.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_data(self):
        class Data(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x.new_zeros(x.data.size())

        x = torch.randn(3, 4)
        self.run_test(Data(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(Data(), x, remained_onnx_input_idx=[])

    # Data-dependent boolean mask (nonzero result shape varies with values).
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_mask_nd(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[input > 0]

        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), m1)
    # Dict inputs/outputs: a trailing empty dict marks "no kwargs" for export.
    @skipScriptTest()
    def test_dict(self):
        class MyModel(torch.nn.Module):
            def forward(self, x_in):
                x_out = {}
                x_out["test_key_out"] = torch.add(
                    x_in[list(x_in.keys())[0]], list(x_in.keys())[0]
                )
                return x_out

        x = {torch.tensor(1.0): torch.randn(1, 2, 3)}
        self.run_test(MyModel(), (x, {}))

    @skipScriptTest()
    def test_dict_str(self):
        class MyModel(torch.nn.Module):
            def forward(self, x_in):
                x_out = {}
                x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.0)
                return x_out

        x = {"test_key_in": torch.randn(1, 2, 3)}
        self.run_test(MyModel(), (x, {}))

    # OrderedDict-derived model-output container mixing tensor/tuple/list.
    @skipScriptTest()  # User-defined class not supported
    def test_dict_output(self):
        class DictModelOutput(OrderedDict):
            tensor_out: Tensor
            tuple_out: Optional[Tuple[Tensor]] = None
            list_out: Optional[List[Tensor]] = None

        class MyModel(torch.nn.Module):
            def forward(self, a, b, c, d):
                return DictModelOutput(
                    tensor_out=a,
                    tuple_out=(b, c),
                    list_out=[d],
                )

        a = torch.randn(2, 3)
        b = torch.randn(2, 3)
        c = torch.randn(2, 3)
        d = torch.randn(2, 3)
        self.run_test(MyModel(), (a, b, c, d))
    # Tuple inputs/outputs in various (nested) shapes.
    def test_tuple_output(self):
        class MyModel(torch.nn.Module):
            def forward(self, a, b, c, d):
                return a, (b, c), d

        a = torch.randn(2, 3)
        b = torch.randn(2, 3)
        c = torch.randn(2, 3)
        d = torch.randn(2, 3)
        self.run_test(MyModel(), (a, b, c, d))

    def test_nested_tuple_output(self):
        class MyModel(torch.nn.Module):
            def forward(self, a, b, c, d):
                return a, ((b,), (c, d))

        a = torch.randn(2, 3)
        b = torch.randn(2, 3)
        c = torch.randn(2, 3)
        d = torch.randn(2, 3)
        self.run_test(MyModel(), (a, b, c, d))

    def test_tuple_input(self):
        class TupleModel(torch.nn.Module):
            def forward(self, a: Tuple[Tensor, Tensor]):
                return a

        x = (torch.randn(3, 4), torch.randn(4, 3))
        self.run_test(TupleModel(), input=(x,))

    def test_tuple_primitive_input(self):
        class TupleModel(torch.nn.Module):
            def forward(self, a: Tuple[int, Tensor], b):
                return a[0], a[1] + b

        x = (3, torch.randn(4, 3))
        y = torch.randn(4, 3)
        self.run_test(TupleModel(), input=(x, y))

    def test_nested_tuple_input(self):
        class NestedTupleModel(torch.nn.Module):
            def forward(self, a, b: Tuple[Tensor, Tuple[Tensor, Tensor]]):
                return a + b[0] + b[1][0] + b[1][1]

        x = torch.randn(4, 5)
        y = (torch.randn(4, 5), (torch.randn(1, 5), torch.randn(4, 1)))
        self.run_test(NestedTupleModel(), input=(x, y))

    # A trailing empty dict (no kwargs) must be accepted and ignored.
    def test_empty_kwargs(self):
        class IdentityModel(torch.nn.Module):
            def forward(self, input):
                return input

        self.run_test(IdentityModel(), (torch.randn(2, 3), {}))
    # Optional[Tensor] parameters with None defaults, exercised both
    # positionally and through a kwargs dict.
    @skipScriptTest()  # Needs https://github.com/pytorch/rfcs/pull/21
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_mixed_optional_default_none(self):
        class Model(torch.nn.Module):
            def forward(
                self, x, y: Optional[Tensor] = None, z: Optional[Tensor] = None
            ):
                if y is not None:
                    return x + y
                if z is not None:
                    return x + z
                return x

        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        z = torch.randn(2, 3)
        model = Model()
        # Without kwargs dict.
        self.run_test(model, (x, y, None))
        self.run_test(model, (x, None, z))
        # With kwargs dict.
        self.run_test(model, (x, {"y": y, "z": None}))
        self.run_test(model, (x, {"y": None, "z": z}))
        self.run_test(model, (x, {"z": z}))
        self.run_test(model, (x, {"y": y}))

    # Same shape of test but with tensor-valued defaults; tracing drops the
    # unused optional, hence the separate _script variant below.
    @skipScriptTest()  # tracing eliminates None inputs so it works differently. See _script version below.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_mixed_optional_default_tensor(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x,
                y: Optional[Tensor] = torch.ones(2, 3),
                z: Optional[Tensor] = torch.zeros(2, 3),
            ):
                if y is not None:
                    return x + y
                if z is not None:
                    return x + z
                return x

        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        z = torch.randn(2, 3)
        model = Model()
        self.run_test(model, (x, y, None))
        self.run_test(model, (x, None, z))
    # Scripted counterpart of test_mixed_optional_default_tensor: all three
    # inputs survive export, so None arguments are rejected.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_mixed_optional_default_tensor_script(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x,
                y: Optional[Tensor] = torch.ones(2, 3),
                z: Optional[Tensor] = torch.zeros(2, 3),
            ):
                if y is not None:
                    return x + y
                if z is not None:
                    return x + z
                return x

        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        z = torch.randn(2, 3)
        model = torch.jit.script(Model())
        self.run_test(model, (x, y, z), input_names=("x", "y", "z"))
        self.run_test(model, (x, {"y": y, "z": z}), input_names=("x", "y", "z"))
        # Requires input_names to be set so that we can feed the inputs properly into ORT.
        # TODO: Export default values as ONNX initializers, then this should not raise.
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/969268
        # Default values are accessible via FunctionSchema.
        with self.assertRaisesRegex(
            ValueError, "Model requires 3 inputs. Input Feed contains 2"
        ):
            self.run_test(model, (x, {"y": y}), input_names=("x", "y"))
        # Any None in the flattened args must be rejected for scripted export.
        for example_inputs in (
            (x, y, None),
            (x, None, z),
            (x, {"y": y, "z": None}),
            (x, {"y": None, "z": z}),
        ):
            with self.assertRaisesRegex(
                ValueError, "args contained 1 None's after flattening."
            ):
                self.run_test(model, example_inputs, input_names=("x", "y", "z"))
    # Models whose every parameter is Optional[Tensor].
    @skipScriptTest()  # Needs https://github.com/pytorch/rfcs/pull/21
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_all_optional_default_none(self):
        class Model(torch.nn.Module):
            def forward(self, x: Optional[Tensor] = None, y: Optional[Tensor] = None):
                if x is not None:
                    return x
                if y is not None:
                    return y
                else:
                    return torch.tensor(-1.0)

        x = torch.randn(2, 3)
        model = Model()
        self.run_test(model, (x, None))
        self.run_test(
            model,
            ({"x": x, "y": None},),
            # y disappears in tracing.
            input_names=("x",),
        )

    @skipScriptTest()  # tracing eliminates None inputs so it works differently. See _script version below.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_all_optional_default_tensor(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x: Optional[Tensor] = torch.ones(2, 3),
                y: Optional[Tensor] = torch.zeros(2, 3),
            ):
                if x is not None:
                    return x
                elif y is not None:
                    return y
                else:
                    return torch.tensor(-1.0)

        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        model = Model()
        self.run_test(model, (x, None))
        self.run_test(model, (None, y))
        # tracing means y is never used so it's removed from the exported model inputs,
        # and we fail when trying to run ORT.
        with self.assertRaisesRegex(ValueError, "got too many positional inputs"):
            self.run_test(model, (x, y))
    # Scripted counterpart: both optionals remain exported inputs.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_all_optional_default_tensor_script(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x: Optional[Tensor] = torch.ones(2, 3),
                y: Optional[Tensor] = torch.zeros(2, 3),
            ):
                if x is not None:
                    return x
                elif y is not None:
                    return y
                else:
                    return torch.tensor(-1.0)

        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        model = torch.jit.script(Model())
        # TODO: Export default values as ONNX initializers, then this should not raise.
        # https://msdata.visualstudio.com/Vienna/_workitems/edit/969268
        # Default values are accessible via FunctionSchema.
        with self.assertRaisesRegex(
            ValueError, "Model requires 2 inputs. Input Feed contains 1"
        ):
            self.run_test(model, (x,))
            self.run_test(model, ({"y": y},))
        self.run_test(model, (x, y))
        self.run_test(model, ({"x": x, "y": y},), input_names=("x", "y"))
    # Optional[Tensor] without defaults, passed explicitly as None or a value.
    @skipScriptTest()  # Needs https://github.com/pytorch/rfcs/pull/21
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_mixed_optional(self):
        class Model(torch.nn.Module):
            def forward(self, x, y: Optional[Tensor]):
                if y is not None:
                    return x + y
                return x

        x = torch.randn(2, 3)
        model = Model()
        self.run_test(model, (x, None))
        self.run_test(model, (x, x))

    # Tuple whose elements are Optional[Tensor].
    @skipScriptTest()  # Needs https://github.com/pytorch/rfcs/pull/21
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_tuple_of_optional(self):
        class Model(torch.nn.Module):
            def forward(self, x, y: Tuple[Optional[Tensor], Optional[Tensor]]):
                if y[0] is not None:
                    return x + y[0]
                if y[1] is not None:
                    return x + y[1]
                return x

        x = torch.randn(2, 3)
        y1 = torch.randn(2, 3)
        self.run_test(Model(), (x, (None, y1)))
    # Tuple of optionals with tensor-valued defaults (traced variant).
    @skipScriptTest()  # tracing eliminates None inputs so it works differently. See _script version below.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_tuple_of_optional_default_tensor(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x,
                y: Tuple[Optional[Tensor], Optional[Tensor]] = (
                    torch.zeros(2, 3),
                    torch.zeros(2, 3),
                ),
            ):
                y0, y1 = y
                if y0 is not None:
                    return x + y0
                if y1 is not None:
                    return x + y1
                return x

        x = torch.randn(2, 3)
        y1 = torch.randn(2, 3)
        self.run_test(Model(), (x, (None, y1)))

    # Scripted counterpart: None inside the tuple is rejected after
    # flattening, while fully-populated tuples export and run.
    @skipIfUnsupportedMinOpsetVersion(15)
    def test_tuple_of_optional_default_tensor_script(self):
        class Model(torch.nn.Module):
            def forward(
                self,
                x,
                y: Tuple[Optional[Tensor], Optional[Tensor]] = (
                    torch.zeros(2, 3),
                    torch.zeros(2, 3),
                ),
            ):
                y0, y1 = y
                if y0 is not None:
                    return x + y0
                if y1 is not None:
                    return x + y1
                return x

        x = torch.randn(2, 3)
        y0 = torch.randn(2, 3)
        y1 = torch.randn(2, 3)
        model = torch.jit.script(Model())
        with self.assertRaisesRegex(
            ValueError, "args contained 1 None's after flattening."
        ):
            self.run_test(model, (x, (None, y1)))
        self.run_test(model, (x, (y0, y1)))
        # export succeeds, but running ORT through run_test would fail because the exported model
        # has the inputs flattened into 3 inputs.
        torch.onnx.export(
            model, (x, {"y": (y0, y1)}), io.BytesIO(), opset_version=self.opset_version
        )
    # Non-tensor (primitive) model inputs: int, float, and scripted bool.
    def test_primitive_input_integer(self):
        class Model(torch.nn.Module):
            def forward(self, x: int, y):
                return x + y

        x = 3
        y = torch.randint(10, (2, 3, 4))
        self.run_test(Model(), (x, y))

    def test_primitive_input_floating(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x: float, y):
                return x + y

        x = 3.0
        y = torch.randn(2, 3, 4)
        self.run_test(Model(), (x, y))

    def test_primitive_input_bool(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, flag: bool, x, y):
                if flag:
                    return x
                else:
                    return y

        # Scripting is required so the bool drives real control flow.
        flag = True
        x = torch.randn(2, 3, 4)
        y = torch.randn(2, 3, 4)
        self.run_test(torch.jit.script(Model()), (flag, x, y))
    # Shape-only outputs: zeros/ones built from input sizes fold to constants,
    # so the exported graph can keep no inputs (remained_onnx_input_idx=[]).
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_cste_script(self):
        class MyModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.zeros(x.size(0)), torch.ones(
                    (x.size(1), x.size(0)), dtype=torch.int64
                )

        x = torch.randn(3, 4)
        self.run_test(MyModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(MyModel(), x, remained_onnx_input_idx=[])

    # torch.scalar_tensor from input sizes, re-run on a different shape.
    def test_scalar_tensor(self):
        class test(torch.nn.Module):
            def forward(self, input):
                return torch.scalar_tensor(input.size(0)), torch.scalar_tensor(
                    input.size(1), dtype=torch.int64
                )

        x = torch.randn(2, 3, 4)
        y = torch.randn(7, 8, 9)
        model = test()
        self.run_test(
            model,
            x,
            test_with_inputs=[y],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2]},
        )
    # torch.tensor(...) construction from scalars, shapes, and mixed sources.
    def test_tensor(self):
        # Shape-derived scalar: output depends only on the input shape.
        class ScalarInputModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.tensor(input.shape[1])

        x = torch.randn(3, 4)
        self.run_test(
            ScalarInputModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
        )
        self.run_test(ScalarInputModel(), x, remained_onnx_input_idx=[])

        # 1-D tensor built from both dims of the input shape.
        class TensorInputModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.tensor([input.shape[0], input.shape[1]])

        x = torch.randn(3, 4)
        self.run_test(
            TensorInputModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
        )
        self.run_test(TensorInputModel(), x, remained_onnx_input_idx=[])

        # Value-derived tensor from a 1-element input.
        class FloatInputModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.tensor([float(input)])

        x = torch.randn(1)
        self.run_test(FloatInputModel(), x)

        # Explicit dtype on a shape-derived scalar.
        class InputWithDtypeModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.tensor(input.shape[1], dtype=torch.long)

        x = torch.randn(3, 4)
        self.run_test(
            InputWithDtypeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]}
        )
        self.run_test(InputWithDtypeModel(), x, remained_onnx_input_idx=[])

        # Mix of shape-derived and value-derived elements.
        class MixedInputModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.tensor([input.shape[0], int(input)])

        x = torch.randn(1)
        self.run_test(MixedInputModel(), x)
    # Element-wise activation exports, each checked on typical values plus
    # scalar edge cases around the activation's thresholds.
    def test_hardtanh(self):
        model = torch.nn.Hardtanh(-1.5, 2.5)
        x = torch.arange(-5, 5).to(dtype=torch.float32)
        self.run_test(model, x)

    def test_hardtanh_script_with_default_values(self):
        class MyModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.nn.functional.hardtanh(x)

        x = torch.arange(-5, 5).to(dtype=torch.float32)
        self.run_test(MyModel(), x)

    def test_hardswish(self):
        model = torch.nn.Hardswish()
        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(model, x)
        # Testing edge cases
        x = torch.tensor(3).to(dtype=torch.float32)
        self.run_test(model, x)
        x = torch.tensor(-3).to(dtype=torch.float32)
        self.run_test(model, x)

    def test_hardswish_script(self):
        class MyModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.nn.functional.hardswish(x)

        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(MyModel(), x)

    def test_hardsigmoid(self):
        model = torch.nn.Hardsigmoid()
        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(model, x)
        # corner cases
        x = torch.tensor(3).to(dtype=torch.float32)
        self.run_test(model, x)
        x = torch.tensor(-3).to(dtype=torch.float32)
        self.run_test(model, x)

    def test_tanhshrink(self):
        model = torch.nn.Tanhshrink()
        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(model, x)

    @skipIfUnsupportedMinOpsetVersion(9)
    def test_hardshrink(self):
        model = torch.nn.Hardshrink()
        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(model, x)
        # Testing edge cases
        x = torch.tensor(0.5).to(dtype=torch.float32)
        self.run_test(model, x)
        x = torch.tensor(-0.5).to(dtype=torch.float32)
        self.run_test(model, x)

    @skipIfUnsupportedMinOpsetVersion(9)
    def test_softshrink(self):
        model = torch.nn.Softshrink()
        x = torch.rand(3, 3).to(dtype=torch.float32)
        self.run_test(model, x)
        # Testing edge cases
        x = torch.tensor(0.5).to(dtype=torch.float32)
        self.run_test(model, x)
        x = torch.tensor(-0.5).to(dtype=torch.float32)
        self.run_test(model, x)
    # torch.clamp with both bounds, min only, and max only.
    def test_clamp(self):
        class ClampModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(-0.5, 0.5)
        x = torch.randn(3, 4)
        self.run_test(ClampModel(), x)
        class ClampMinModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(min=-0.5)
        x = torch.randn(3, 4)
        self.run_test(ClampMinModel(), x)
        class ClampMaxModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(max=0.5)
        x = torch.randn(3, 4)
        self.run_test(ClampMaxModel(), x)
    # clamp with runtime (non-constant) bounds: size()-derived scalars and
    # full tensor min/max operands.
    @skipIfUnsupportedMinOpsetVersion(8)
    def test_clamp_dyn(self):
        class ClampMaxModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x.clamp(None, x.size(0))
        x = torch.arange(16).view(4, 4).float()
        self.run_test(ClampMaxModel(), x)
        class ClampMinModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x.clamp(x.size(0), None)
        x = torch.arange(16).view(4, 4).float()
        self.run_test(ClampMinModel(), x)
        class ClampMinMaxModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x.clamp(x.size(0), x.size(1))
        x = torch.arange(16).view(2, 8).float()
        self.run_test(ClampMinMaxModel(), x)
        class ClampTensorModel(torch.nn.Module):
            def forward(self, x, min, max):
                return x.clamp(min, max)
        x = torch.randn(3, 4)
        y = torch.randn(3, 4)
        z = torch.randn(3, 4)
        self.run_test(ClampTensorModel(), (x, y, z))
        class ClampTensorMinModel(torch.nn.Module):
            def forward(self, x, min):
                return x.clamp(min=min)
        self.run_test(ClampTensorMinModel(), (x, y))
        class ClampTensorMaxModel(torch.nn.Module):
            def forward(self, x, max):
                return x.clamp(max=max)
        self.run_test(ClampTensorMaxModel(), (x, z))
    # torch.full with a runtime fill value, traced.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_full_trace(self):
        class FullModel(torch.nn.Module):
            def forward(self, x):
                return torch.full((3, 4), x, dtype=torch.long)
        x = torch.tensor(12)
        self.run_test(FullModel(), x)
    # torch.full with a runtime fill value, scripted.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_full_script(self):
        class FullModelScripting(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.full((3, 4), x, dtype=torch.long)
        x = torch.tensor(12)
        self.run_test(FullModelScripting(), x)
    # mm followed by add — the pattern the exporter may fuse into addmm/Gemm.
    def test_fuse_addmm(self):
        class AddmmModel(torch.nn.Module):
            def forward(self, x):
                return torch.mm(x, x) + x
        x = torch.ones(3, 3)
        self.run_test(AddmmModel(), x)
def test_maxpool(self):
model = torch.nn.MaxPool1d(2, stride=1)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
    # Conv1d/2d/3d with mixed stride/padding/dilation settings, traced.
    def test_conv(self):
        class TraceModel(torch.nn.Module):
            def __init__(self):
                super(TraceModel, self).__init__()
                self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
                self.conv2 = torch.nn.Conv2d(
                    16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
                )
                self.conv3 = torch.nn.Conv3d(
                    16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)
                )
            def forward(self, input1, input2, input3):
                return self.conv1(input1), self.conv2(input2), self.conv3(input3)
        x1 = torch.randn(20, 16, 50)
        x2 = torch.randn(20, 16, 50, 100)
        x3 = torch.randn(20, 16, 10, 50, 100)
        self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
    # Conv2d followed by an elementwise add, with a dynamic batch dim, to
    # exercise ONNX shape inference through the conv output.
    def test_conv_shape_inference(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.conv2 = torch.nn.Conv2d(
                    16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
                )
            def forward(self, input):
                return self.conv2(input) + 2
        x = torch.randn(20, 16, 50, 100)
        self.run_test(
            Model(), x, atol=10e-5, input_names=["x"], dynamic_axes={"x": [0]}
        )
    # ConvTranspose1d/2d/3d with mixed stride/padding/dilation settings.
    def test_conv_transpose(self):
        class TraceModel(torch.nn.Module):
            def __init__(self):
                super(TraceModel, self).__init__()
                self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
                self.conv2 = torch.nn.ConvTranspose2d(
                    16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
                )
                self.conv3 = torch.nn.ConvTranspose3d(
                    16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)
                )
            def forward(self, input1, input2, input3):
                return self.conv1(input1), self.conv2(input2), self.conv3(input3)
        x1 = torch.randn(20, 16, 50)
        x2 = torch.randn(20, 16, 50, 100)
        x3 = torch.randn(20, 16, 10, 50, 100)
        self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
    # Conversion of Transpose depends on input shape to be known.
    # The following test only works when onnx shape inference is enabled.
    def test_transpose_infer_shape(self):
        class TransposeModule(torch.jit.ScriptModule):
            def __init__(self):
                super(TransposeModule, self).__init__()
                self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)
            @torch.jit.script_method
            def forward(self, x):
                x = self.conv(x)
                return x.transpose(0, 1)
        x = torch.randn(32, 3, 64, 64)
        y = torch.randn(16, 3, 8, 64)
        self.run_test(
            TransposeModule(),
            x,
            input_names=["x"],
            dynamic_axes={"x": [0, 2]},
            test_with_inputs=[y],
        )
    # Shared driver: exports Squeeze(d) (full squeeze when d is None) on x1,
    # optionally re-running the exported graph on x2 with all axes dynamic.
    def squeeze_model_tests(self, d, x1, x2):
        class Squeeze(torch.nn.Module):
            def __init__(self, d):
                super(Squeeze, self).__init__()
                self.d = d
            def forward(self, x):
                if self.d is not None:
                    return torch.squeeze(x, dim=self.d)
                else:
                    return torch.squeeze(x)
        x2 = [] if x2 is None else [x2]
        if len(x2) > 0:
            self.run_test(
                Squeeze(d),
                x1,
                input_names=["input"],
                dynamic_axes={"input": {0: "0", 1: "1", 2: "2"}},
                test_with_inputs=x2,
            )
        else:
            self.run_test(Squeeze(d), x1)
    # squeeze on a dim that is statically 1 (no second input).
    def test_squeeze_without_no_op(self):
        x = torch.randn(2, 1, 4)
        self.squeeze_model_tests(1, x, None)
    # squeeze where the target dim is 1 for one input and >1 for another.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_squeeze_dynamic(self):
        x_squeeze = torch.randn(2, 1, 4)
        x_noop = torch.randn(2, 2, 3)
        self.squeeze_model_tests(1, x_squeeze, x_noop)
    # squeeze with a negative dim index.
    def test_squeeze_neg_without_no_op(self):
        x = torch.randn(2, 1, 4)
        self.squeeze_model_tests(-2, x, None)
    # negative-dim squeeze plus a second, non-squeezable input.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_squeeze_neg(self):
        x_squeeze = torch.randn(2, 1, 4)
        x_noop = torch.randn(2, 2, 3)
        self.squeeze_model_tests(-2, x_squeeze, x_noop)
    # dim=None: squeeze all size-1 dims.
    def test_squeeze_all_dims(self):
        x_squeeze = torch.randn(2, 1, 4)
        x_noop = torch.randn(2, 2, 3)
        self.squeeze_model_tests(None, x_squeeze, x_noop)
    # first input leaves the dim untouched; second input actually squeezes.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_squeeze_no_op(self):
        x_noop = torch.randn(2, 1, 4)
        x_squeeze = torch.randn(2, 2, 1)
        self.squeeze_model_tests(2, x_noop, x_squeeze)
    # squeeze of a dim whose size is only known at runtime (built from
    # tensor-valued sizes), exercised in both directions.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_squeeze_runtime_dim(self):
        class Squeeze(torch.nn.Module):
            def forward(self, d1, d2):
                t = torch.zeros(d1[0], d2[0])
                return t.squeeze(0)
        d1 = torch.tensor([1])
        d3 = torch.tensor([3])
        d4 = torch.tensor([4])
        self.run_test(Squeeze(), (d1, d4), test_with_inputs=[(d3, d4)])
        self.run_test(Squeeze(), (d3, d4), test_with_inputs=[(d1, d3)])
    # basic squeeze with a negative dim.
    def test_squeeze(self):
        class Squeeze(torch.nn.Module):
            def forward(self, x):
                return torch.squeeze(x, dim=-2)
        x = torch.randn(2, 1, 4)
        self.run_test(Squeeze(), x)
    # squeeze with the dim passed as a model input (opset >= 13).
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_squeeze_dynamic_dim(self):
        class Squeeze(torch.nn.Module):
            def forward(self, x, dim: int):
                return torch.squeeze(x, dim)
        x = torch.randn(2, 1, 4)
        dim = 1
        self.run_test(Squeeze(), (x, dim))
    # basic unsqueeze with a negative dim.
    def test_unsqueeze(self):
        class Unsqueeze(torch.nn.Module):
            def forward(self, x):
                return torch.unsqueeze(x, dim=-2)
        x = torch.randn(2, 3, 4)
        self.run_test(Unsqueeze(), x)
    # unsqueeze with the dim passed as a model input (opset >= 13).
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_unsqueeze_dynamic_dim(self):
        class Unsqueeze(torch.nn.Module):
            def forward(self, x, dim: int):
                return torch.unsqueeze(x, dim)
        x = torch.randn(2, 1, 4)
        dim = -1
        self.run_test(Unsqueeze(), (x, dim))
    # functional max_pool2d with default stride (= kernel size).
    def test_maxpool_default_stride(self):
        class MaxPoolModel(torch.nn.Module):
            def forward(self, x):
                return torch.nn.functional.max_pool2d(x, 2)
        model = MaxPoolModel()
        x = torch.randn(10, 20, 16, 50)
        self.run_test(model, x)
    # AdaptiveMaxPool1d with a dynamic batch dim, re-run on a second shape.
    @skipIfUnsupportedMinOpsetVersion(8)
    def test_maxpool_adaptive(self):
        model = torch.nn.AdaptiveMaxPool1d((5), return_indices=False)
        x = torch.randn(20, 16, 50, requires_grad=True)
        y = torch.randn(32, 16, 50, requires_grad=True)
        self.run_test(
            model, x, input_names=["x"], dynamic_axes={"x": [0]}, test_with_inputs=[y]
        )
    # MaxPool2d with asymmetric padding.
    def test_maxpool_2d(self):
        model = torch.nn.MaxPool2d(5, padding=(1, 2))
        x = torch.randn(1, 20, 16, 50, requires_grad=True)
        self.run_test(model, x)
    # 1-D / 2-D / 3-D max pooling with ceil_mode=True.
    def test_maxpool_1d_ceil(self):
        model = torch.nn.MaxPool1d(3, 2, ceil_mode=True)
        x = torch.randn(20, 16, 50)
        self.run_test(model, x)
    def test_maxpool_2d_ceil(self):
        model = torch.nn.MaxPool2d(3, 2, ceil_mode=True)
        x = torch.randn(20, 16, 50, 32)
        self.run_test(model, x)
    def test_maxpool_3d_ceil(self):
        model = torch.nn.MaxPool3d(3, 2, ceil_mode=True)
        x = torch.randn(20, 16, 50, 44, 31)
        self.run_test(model, x)
    # MaxPool1d returning both values and indices.
    @skipIfUnsupportedMinOpsetVersion(8)
    def test_maxpool_with_indices(self):
        model = torch.nn.MaxPool1d(2, stride=1, return_indices=True)
        x = torch.randn(20, 16, 50)
        self.run_test(model, x)
    # MaxPool1d with dilation (needs opset >= 10).
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_maxpool_dilation(self):
        model = torch.nn.MaxPool1d(2, stride=1, dilation=2)
        x = torch.randn(20, 16, 50)
        self.run_test(model, x)
    # functional avg_pool2d with default stride (= kernel size).
    def test_avgpool_default_stride(self):
        class AvgPoolModel(torch.nn.Module):
            def forward(self, x):
                return torch.nn.functional.avg_pool2d(x, 2)
        model = AvgPoolModel()
        x = torch.randn(10, 20, 16, 50)
        self.run_test(model, x)
    # basic AvgPool1d.
    def test_avgpool(self):
        model = torch.nn.AvgPool1d(2, stride=1)
        x = torch.randn(20, 16, 50)
        self.run_test(model, x)
    # 1-D / 2-D / 3-D average pooling with ceil_mode=True; the 3-D case also
    # runs a second input shape with dynamic batch/channel dims.
    def test_avgpool_1d_ceil(self):
        model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
        x = torch.randn(1, 1, 7)
        self.run_test(model, x)
    def test_avgpool_2d_ceil(self):
        model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
        x = torch.randn(20, 16, 50, 32)
        self.run_test(model, x)
    def test_avgpool_3d_ceil(self):
        model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
        x = torch.randn(20, 16, 50, 44, 31)
        y = torch.randn(32, 8, 50, 44, 31)
        self.run_test(
            model,
            x,
            input_names=["x"],
            dynamic_axes={"x": [0, 1]},
            test_with_inputs=[y],
        )
    # Tensor.is_floating_point inside scripted control flow; branches are
    # intentionally identical so the conditional can be resolved at export.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_floating_point(self):
        class FloatingPoint(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                if x.is_floating_point():
                    return x.new_zeros(x.shape)
                return x.new_zeros(x.shape)
        x = torch.randn(2, 3, 4)
        self.run_test(
            FloatingPoint(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(FloatingPoint(), x, remained_onnx_input_idx=[])
        class FloatingPoint(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                if x.size(0) > 1:
                    a = x + 2
                    if a.is_floating_point():
                        return x + 1
                    return x + 1
                return x
        x = torch.randn(2, 3, 4)
        self.run_test(FloatingPoint(), x)
    # Operator rank mismatch between outputs of two branches for opsets below 11.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_floating_point_infer_dtype(self):
        class FloatingPoint(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                if x.size(0) > 1:
                    a = x + 2
                    if a.is_floating_point():
                        # Branches return different ranks (shape[1:] vs shape).
                        return x.new_zeros(x.shape[1:])
                    return x.new_zeros(x.shape)
                return x
        x = torch.randn(2, 3, 4)
        self.run_test(
            FloatingPoint(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(FloatingPoint(), x, remained_onnx_input_idx=[])
        class FloatingPoint(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                if x.size(0) > 1:
                    a = x + 2
                    if a.is_floating_point():
                        return x + 1
                    return x
                return x
        # int32 input drives is_floating_point() down the False path.
        x = torch.randn(2, 3, 4).to(torch.int32)
        self.run_test(FloatingPoint(), x)
    # enumerate() in a scripted helper creates a prim::min node; also covers
    # min() of a tensor element and a python int.
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_prim_min(self):
        @torch.jit.script
        def list_append(boxes: List[Tensor]):
            temp = []
            for i, b in enumerate(
                boxes
            ):  # enumerate is creating a prim::min op in torch graph
                temp.append(torch.full_like(b[:, 1], i))
            return temp[0]
        class Min(torch.nn.Module):
            def forward(self, x):
                boxes = [x for _ in range(3)]
                return list_append(boxes)
        x = torch.rand(5, 5)
        self.run_test(Min(), (x,))
        class M(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                i = 3
                return min(x[i], i)
        x = torch.arange(6, dtype=torch.int64)
        self.run_test(M(), (x,))
def test_arithmetic(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3, 4)
self.run_test(ArithmeticModule(), x)
    # Arithmetic between a tensor and a python int argument; the second
    # module returns a shape element, so the exported graph needs no inputs.
    def test_arithmetic_prim_long(self):
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x, y: int):
                x = x + y
                x = x - y
                x = x * (y * 3)
                x = x / (y * 4)
                return x
        x = torch.randn(2, 3, 4)
        y = 2
        self.run_test(ArithmeticModule(), (x, y))
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x):
                x = x + 2
                x = x - 3
                return x.shape[0]
        x = torch.randn(2, 3, 4)
        self.run_test(ArithmeticModule(), x, remained_onnx_input_idx=[])
    # Arithmetic between a tensor and a python float argument.
    def test_arithmetic_prim_float(self):
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x, y: float):
                x = x + y
                x = x - y
                x = x * (y * 3)
                x = x / (y * 4)
                return x
        x = torch.randn(2, 3, 4)
        y = 2.5
        self.run_test(ArithmeticModule(), (x, y))
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x):
                x = x + 2
                x = x - 3
                return x.shape[1] / 2
        x = torch.randn(2, 3, 4)
        self.run_test(ArithmeticModule(), x, remained_onnx_input_idx=[])
    # bool / int / float prim inputs, including a bool-guarded branch and a
    # pure int == int comparison model.
    def test_arithmetic_prim_bool(self):
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x, y: int, z: bool, t: float):
                x = x + y
                x = x - y
                if z:
                    x = x * (y * 3)
                    x = x / (y * 4)
                return x / t, z
        x = torch.randn(2, 3, 4)
        y = 2
        z = False
        t = 2.5
        self.run_test(ArithmeticModule(), (x, y, z, t))
        class ArithmeticModule(torch.nn.Module):
            def forward(self, x: int, y: int):
                return x == y
        x = 3
        y = 2
        self.run_test(ArithmeticModule(), (x, y))
    # In tracing, None outputs are removed. In scripting they're kept but
    # we don't know Optional.elem_type, so we can't construct a valid Optional.
    # Tests for Optional outputs (control flow with None in one branch,
    # not-None in another) are in test_pytorch_onnx_no_runtime.py.
    @skipScriptTest()
    def test_tuple_with_none_outputs(self):
        class TupleModel(torch.nn.Module):
            def forward(self, x):
                return (x, (x, None, (x, None)))
        x = torch.randn(3, 4)
        self.run_test(TupleModel(), (x,))
    # In scripting the first transpose node does not carry shape and dtype info.
    # The following test only works when onnx shape inference is enabled.
    def test_arithmetic_infer_dtype(self):
        class ArithmeticModule(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                x = x.t()
                x = x + 2
                x = x - 4
                x = x * 6
                x = x / 8
                return x
        x = torch.randn(2, 3)
        self.run_test(ArithmeticModule(), x)
    # Floor division (//) across int/float operand dtype combinations.
    def test_floor_div(self):
        class FloorDivModule(torch.nn.Module):
            def forward(self, x, y):
                return (
                    x // 3,
                    x // 2.0,
                    x.to(dtype=torch.float64) // 3,
                    x.to(dtype=torch.float64) // 2.0,
                    x.to(dtype=torch.int64) // 3,
                    x.to(dtype=torch.int64) // 2.0,
                    x // (y + 1.0).to(dtype=torch.int64),
                    x // y,
                    x.to(dtype=torch.float64) // y.to(dtype=torch.int64),
                    x.to(dtype=torch.float64) // y.to(dtype=torch.float64),
                    x.to(dtype=torch.int64) // y.to(dtype=torch.int64),
                    x.to(dtype=torch.int64) // y,
                )
        # x includes negative values to exercise rounding toward -inf.
        x = torch.arange(-2, 4).reshape(2, 3, 1)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4)
        self.run_test(FloorDivModule(), (x, y))
    # Floor division in scripting mode.
    def test_floor_div_script(self):
        class FloorDivModule(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                return x // 3, x // 2.0, x // y
        x = torch.arange(-2, 4).reshape(2, 3, 1)
        y = torch.randn(2, 3, 4)
        self.run_test(FloorDivModule(), (x, y))
    # // of two size() values feeding new_zeros; the graph may then need no
    # tensor inputs at all.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_floordiv(self):
        class FloordivModule(torch.nn.Module):
            def forward(self, x):
                return x.new_zeros(x.size(2) // x.size(1))
        x = torch.randn(2, 3, 4)
        self.run_test(
            FloordivModule(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(FloordivModule(), (x,), remained_onnx_input_idx=[])
    # True division of int and float tensor pairs.
    def test_div(self):
        class DivModule(torch.nn.Module):
            def forward(self, x, y):
                return x / y, torch.true_divide(x, y)
        x = torch.randn(2, 3, 4).to(torch.int)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
        self.run_test(DivModule(), (x, y))
        self.run_test(DivModule(), (x.float(), y.float()))
    # Note: div cannot (generally) be exported via scripting
    # since its type promotion logic is dependent on knowing the scalar types
    # of the input tensors. That is, the ONNX graph is dependent on the
    # data type of the inputs. This makes it appropriate for tracing only.
    # div type promotion under tracing, with float and double default dtypes.
    def test_div_promotion_trace(self):
        class DivModule(torch.nn.Module):
            def forward(self, x, y):
                return x / y, torch.true_divide(x, y)
        x = torch.randn(2, 3, 4).to(torch.int)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
        prev_default = torch.get_default_dtype()
        torch.set_default_dtype(torch.float)
        self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
        torch.set_default_dtype(torch.double)
        self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
        # Restore the process-wide default dtype so later tests are unaffected.
        torch.set_default_dtype(prev_default)
    # In scripting x, y do not carry shape and dtype info.
    # The following test only works when onnx shape inference is enabled.
    def test_div_promotion_script(self):
        class DivModule(torch.nn.Module):
            def forward(self, x, y):
                # Add transpose to hide shape/type information
                # Otherwise shape and type are still available from input.
                x = x.transpose(1, 2)
                y = y.transpose(1, 2)
                return x / y, torch.true_divide(x, y)
        x = torch.randn(2, 3, 4).to(torch.int)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
        prev_default = torch.get_default_dtype()
        # 1. x,y are int, and output is float.
        # This can be handled by the default case, where both are cast to float.
        # It works even if type of x, y are unknown.
        torch.set_default_dtype(torch.float)
        self.run_test(torch.jit.script(DivModule()), (x, y))
        # 2. x,y are int, and output is double.
        # This can be handled by the default case, where both are cast to double.
        # It works even if type of x, y are unknown.
        torch.set_default_dtype(torch.double)
        self.run_test(torch.jit.script(DivModule()), (x, y))
        # 3. x is int, y is double, and output is double.
        # This can only be handled when both type of x and y are known.
        torch.set_default_dtype(prev_default)
        x = torch.randn(2, 3, 4).to(torch.int)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double)
        self.run_test(torch.jit.script(DivModule()), (x, y))
    # div with rounding_mode None / "trunc" / "floor", exercised eagerly,
    # traced, and scripted, on both int and float input pairs.
    def test_div_rounding_mode(self):
        class TrueDivModule(torch.nn.Module):
            def forward(self, x, y):
                return (
                    x.div(y, rounding_mode=None),
                    torch.div(x, y, rounding_mode=None),
                )
        class TruncDivModule(torch.nn.Module):
            def forward(self, x, y):
                return (
                    x.div(y, rounding_mode="trunc"),
                    torch.div(x, y, rounding_mode="trunc"),
                )
        class FloorDivModule(torch.nn.Module):
            def forward(self, x, y):
                return (
                    x.div(y, rounding_mode="floor"),
                    torch.div(x, y, rounding_mode="floor"),
                )
        modules = [TrueDivModule(), TruncDivModule(), FloorDivModule()]
        x = (torch.randn(2, 3, 4) * 100).to(torch.int)
        y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
        for module in modules:
            self.run_test(module, (x, y))
            self.run_test(torch.jit.trace(module, (x, y)), (x, y))
            self.run_test(torch.jit.script(module), (x, y))
        x = torch.randn(2, 3, 4)
        y = torch.rand(2, 3, 4) * 10.0 + 0.1
        for module in modules:
            self.run_test(module, (x, y))
            self.run_test(torch.jit.trace(module, (x, y)), (x, y))
            self.run_test(torch.jit.script(module), (x, y))
def test_slice_trace(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x[0:1]
x = torch.randn(3)
self.run_test(MyModule(), x)
    # Negative slice start on the leading dim.
    def test_slice_neg(self):
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[-1:]
        x = torch.randn(3, 4, 5)
        self.run_test(NegSlice(), x)
    # Negative slicing and a negative index across several dims.
    def test_slice_neg_large(self):
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[:, :, -3:-1, :, -1]
        x = torch.randn(3, 4, 5, 6, 7)
        self.run_test(NegSlice(), x)
    # Index -1 on the last dim with full slices elsewhere.
    def test_slice_neg_large_negone(self):
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[:, :, :, :, -1]
        x = torch.randn(3, 4, 5, 6, 7)
        self.run_test(NegSlice(), x)
    # In-place slice assignment whose extent comes from another input's size.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_slice_with_input_index(self):
        class InputIndexSlice(torch.nn.Module):
            def forward(self, x, y):
                x[: y.size(0), 0, :] = y
                return x
        x = torch.zeros((56, 6, 256))
        y = torch.rand((22, 256))
        self.run_test(InputIndexSlice(), (x, y))
    # Loop of slices with runtime start/end, checked on a second input shape
    # with all axes dynamic.
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipScriptTest()  # scripting tuple/list append
    def test_slice_dynamic(self):
        class DynamicSliceExportMod(torch.nn.Module):
            def forward(self, x):
                results = []
                for i in range(4):
                    results.append(x[: x.size(0) - i, i : x.size(2), i:3])
                return tuple(results)
        x = torch.rand(5, 5, 5)
        y = torch.randn(6, 7, 8)
        self.run_test(
            DynamicSliceExportMod(),
            x,
            test_with_inputs=[y],
            input_names=["input_1"],
            output_names=["output_1"],
            dynamic_axes={"input_1": [0, 1, 2], "output_1": [0, 1, 2]},
        )
    # Scripted slice with a runtime end (x.size(1)).
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_slice_dynamic_script(self):
        class DynamicSliceModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return x[1 : x.size(1)]
        x = torch.rand(1, 2)
        self.run_test(DynamicSliceModel(), x)
    # Slicing a shape list with a runtime end, feeding new_zeros; graph can
    # then drop the tensor input.
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_slice_dynamic_shape_script(self):
        class DynamicSliceModel(torch.nn.Module):
            def forward(self, x):
                return x.new_zeros(x.shape[1 : x.size(2)])
        x = torch.rand(1, 2, 3, 4)
        self.run_test(
            DynamicSliceModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}
        )
        self.run_test(DynamicSliceModel(), x, remained_onnx_input_idx=[])
    # Slices running to the (dynamic) end of a dim.
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipScriptTest()  # scripting tuple/list append
    def test_slice_dynamic_to_end(self):
        class DynamicSliceExportMod(torch.nn.Module):
            def forward(self, x):
                results = []
                for i in range(4):
                    results.append(x[:, i:, x.size(2) - 5])
                return tuple(results)
        x = torch.rand(5, 5, 5)
        self.run_test(
            DynamicSliceExportMod(),
            x,
            dynamic_axes={"input_1": [0, 1, 2], "output_1": [0, 1, 2]},
        )
def test_square(self):
class Square(torch.nn.Module):
def forward(self, x):
return torch.square(x)
x = torch.randn(2, 3, 4)
self.run_test(Square(), x)
    # torch.arange with shape-derived start/end, traced and scripted, with
    # a dynamic first dim checked against a second input shape.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_dynamic(self):
        class ArangeModel(torch.nn.Module):
            def forward(self, input):
                return (
                    torch.arange(input.shape[0]),
                    torch.arange(12),
                    torch.arange(start=input.shape[0], end=input.shape[0] + 5),
                )
        x = torch.randn(5, 3, 2)
        y = torch.randn(8, 3, 2)
        self.run_test(
            ArangeModel(),
            x,
            test_with_inputs=[y],
            input_names=["input_1"],
            output_names=["output_1", "output_2", "output_3"],
            dynamic_axes={"input_1": [0], "output_1": [0]},
        )
        self.run_test(
            torch.jit.script(ArangeModel()),
            x,
            test_with_inputs=[y],
            input_names=["input_1"],
            output_names=["output_1", "output_2", "output_3"],
            dynamic_axes={"input_1": [0], "output_1": [0]},
        )
    # int64 arange with an out= tensor and a runtime end value.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_dynamic_arange_out(self):
        class ArangeOutModel(torch.nn.Module):
            def forward(self, end):
                out_t = torch.tensor([1], dtype=torch.int64)
                return torch.arange(end, out=out_t)
        x = torch.tensor(8)
        self.run_test(ArangeOutModel(), (x))
    # arange(start.size(0), end, out=...); x contributes only its shape, so
    # only y (index 1) remains as an ONNX input.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_dynamic_arange_start_out(self):
        class ArangeStartOutModel(torch.nn.Module):
            def forward(self, start, end):
                out_t = torch.tensor([1], dtype=torch.int64)
                return torch.arange(start.size(0), end, out=out_t)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8)
        self.run_test(
            ArangeStartOutModel(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeStartOutModel(), (x, y), remained_onnx_input_idx=[1])
    # torch.linspace with runtime start/end/steps inputs.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_linspace(self):
        class LinspaceModel(torch.nn.Module):
            def forward(self, start, end, steps):
                return torch.linspace(start, end, steps)
        x = torch.tensor(3, dtype=torch.float)
        y = torch.tensor(10, dtype=torch.float)
        z = torch.tensor(5, dtype=torch.int)
        self.run_test(LinspaceModel(), (x, y, z))
    # linspace with a negative start value.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_linspace_negative_start(self):
        class LinspaceModel(torch.nn.Module):
            def forward(self, start, end, steps):
                return torch.linspace(start, end, steps)
        x = torch.tensor(-1, dtype=torch.float)
        y = torch.tensor(1, dtype=torch.float)
        z = torch.tensor(6, dtype=torch.int)
        self.run_test(LinspaceModel(), (x, y, z))
    # float arange with out=, without and with an explicit non-unit step.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_with_floats_out(self):
        class ArangeModelEnd(torch.nn.Module):
            def forward(self, end):
                out_t = torch.tensor([1], dtype=torch.float)
                return torch.arange(end, out=out_t)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(ArangeModelEnd(), (y))
        class ArangeModelStep(torch.nn.Module):
            def forward(self, start, end):
                out_t = torch.tensor([1], dtype=torch.float)
                return torch.arange(start.size(0), end, 1.5, out=out_t)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeModelStep(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
    # float arange without out=: end-only, positive step, negative step, and
    # start/end variants; x contributes only its shape each time.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_with_floats(self):
        class ArangeModelEnd(torch.nn.Module):
            def forward(self, end):
                return torch.arange(end)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(ArangeModelEnd(), (y))
        class ArangeModelStep(torch.nn.Module):
            def forward(self, start, end):
                return torch.arange(start.size(0), end, 1.5)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeModelStep(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
        class ArangeModelStepNeg(torch.nn.Module):
            def forward(self, start, end):
                return torch.arange(end, start.size(0), -1.5)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeModelStepNeg(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeModelStepNeg(), (x, y), remained_onnx_input_idx=[1])
        class ArangeModelStart(torch.nn.Module):
            def forward(self, start, end):
                return torch.arange(start.size(0), end)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeModelStart(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeModelStart(), (x, y), remained_onnx_input_idx=[1])
    # float arange with an explicit int64 dtype override.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_with_floats_override(self):
        class ArangeModelEnd(torch.nn.Module):
            def forward(self, end):
                return torch.arange(end, dtype=torch.int64)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(ArangeModelEnd(), (y))
        class ArangeModelStep(torch.nn.Module):
            def forward(self, start, end):
                return torch.arange(start.size(0), end, 1.5, dtype=torch.int64)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeModelStep(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeModelStep(), (x, y), remained_onnx_input_idx=[1])
    # float arange with out= (opset >= 11 path).
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_arange_out(self):
        class ArangeOutModel(torch.nn.Module):
            def forward(self, end):
                out_t = torch.tensor([1], dtype=torch.float)
                return torch.arange(end, out=out_t)
        x = torch.tensor(8.5, dtype=torch.float)
        self.run_test(ArangeOutModel(), (x))
    # arange(start.size(0), end, out=...) (opset >= 11 path); only y stays
    # as an ONNX input since x contributes only its shape.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_arange_start_out(self):
        class ArangeStartOutModel(torch.nn.Module):
            def forward(self, start, end):
                out_t = torch.tensor([1], dtype=torch.float)
                return torch.arange(start.size(0), end, out=out_t)
        x = torch.randn(2, 3, 4)
        y = torch.tensor(8.5, dtype=torch.float)
        self.run_test(
            ArangeStartOutModel(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2]},
        )
        self.run_test(ArangeStartOutModel(), (x, y), remained_onnx_input_idx=[1])
    # arange with no explicit dtype or out=.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_arange_no_type(self):
        class ArangeModel(torch.nn.Module):
            def forward(self, end):
                return torch.arange(end), torch.arange(0, end)
        x = torch.tensor(6.2, dtype=torch.float)
        self.run_test(ArangeModel(), x)
    # size()/shape consumed by arange and ones; all outputs derive from the
    # input's shape, so the exported graph can drop the tensor input.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_size(self):
        class SizeModel(torch.nn.Module):
            def forward(self, input):
                return (
                    torch.arange(input.size(0)),
                    torch.arange(input.size(-1)),
                    torch.ones(input.shape),
                )
        x = torch.randn(5, 3, 2)
        self.run_test(SizeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
        self.run_test(SizeModel(), x, remained_onnx_input_idx=[])
    # as_strided with constant arguments and with size/stride lists derived
    # from x.size()/x.stride().
    @skipIfUnsupportedMinOpsetVersion(9)
    @skipScriptTest()  # x.stride() not scriptable
    def test_as_strided(self):
        class Model(torch.nn.Module):
            def forward(self, x):
                chunk_size = list(x.size())
                chunk_size[1] = chunk_size[1] * 2 - 1
                chunk_stride = list(x.stride())
                chunk_stride[1] = chunk_stride[1] // 2
                return x.as_strided(
                    (3, 3, 3), (1, 4, 2), storage_offset=2
                ), x.as_strided(chunk_size, chunk_stride)
        x = torch.randn(5, 8, 7)
        self.run_test(Model(), x)
    # Advanced indexing with tensor indices following an ellipsis.
    @skipScriptTest()  # Ellipses followed by tensor indexing not scriptable
    def test_tensor_index_advanced_indexing_ellipsis(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[..., torch.tensor([2, 1]), torch.tensor([0, 3])]
        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), (m1,))
    # Mixed advanced indexing: tensor indices interleaved with full slices,
    # None (newaxis), and ranges.
    def test_tensor_index_advanced_indexing(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[
                    :,
                    torch.tensor([[0, 2], [1, 1]]),
                    :,
                    torch.tensor([2, 1]),
                    torch.tensor([0, 3]),
                ]
        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), (m1,))
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[
                    :, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])
                ]
        self.run_test(MyModel(), (m1,))
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[
                    :,
                    torch.tensor([0, 2]),
                    torch.tensor([1]),
                    2:4,
                    torch.tensor([[1], [4]]),
                ]
        self.run_test(MyModel(), (m1,))
    # Consecutive tensor indices followed by None (newaxis).
    def test_tensor_index_advanced_indexing_consecutive(self):
        class MyModel(torch.nn.Module):
            def forward(self, input):
                return input[
                    :, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None
                ]
        m1 = torch.randn(3, 4, 5, 6, 7)
        self.run_test(MyModel(), (m1,))
    # Basic index_put via indexed assignment.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put(self):
        class IndexPutModel(torch.nn.Module):
            def forward(self, x, ind, update):
                x[ind] = update
                return x
        x = torch.randn(3, 4)
        ind = torch.tensor([1], dtype=torch.long)
        update = torch.ones(4)
        self.run_test(IndexPutModel(), (x, ind, update))
    # index_put with a scalar RHS: a bool constant and a 0-dim float tensor.
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_singular(self):
        class IndexPutBoolModel(torch.nn.Module):
            def forward(self, mask, indices):
                mask[indices] = True
                return mask
        mask = torch.zeros(100, dtype=torch.bool)
        indices = (torch.rand(25) * mask.shape[0]).to(torch.int64)
        self.run_test(IndexPutBoolModel(), (mask, indices))
        class IndexPutFloatModel(torch.nn.Module):
            def forward(self, mask, indices):
                mask[indices] = torch.tensor(5.5)
                return mask
        mask = torch.rand(100, dtype=torch.float)
        indices = (torch.rand(50) * mask.shape[0]).to(torch.int64)
        self.run_test(IndexPutFloatModel(), (mask, indices))
    # index_put with accumulate=True (add into existing values).
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_accumulate(self):
        class IndexPutModel(torch.nn.Module):
            def forward(self, x, ind, update):
                return x.index_put((ind,), update, accumulate=True)
        x = torch.randn(3, 4)
        ind = torch.tensor([2], dtype=torch.long)
        update = torch.ones(4)
        self.run_test(IndexPutModel(), (x, ind, update))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_slice_index(self):
        """In-place indexed assignment (= and +=) mixing slices, tensor indices
        and integer indices; each IndexPutModelN covers one index layout."""
        class IndexPutModel(torch.nn.Module):
            def forward(self, x, update):
                x[1:2, 1:3, torch.tensor([1])] += update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.tensor([10, 15]).view(1, 2, 1)
        self.run_test(IndexPutModel(), (x, update))
        # Two aligned tensor indices (pointwise advanced indexing).
        class IndexPutModel2(torch.nn.Module):
            def forward(self, x, update):
                x[torch.tensor([0, 2]), torch.tensor([1, 2])] += update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.randn(2, 5)
        self.run_test(IndexPutModel2(), (x, update))
        # Tensor index followed by a slice.
        class IndexPutModel3(torch.nn.Module):
            def forward(self, x, update):
                x[torch.tensor([0, 2]), 1:2] += update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.tensor([10, 15]).view(2, 1, 1)
        self.run_test(IndexPutModel3(), (x, update))
        # Tensor index followed by an integer index.
        class IndexPutModel4(torch.nn.Module):
            def forward(self, x, update):
                x[torch.tensor([0, 2]), 2] += update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.tensor([10, 15]).view(2, 1)
        self.run_test(IndexPutModel4(), (x, update))
        # Slice, then tensor index, then integer index.
        class IndexPutModel5(torch.nn.Module):
            def forward(self, x, update):
                x[1:3, torch.tensor([0, 2]), 2] += update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.tensor([10, 15]).view(2, 1)
        self.run_test(IndexPutModel5(), (x, update))
        # Plain-assignment variants over slice/select combinations.
        class IndexPutModel6(torch.nn.Module):
            def forward(self, x, update):
                x[1:3, 0] = update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.arange(2 * 5).to(torch.float).view(2, 5)
        self.run_test(IndexPutModel6(), (x, update))
        class IndexPutModel7(torch.nn.Module):
            def forward(self, x, update):
                x[1:, 0] = update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.arange(2 * 5).to(torch.float).view(2, 5)
        self.run_test(IndexPutModel7(), (x, update))
        class IndexPutModel8(torch.nn.Module):
            def forward(self, x, update):
                x[:3, 0] = update
                return x
        x = torch.randn(3, 4, 5)
        update = torch.arange(3 * 5).to(torch.float).view(3, 5)
        self.run_test(IndexPutModel8(), (x, update))
        # RHS computed from the input's own shape/values.
        class IndexPutModel9(torch.nn.Module):
            def forward(self, poses):
                w = 32
                x = poses[:, :, 0] - (w - 1) // 2
                boxes = torch.zeros([poses.shape[0], 17, 4])
                boxes[:, :, 0] = x
                return boxes
        x = torch.zeros([2, 17, 3], dtype=torch.int64)
        self.run_test(IndexPutModel9(), (x,))
        # 2-D tensor index with a broadcast/expanded RHS.
        class IndexPutModel10(torch.nn.Module):
            def forward(self, x, ind, update):
                x[ind, 1:3] = update.view(1, 1, 1, 5).expand(2, 2, 2, 5)
                return x
        x = torch.randn(3, 4, 5)
        ind = torch.tensor([[0, 2], [1, 1]])
        update = torch.randn(5)
        self.run_test(IndexPutModel10(), (x, ind, update))
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()  # Ellipses followed by tensor indexing not scriptable
    def test_index_put_ellipsis(self):
        """Indexed += where the index expression starts with an ellipsis."""
        class IndexPutModel(torch.nn.Module):
            def forward(self, x, update):
                x[..., torch.tensor([2, 1, 3]), 2:4] += update
                return x
        x = torch.randn(3, 4, 5, 6, 7)
        update = torch.randn(3, 1, 1, 3, 2)
        self.run_test(IndexPutModel(), (x, update))
        # Same, but with a leading integer index before the ellipsis.
        class IndexPutModel2(torch.nn.Module):
            def forward(self, x, update):
                x[2, ..., torch.tensor([2, 1, 3]), 2:4] += update
                return x
        x = torch.randn(3, 4, 5, 6, 7)
        update = torch.randn(4, 1, 3, 2)
        self.run_test(IndexPutModel2(), (x, update))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_loop(self):
        """index_put inside scripted nested loops, exported with dynamic axes.

        `ngram_attention_bias` mutates `bias` element-wise inside two nested
        loops; the exported model is then run with a second, differently
        shaped input (y) to verify the declared dynamic axes.
        """
        @torch.jit.script
        def ngram_attention_bias(
            sequence_length: int, ngram: int, device: torch.device, dtype: torch.dtype
        ):
            bias = torch.ones(
                (ngram, sequence_length), device=device, dtype=dtype
            ) * float("-inf")
            for stream_idx in range(ngram):
                for i in range(sequence_length):
                    bias = bias * 2
                    bias[stream_idx, i] = 5
                    bias = bias * 5
                    bias[0, 0] = 5
            for stream_idx in range(ngram):
                for i in range(sequence_length):
                    bias[stream_idx, i] = 5
                    bias[0, i] = 5
            return bias
        class ScriptModel(torch.nn.Module):
            def __init__(self):
                super(ScriptModel, self).__init__()
                self.ngram = 2
                self.max_target_positions = 512
            def forward(self, hidden_states):
                seq_length, batch_size = hidden_states.shape[:2]
                predict_causal_mask = ngram_attention_bias(
                    self.max_target_positions,
                    self.ngram,
                    hidden_states.device,
                    hidden_states.dtype,
                )
                predict_causal_mask = predict_causal_mask[:, :seq_length]
                return predict_causal_mask
        x = torch.randn(6, 2)
        y = torch.randn(4, 1)
        self.run_test(
            ScriptModel(),
            x,
            input_names=["x"],
            dynamic_axes={"x": {0: "seq_length", 1: "batch_size"}},
            test_with_inputs=[y],
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_copy_(self):
        """Slice/select assignment and explicit copy_ export, including RHS
        broadcasting (CopyModel2/3 run with several update shapes)."""
        class CopyModel(torch.nn.Module):
            def forward(self, x, data):
                x[1:3] = data
                return x
        x = torch.randn(3, 4)
        update = torch.randn(2, 4)
        self.run_test(CopyModel(), (x, update))
        # mixed slice and select
        class CopyModel2(torch.nn.Module):
            def forward(self, x, data):
                x[1:3, 0] = data
                return x
        x = torch.randn(3, 4)
        update = torch.tensor([0], dtype=torch.float32)
        self.run_test(CopyModel2(), (x, update))
        update = torch.tensor([2, 3], dtype=torch.float32)
        self.run_test(CopyModel2(), (x, update))
        update = torch.randn(2)
        self.run_test(CopyModel2(), (x, update))
        class CopyModel3(torch.nn.Module):
            def forward(self, x, data):
                x[1, 1:3] = data
                return x
        x = torch.randn(3, 4)
        update = torch.tensor([0], dtype=torch.float32)
        self.run_test(CopyModel3(), (x, update))
        update = torch.tensor([2, 3], dtype=torch.float32)
        self.run_test(CopyModel3(), (x, update))
        update = torch.randn(2)
        self.run_test(CopyModel3(), (x, update))
        # 0-d tensor used as the row index.
        class CopyModel4(torch.nn.Module):
            def forward(self, x, ind, data):
                x[ind] = data
                return x
        x = torch.randn(3, 4)
        ind = torch.tensor(2)
        data = torch.randn(4)
        self.run_test(CopyModel4(), (x, ind, data))
        # Explicit copy_ guarded by a None check (broadcasting mask into x).
        class CopyModel5(torch.nn.Module):
            def forward(self, x, mask):
                if mask is not None:
                    x.copy_(mask)
                    return x
        x = torch.randn(3, 4)
        mask = torch.randn(3, 1)
        self.run_test(CopyModel5(), (x, mask))
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()  # Model not scriptable (output with shape doesn't match the broadcast shape)
    def test_copy_tracing(self):
        """Tracing-only copy test: assign a (1, 2) update into x[1, 1:3]."""
        class CopyModel(torch.nn.Module):
            def forward(self, x, data):
                x[1, 1:3] = data
                return x
        x = torch.randn(3, 4)
        update = torch.randn(1, 2)
        self.run_test(CopyModel(), (x, update))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_copy_ellipsis(self):
        """Ellipsis assignment x[..., 1] = update for rank-3 and rank-5 inputs."""
        class CopyModel(torch.nn.Module):
            def forward(self, x, update):
                x[..., 1] = update
                return x
        x = torch.randn(2, 3, 4)
        update = torch.ones(1)
        self.run_test(CopyModel(), (x, update))
        x = torch.randn(2, 3, 4, 5, 6)
        update = torch.ones(1)
        self.run_test(CopyModel(), (x, update))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_copy_ellipsis_script(self):
        """Scripted ellipsis assignment where x's shape info is hidden by a reshape."""
        class CopyModel(torch.nn.Module):
            def forward(self, x, update):
                # Insert reshape node to ensure no shape/type info for
                # x in scripting, without onnx shape inference.
                x = x.reshape(4, 3, 5, 6)
                x[2, ..., 1:3] = update
                return x
        x = torch.randn(3, 4, 5, 6)
        update = torch.ones(1)
        self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(10)
def test_flip(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flip(x, dims=[0])
x = torch.tensor(np.arange(6.0).reshape(2, 3))
self.run_test(MyModule(), x)
    def test_random(self):
        """randn/rand export: the random result feeds only through .size(0),
        so the compared output is deterministic."""
        class RandN(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, (torch.randn(2, 3, 4) + x).size(0))
        x = torch.randn(2, 3, 4)
        self.run_test(RandN(), x)
        class Rand(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, (torch.rand(2, 3, 4) + x).size(0))
        x = torch.randn(2, 3, 4)
        self.run_test(Rand(), x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_random_dynamic_size(self):
        """randn/rand with a size taken from the input's runtime shape."""
        class RandN(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, torch.randn(x.size()).size(1))
        x = torch.randn(2, 3, 4)
        self.run_test(RandN(), x)
        class Rand(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, torch.rand(x.size()).size(1))
        x = torch.randn(2, 3, 4)
        self.run_test(Rand(), x)
    def test_random_like(self):
        """randn_like/rand_like export, in both traced and scripted form."""
        class RandNLike(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, torch.randn_like(x).size(0))
        x = torch.randn(2, 3, 4)
        self.run_test(RandNLike(), x)
        self.run_test(torch.jit.script(RandNLike()), x)
        class RandLike(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, torch.rand_like(x).size(0))
        x = torch.randn(2, 3, 4)
        self.run_test(RandLike(), x)
        self.run_test(torch.jit.script(RandLike()), x)
    def test_random_like_dtype(self):
        """randn_like/rand_like with an explicit dtype override (double)."""
        class RandNLike(torch.nn.Module):
            def forward(self, x):
                return torch.mul(
                    x.to(torch.double), torch.randn_like(x, dtype=torch.double).size(0)
                )
        x = torch.randn(2, 3, 4)
        self.run_test(RandNLike(), x)
        class RandLike(torch.nn.Module):
            def forward(self, x):
                return torch.mul(
                    x.to(torch.double), torch.rand_like(x, dtype=torch.double).size(0)
                )
        x = torch.randn(2, 3, 4)
        self.run_test(RandLike(), x)
    def test_bernoulli(self):
        """bernoulli export; the sample feeds only through .size(0), so the
        compared output is deterministic. Runs for float and double inputs."""
        class Bernoulli(torch.nn.Module):
            def forward(self, x):
                return torch.mul(x, torch.bernoulli(x).size(0))
        x = torch.empty(3, 3).uniform_(0, 1)
        self.run_test(Bernoulli(), x)
        x = torch.empty(2, 3, 3, dtype=torch.double).uniform_(0, 1)
        self.run_test(Bernoulli(), x)
    @unittest.skip("Bug in ORT, skip test until rel-1.11.")
    @skipIfUnsupportedMinOpsetVersion(14)
    def test_reshape_allowzero(self):
        """Reshape with an explicit 0 dim (opset-14 allowzero path)."""
        class ReshapeModel(torch.nn.Module):
            def forward(self, x):
                x = x.reshape(3, 4, 0)
                return x
        x = torch.randn(0, 3, 4)
        self.run_test(ReshapeModel(), x)
def test_reshape_different_rank(self):
class ReshapeModel(torch.nn.Module):
def forward(self, x):
x = x.reshape(-1, 2, 4, 4, 5, 5)
return x
x = torch.randn(1, 32, 5, 5)
self.run_test(ReshapeModel(), x)
    def _interpolate(self, x, mode, use_size, is_upsample, align_corners=False):
        """Build and run one interpolate parity case.

        Args:
            x: input tensor; its rank (3/4/5) picks the size/scale arrays.
            mode: interpolate mode string ("nearest", "bilinear", ...).
            use_size: if True use the `size` kwarg, else `scale_factor`.
            is_upsample: picks upsampling (scale 2.0 / size 24) vs
                downsampling (scale 0.5 / size 2) constants.
            align_corners: forwarded to interpolate when True.

        Each forward returns a pair: one call with the scalar size/scale and
        one with the per-dimension array variant.
        """
        class MyModel(torch.nn.Module):
            __constants__ = [
                "mode",
                "use_size",
                "is_upsample",
                "size",
                "scale",
                "size_array",
                "scale_array",
                "align_corners",
            ]
            def __init__(self, mode, use_size, is_upsample, align_corners):
                super(MyModel, self).__init__()
                self.mode = mode
                self.use_size = use_size
                self.is_upsample = is_upsample
                self.align_corners = align_corners
                self.scale = 2.0 if self.is_upsample else 0.5
                self.size = 24 if self.is_upsample else 2
                # Array variants sized to the input's spatial rank.
                if x.dim() == 3:
                    self.scale_array = [2.3]
                    self.size_array = [16]
                elif x.dim() == 4:
                    self.scale_array = [2.3, 3.1]
                    self.size_array = [16, 32]
                else:
                    self.scale_array = [2.3, 3.1, 4.6]
                    self.size_array = [16, 32, 64]
            def forward(self, x):
                if self.use_size:
                    if self.align_corners:
                        return torch.nn.functional.interpolate(
                            x, mode=self.mode, size=self.size, align_corners=True
                        ), torch.nn.functional.interpolate(
                            x, mode=self.mode, size=self.size_array, align_corners=True
                        )
                    return torch.nn.functional.interpolate(
                        x, mode=self.mode, size=self.size
                    ), torch.nn.functional.interpolate(
                        x, mode=self.mode, size=self.size_array
                    )
                if self.align_corners:
                    return torch.nn.functional.interpolate(
                        x,
                        mode=self.mode,
                        scale_factor=self.scale,
                        recompute_scale_factor=False,
                    ), torch.nn.functional.interpolate(
                        x,
                        mode=self.mode,
                        scale_factor=self.scale_array,
                        recompute_scale_factor=False,
                    )
                return torch.nn.functional.interpolate(
                    x,
                    mode=self.mode,
                    scale_factor=self.scale,
                    recompute_scale_factor=False,
                ), torch.nn.functional.interpolate(
                    x,
                    mode=self.mode,
                    scale_factor=self.scale_array,
                    recompute_scale_factor=False,
                )
        model = MyModel(mode, use_size, is_upsample, align_corners)
        self.run_test(model, x, atol=1e-6)
    def _interpolate_tests(self, is_upsample):
        """Sweep _interpolate over modes x input ranks, skipping unsupported
        combinations (see inline TODOs) and adding align_corners variants."""
        # - cubic mode is not supported for opsets below 11;
        # - linear mode does not match for opsets below 11;
        modes = ["nearest", "linear", "bicubic"]
        if self.opset_version < 11:
            modes = ["nearest"]
        x = [
            torch.randn(1, 2, 6, requires_grad=True),
            torch.randn(1, 2, 4, 6, requires_grad=True),
            torch.randn(1, 2, 4, 4, 6, requires_grad=True),
        ]
        for mode in modes:
            for xi in x:
                mode_i = mode
                # TODO: enable bicubic downsample when ORT precision loss fixed
                if mode == "bicubic" and xi.dim() != 4:
                    continue
                elif mode == "linear":
                    if xi.dim() == 3:
                        # TODO : enable when linear mode is implemented for 1d inputs in ORT
                        continue
                    elif xi.dim() == 4:
                        mode_i = "bilinear"
                    elif xi.dim() == 5:
                        # TODO : enable when linear mode is implemented for 3d inputs in ORT
                        mode_i = "trilinear"
                        continue
                self._interpolate(xi, mode_i, True, is_upsample)
                # test with align_corners if supported
                if mode != "nearest":
                    self._interpolate(xi, mode_i, True, is_upsample, True)
                # the following cases, require dynamic sizes/scales,
                # which is not supported for opset_version < 9
                if self.opset_version >= 9:
                    # NOTE(review): this duplicates the use_size=True call above;
                    # the comment suggests it should be use_size=False — confirm.
                    self._interpolate(xi, mode_i, True, is_upsample)
                    # test with align_corners if supported
                    if mode != "nearest":
                        self._interpolate(xi, mode_i, False, is_upsample, True)
                    self._interpolate(xi, mode_i, False, is_upsample)
    # ONNX export failed on interpolate scripting because dynamic size not supported for opsets below 9.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_interpolate_upsample(self):
        """Full interpolate sweep in the upsampling direction."""
        self._interpolate_tests(True)
    @skipIfUnsupportedMaxOpsetVersion(8)
    @skipScriptTest()  # Scripting supported for opsets > 8. See test_interpolate_upsample
    def test_interpolate_upsample_trace(self):
        """Tracing-only upsample sweep for old opsets (<= 8)."""
        self._interpolate_tests(True)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_interpolate_function_substitution(self):
        """interpolate called through a script submodule, and through a scripted
        free function invoked from a traced module."""
        class ScriptModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.nn.functional.interpolate(
                    x, mode="nearest", scale_factor=2.0
                )
        class ScriptModule(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptModule, self).__init__()
                self.submodule = ScriptModel()
            @torch.jit.script_method
            def forward(self, input):
                return self.submodule(input)
        x = torch.randn(1, 2, 4, 4, 6)
        self.run_test(ScriptModule(), (x,))
        @torch.jit.script
        def script_method(x):
            return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.0)
        class TracingModule(torch.nn.Module):
            def forward(self, x):
                return script_method(x)
        self.run_test(TracingModule(), (x,))
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_interpolate_downsample(self):
        """Full interpolate sweep in the downsampling direction."""
        self._interpolate_tests(False)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_interpolate_no_shape(self):
        """Scripted interpolate whose target size comes from another input's
        runtime shape; exported with dynamic axes and with y pruned
        (remained_onnx_input_idx=[0])."""
        class MyModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                x = torch.add(x, x)
                out1 = torch.nn.functional.interpolate(
                    x, mode="bilinear", size=(16, 16), align_corners=False
                )
                out2 = torch.nn.functional.interpolate(
                    x, mode="nearest", size=(int(y.size(0)), int(y.size(1)))
                )
                return out1, out2
        x = torch.randn(1, 2, 4, 4, requires_grad=True)
        y = torch.randn(16, 16, requires_grad=True)
        self.run_test(
            MyModel(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1]},
        )
        self.run_test(MyModel(), (x, y), remained_onnx_input_idx=[0])
@skipScriptTest() # scripting raises OnnxRuntimeError
def test_interpolate_adaptive_pooling_error(self):
x = torch.randn(1, 2, 6, requires_grad=True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", True, True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", False, True)
def test_groupnorm(self):
model = torch.nn.GroupNorm(3, 6, 0.002)
x = torch.randn(4, 6, 180, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
def test_groupnorm_noaffine(self):
model = torch.nn.GroupNorm(4, 8, 0.002, affine=False)
x = torch.randn(3, 8, 224, 224)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_listunpack(self):
        """ListUnpack of tensor shapes in script modules; output depends only
        on the input's shape, so a second run checks the export with no
        remaining ONNX inputs (remained_onnx_input_idx=[])."""
        class ListUnpack(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                a, b = x.shape
                return x.new_zeros((a, b))
        x = torch.randn(2, 3)
        self.run_test(ListUnpack(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(ListUnpack(), x, remained_onnx_input_idx=[])
        class ListUnpackSlice(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                a, b = x.shape[2:]
                return x.new_zeros((a, b))
        x = torch.randn(2, 3, 4, 5)
        self.run_test(
            ListUnpackSlice(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]}
        )
        self.run_test(ListUnpackSlice(), x, remained_onnx_input_idx=[])
    def test_pow(self):
        """pow across float/int operand dtype combinations, with a scalar base
        (2 ** x), and with the pow result used as an index."""
        class PowModule(torch.nn.Module):
            def forward(self, x, y):
                return x.pow(y)
        x = torch.randn(2, 3, 4)
        y = torch.randn(2, 3, 4)
        self.run_test(PowModule(), (x, y))
        x = torch.randint(10, (2, 3, 4))
        y = torch.randint(10, (2, 3, 4)).to(dtype=torch.int32)
        self.run_test(PowModule(), (x, y))
        x = torch.randint(10, (2, 3, 4))
        y = torch.randint(10, (2, 3, 4))
        self.run_test(PowModule(), (x, y))
        x = torch.randn(2, 3, 4).to(dtype=torch.float64)
        y = torch.randint(10, (2, 3, 4))
        self.run_test(PowModule(), (x, y))
        class PowModule2(torch.nn.Module):
            def forward(self, x):
                return torch.pow(2, x)
        x = torch.randn(1, 10)
        self.run_test(PowModule2(), (x,))
        x = torch.randint(10, (2, 3, 4))
        self.run_test(PowModule2(), (x,))
        x = torch.randn(1, 10).to(dtype=torch.float64)
        self.run_test(PowModule2(), (x,))
        class PowModule3(torch.nn.Module):
            def forward(self, x, y):
                return y[torch.pow(2, x)]
        x = torch.randint(5, (2, 3, 4))
        y = torch.rand(100)
        self.run_test(PowModule3(), (x, y))
    # The arithmetic ops (Add/Sub/Mul/Div/Gemm/Pow/Mod) fail in ORT for
    # low-precision integer dtypes (including uint8); add to(dtype=torch.long)
    # to avoid an ORT output type mismatch.
    # will be fixed in ONNX version 14.
    @skipIfUnsupportedMaxOpsetVersion(13)
    def test_arithmeticOps_with_low_precision(self):
        """Elementwise arithmetic parity for uint8/int8/int16 operands,
        including mixed-dtype combinations with float and int64."""
        class AddModule(torch.nn.Module):
            def forward(self, x, y):
                return x + y
        class SubModule(torch.nn.Module):
            def forward(self, x, y):
                return x - y
        class MulModule(torch.nn.Module):
            def forward(self, x, y):
                return x * y
        class DivModule(torch.nn.Module):
            def forward(self, x, y):
                return x / y
        class PowModule(torch.nn.Module):
            def forward(self, x, y):
                return x.pow(y)
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.uint8)
        z = torch.tensor([1], dtype=torch.uint8)
        self.run_test(AddModule(), (x, y))
        self.run_test(SubModule(), (x, y))
        self.run_test(MulModule(), (x, y))
        self.run_test(DivModule(), (x, y))
        self.run_test(PowModule(), (x, z))
        x = torch.tensor([2, 3, 5], dtype=torch.int8)
        y = torch.tensor([2, 3, 5], dtype=torch.int8)
        z = torch.tensor([1], dtype=torch.int8)
        self.run_test(AddModule(), (x, y))
        self.run_test(SubModule(), (x, y))
        self.run_test(MulModule(), (x, y))
        self.run_test(DivModule(), (x, y))
        self.run_test(PowModule(), (x, z))
        x = torch.tensor([2, 3, 5], dtype=torch.int16)
        y = torch.tensor([2, 3, 5], dtype=torch.int16)
        z = torch.tensor([1], dtype=torch.int16)
        self.run_test(AddModule(), (x, y))
        self.run_test(SubModule(), (x, y))
        self.run_test(MulModule(), (x, y))
        self.run_test(DivModule(), (x, y))
        self.run_test(PowModule(), (x, z))
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.float32)
        z = torch.tensor([1], dtype=torch.float64)
        self.run_test(AddModule(), (x, y))
        self.run_test(SubModule(), (x, y))
        self.run_test(MulModule(), (x, y))
        self.run_test(DivModule(), (x, y))
        self.run_test(PowModule(), (x, z))
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.int64)
        z = torch.tensor([1], dtype=torch.int32)
        self.run_test(AddModule(), (x, y))
        self.run_test(SubModule(), (x, y))
        self.run_test(MulModule(), (x, y))
        self.run_test(DivModule(), (x, y))
        self.run_test(PowModule(), (x, z))
    # fmod was added in version 10
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipIfUnsupportedMaxOpsetVersion(13)
    def test_mod_with_low_precision(self):
        """fmod parity for low-precision int dtypes; result cast to long to
        sidestep the ORT low-precision output-type mismatch."""
        class ModModule(torch.nn.Module):
            def forward(self, x, y):
                return torch.fmod(x, y).to(dtype=torch.long)
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.uint8)
        self.run_test(ModModule(), (x, y))
        x = torch.tensor([2, 3, 5], dtype=torch.int8)
        y = torch.tensor([2, 3, 5], dtype=torch.int8)
        self.run_test(ModModule(), (x, y))
        x = torch.tensor([2, 3, 5], dtype=torch.int16)
        y = torch.tensor([2, 3, 5], dtype=torch.int16)
        self.run_test(ModModule(), (x, y))
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.int32)
        self.run_test(ModModule(), (x, y))
        x = torch.tensor([2, 3, 5], dtype=torch.uint8)
        y = torch.tensor([2, 3, 5], dtype=torch.float64)
        self.run_test(ModModule(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_empty_constant_shape(self):
        """0-d (scalar-shaped) constants from zeros/ones/full/empty combined
        with a scalar input tensor."""
        class Zeros(torch.nn.Module):
            def forward(self, x):
                y = torch.zeros(())
                y += x
                return y
        x = torch.tensor(42.0)
        self.run_test(Zeros(), x)
        class Ones(torch.nn.Module):
            def forward(self, x):
                y = torch.ones(())
                y += x
                return y
        x = torch.tensor(42.0)
        self.run_test(Ones(), x)
        class Full(torch.nn.Module):
            def forward(self, x):
                y = torch.full((), 1.0)
                y += x
                return y
        x = torch.tensor(42.0)
        self.run_test(Full(), x)
        class Empty(torch.nn.Module):
            def forward(self, x):
                # fill_(0) makes the uninitialized tensor deterministic.
                y = torch.empty(()).fill_(0)
                y += x
                return y
        x = torch.tensor(42.0)
        self.run_test(Empty(), x)
    def test_std(self):
        """torch.std over all elements, biased and unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, unbiased=False)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class StandardDeviationUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, unbiased=True)
        model = StandardDeviationUnbiased()
        self.run_test(model, x)
    def test_std_along_dims(self):
        """torch.std reduced over dims (0, 1), biased and unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, dim=(0, 1), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class StandardDeviationUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, dim=(0, 1), unbiased=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviationUnbiased()
        self.run_test(model, x)
    def test_std_keepdim(self):
        """torch.std over dims (0, 1) with keepdim=True, biased and unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, dim=(0, 1), unbiased=False, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class StandardDeviationUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, dim=(0, 1), unbiased=True, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviationUnbiased()
        self.run_test(model, x)
    def test_std_correction(self):
        """torch.std with an explicit Bessel `correction` and keepdim."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, dim=(0, 1), correction=3, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
    def test_var(self):
        """torch.var over all elements (biased/unbiased) and a sqrt-of-var case."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, unbiased=False)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, unbiased=True)
        model = VarianceUnbiased()
        self.run_test(model, x)
        class VarianceSqrt(torch.nn.Module):
            def forward(self, input):
                y = torch.var(input, 1)
                return torch.sqrt(y + 1e-8)
        x = torch.randn(1, 2, 3, 300, 300)
        model = VarianceSqrt()
        self.run_test(model, x)
    def test_var_along_dims(self):
        """torch.var reduced over dims (0, 1), biased and unbiased."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, dim=(0, 1), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, dim=(0, 1), unbiased=True)
        x = torch.randn(2, 3, 4)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_var_keepdim(self):
        """torch.var over dims (0, 1) with keepdim=True, biased and unbiased."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, dim=(0, 1), unbiased=False, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, dim=(0, 1), unbiased=True, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_var_correction(self):
        """torch.var with an explicit Bessel `correction` and keepdim."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var(input, dim=(0, 1), correction=3, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
    def test_var_mean(self):
        """torch.var_mean over all elements, biased and unbiased."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, unbiased=False)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, unbiased=True)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_var_mean_along_dims(self):
        """torch.var_mean reduced over dims (0, 1), biased and unbiased."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 1), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 1), unbiased=True)
        x = torch.randn(2, 3, 4)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_var_mean_mixed_dims(self):
        """var_mean with reversed, non-contiguous, and non-zero dim tuples."""
        class ReverseDims(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(2, 1), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = ReverseDims()
        self.run_test(model, x)
        class SkipDims(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 2), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = SkipDims()
        self.run_test(model, x)
        class NonZeroDims(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(1, 2), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = NonZeroDims()
        self.run_test(model, x)
    def test_var_mean_keepdim(self):
        """torch.var_mean over dims (0, 1) with keepdim=True, biased/unbiased."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 1), unbiased=False, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 1), unbiased=True, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_var_mean_correction(self):
        """torch.var_mean with an explicit Bessel `correction` and keepdim."""
        class Variance(torch.nn.Module):
            def forward(self, input):
                return torch.var_mean(input, dim=(0, 1), correction=3, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = Variance()
        self.run_test(model, x)
    def test_std_mean(self):
        """torch.std_mean over all elements, biased and unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, unbiased=False)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class StandardDeviationUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, unbiased=True)
        model = StandardDeviationUnbiased()
        self.run_test(model, x)
    def test_std_mean_along_dims(self):
        """torch.std_mean reduced over dims (0, 1), biased and unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, dim=(0, 1), unbiased=False)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class VarianceUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, dim=(0, 1), unbiased=True)
        x = torch.randn(2, 3, 4)
        model = VarianceUnbiased()
        self.run_test(model, x)
    def test_std_mean_keepdim(self):
        """torch.std_mean over dims (0, 1) with keepdim=True, biased/unbiased."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, dim=(0, 1), unbiased=False, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviation()
        self.run_test(model, x)
        class StandardDeviationUnbiased(torch.nn.Module):
            def forward(self, input):
                return torch.std_mean(input, dim=(0, 1), unbiased=True, keepdim=True)
        x = torch.randn(2, 3, 4)
        model = StandardDeviationUnbiased()
        self.run_test(model, x)
def test_std_mean_correction(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.var_mean(input, dim=(0, 1), correction=3, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
    def test_bitshift(self):
        """int64 left/right shifts by scalars and by a tensor of shift amounts."""
        class BitshiftModel(torch.nn.Module):
            def forward(self, input):
                return (
                    input >> 1,
                    input << 3,
                    input >> torch.tensor([1, 2]),
                    input << 4,
                )
        input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
        self.run_test(BitshiftModel(), input)
    def test_bitshift_other_fp(self):
        """Left shift of an int64 tensor by a non-integer (float) amount."""
        class BitshiftModel(torch.nn.Module):
            def forward(self, input):
                return input << 2.4
        input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
        self.run_test(BitshiftModel(), input)
    # uint8 not implemented in ORT for Mul used in
    # exporting bitshift for opset_version < 10
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_bitshift_uint8(self):
        """Shifts on uint8 inputs, by ints, floats and a uint8 tensor."""
        class BitshiftModel(torch.nn.Module):
            def forward(self, input, input2):
                return (
                    input >> 1,
                    input << 3.0,
                    input2 >> torch.tensor([1, 2], dtype=torch.uint8),
                    input2 << 4.0,
                )
        input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
        input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
        self.run_test(BitshiftModel(), (input, input2))
def test_narrow(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, 2)
x = torch.randn(3, 3, requires_grad=True)
self.run_test(NarrowModel(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_narrow_dynamic(self):
        """torch.narrow with a length derived from the input's runtime shape."""
        class NarrowModel(torch.nn.Module):
            def forward(self, input):
                return torch.narrow(input, 0, 0, input.shape[0] - 1)
        x = torch.randn(3, 3, requires_grad=True)
        self.run_test(NarrowModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
class IndexFillModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
return input.index_fill(2, index, -1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexFillModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
class IndexCopyModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
source = torch.ones(3, 2, 5)
return input.index_copy(1, index, source)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexCopyModel(), x)
def test_select(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, 1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
def test_select_negative_index(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, -1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
    def test_index_select_constant_scaler_index(self):
        """index_select with a 0-d index tensor built from a Python constant."""
        class IndexSelectScalerIndexModel(torch.nn.Module):
            def forward(self, x):
                index = 2
                return torch.index_select(x, 1, torch.tensor(index))
        x = torch.randn(3, 4)
        self.run_test(IndexSelectScalerIndexModel(), x)
    def test_index_select_scaler_index(self):
        """index_select with a 0-d index computed at runtime (base + offset)."""
        class IndexSelectScalerIndexModel(torch.nn.Module):
            def __init__(self, index_base):
                super(IndexSelectScalerIndexModel, self).__init__()
                self.index_base = torch.tensor(index_base)
            def forward(self, x, index_offset):
                index = self.index_base + index_offset
                return torch.index_select(x, 1, index)
        x = torch.randn(3, 4)
        offset = 2
        index_offset = torch.tensor(offset)
        base = 1
        self.run_test(IndexSelectScalerIndexModel(base), (x, index_offset))
def test_take(self):
class TakeModel(torch.nn.Module):
def forward(self, x, y):
return torch.take(x, y)
x = torch.randn(6, 4, 3, 3)
y = torch.tensor([4, 1, 7, 15, 63])
self.run_test(TakeModel(), (x, y))
def test_topk(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.topk(x, 3)
x = torch.arange(1.0, 6.0, requires_grad=True)
self.run_test(MyModule(), x)
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_topk_int32_k(self):
        """topk where k is an int32 tensor input rather than a constant."""
        class Model(torch.nn.Module):
            def forward(self, x, k):
                return torch.topk(x, k)
        x = torch.arange(1.0, 6.0)
        k = torch.tensor(3, dtype=torch.int32)
        self.run_test(Model(), (x, k))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_topk_smallest_unsorted(self):
        """topk with largest=False; the unsorted variant is re-sorted before
        comparison since element order is unspecified."""
        class MyModule(torch.nn.Module):
            def forward(self, x, k):
                # When sorted=False, order of elements in the output tensors
                # are not expected to match between PyTorch and ORT
                topk_unsorted = torch.topk(x, k, largest=False, sorted=False)
                topk_sorted = torch.topk(x, k, largest=False, sorted=True)
                return topk_sorted, torch.sort(topk_unsorted.values).values
        x = torch.arange(1.0, 6.0, requires_grad=True)
        k = torch.tensor(3)
        self.run_test(MyModule(), (x, k))
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_topk_script(self):
        """topk with a tensor k inside a script module."""
        class MyModuleDynamic(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, k):
                return torch.topk(x, k)
        x = torch.arange(1.0, 6.0, requires_grad=True)
        k = torch.tensor(3)
        self.run_test(MyModuleDynamic(), [x, k])
    @skipScriptTest()  # Python builtin apply of FunctionMeta object is currently not supported in Torchscript.
    @skipIfUnsupportedMinOpsetVersion(11)  # Clip op min is an input since opset 11.
    def test_auto_grad(self):
        """Export custom autograd.Function ops through a registered
        prim::PythonOp symbolic mapping MyClip -> Clip and MyRelu -> Relu."""
        class MyClip(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input, scalar):
                ctx.save_for_backward(input)
                return input.clamp(min=scalar)
        class MyRelu(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(input)
                return input.clamp(min=0)
        def symbolic_python_op(
            ctx: torch.onnx.SymbolicContext, g: torch._C.Graph, *args, **kwargs
        ):
            # Dispatch on the Function's class name recorded in the PythonOp node.
            n = ctx.cur_node
            name = kwargs["name"]
            if name == "MyClip":
                return g.op("Clip", args[0], args[1], outputs=n.outputsSize())
            elif name == "MyRelu":
                return g.op("Relu", args[0], outputs=n.outputsSize())
            else:
                return _unimplemented("prim::PythonOp", "unknown node kind: " + name)
        register_custom_op_symbolic("prim::PythonOp", symbolic_python_op, 1)
        # Unregister afterwards so the global mapping doesn't leak into other tests.
        self.addCleanup(unregister_custom_op_symbolic, "prim::PythonOp", 1)
        class MyClipModule(torch.nn.Module):
            def forward(self, x, min):
                return MyClip.apply(x, min)
        x = torch.randn(3, 3)
        min = torch.tensor([0.0])
        self.run_test(MyClipModule(), (x, min))
        class MyReluModule(torch.nn.Module):
            def forward(self, x):
                return MyRelu.apply(x)
        x = torch.randn(3, 3)
        self.run_test(MyReluModule(), x)
def test_clip_int(self):
class MyClipInt(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, 0, 1)
self.run_test(MyClipInt(), torch.randn(3, 3).to(torch.int64))
def test_relu_int(self):
self.run_test(torch.nn.ReLU(), torch.randn(3, 3).to(torch.int32))
def test_pad_int(self):
class MyPadInt(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.pad(x, (1, 1))
self.run_test(MyPadInt(), torch.randn(3, 3).to(torch.int32))
def test_min_int(self):
class MyMinInt(torch.nn.Module):
def forward(self, x):
return torch.min(x, x + 1)
self.run_test(MyMinInt(), torch.randn(3, 3).to(torch.int32))
def test_max_int(self):
class MyMaxnInt(torch.nn.Module):
def forward(self, x):
return torch.max(x, x + 1)
self.run_test(MyMaxnInt(), torch.randn(3, 3).to(torch.int32))
@skipIfUnsupportedOpsetVersion([7])
def test_normalize(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.normalize(x)
x = torch.randn(3, 3)
self.run_test(Model(), x)
def test_layer_norm(self):
model = torch.nn.LayerNorm([10, 10])
x = torch.randn(20, 5, 10, 10)
self.run_test(model, x)
def test_batchnorm1d(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=True)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm1d_noaffine(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=False)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm1d_norunningstats(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, track_running_stats=False)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm2d(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=True)
self.run_test(model, x)
def test_batchnorm2d_noaffine(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=False)
self.run_test(model, x)
def test_batchnorm2d_norunningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, track_running_stats=False)
self.run_test(model, x)
def test_batchnorm3d(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.BatchNorm3d(3, affine=True)
self.run_test(model, x)
def test_batchnorm3d_noaffine(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.BatchNorm3d(3, affine=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm1d_runningstats(self):
x = torch.randn(10, 5, 128)
model = torch.nn.InstanceNorm1d(5, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm1d(5, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm1d_norunningstats(self):
x = torch.randn(10, 5, 128)
model = torch.nn.InstanceNorm1d(5, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm1d(5, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm2d_runningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.InstanceNorm2d(3, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm2d(3, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm2d_norunningstats(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.InstanceNorm2d(3, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm2d(3, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(
9
) # Because ConstantOfShape op is not supported for opset < 9
def test_instancenorm3d_runningstats(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.InstanceNorm3d(3, affine=True, track_running_stats=True)
self.run_test(model, x)
model = torch.nn.InstanceNorm3d(3, affine=False, track_running_stats=True)
self.run_test(model, x)
def test_instancenorm3d_norunningstats(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.InstanceNorm3d(3, affine=True, track_running_stats=False)
self.run_test(model, x)
model = torch.nn.InstanceNorm3d(3, affine=False, track_running_stats=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float64
)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar_different_types(self):
# Tests the case when scalar src (updates values) type is different
# from self type. Happens only with scalar src - PyTorch does not
# allow this when src is a tensor.
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32
)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input=(input, indices, values))
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 5, 6)
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 2)
indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
self.run_test(ScatterModel(), (input, indices, values))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_add(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter_add(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input=(input, indices, values))
@torch.jit.script
def scatter_sum(src: Tensor, index: Tensor):
size = src.size()
out = torch.zeros(size, dtype=src.dtype)
return out.scatter_add_(1, index, src)
class ScatterModel(torch.nn.Module):
def forward(self, src, index):
return scatter_sum(src, index)
src = torch.rand(3, 2)
index = torch.tensor([[0, 1], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), (src, index))
@skipIfUnsupportedMinOpsetVersion(9)
def test_bucketize(self):
class BucketModel(torch.nn.Module):
def forward(self, input, boundaries):
return torch.bucketize(input, boundaries), torch.bucketize(
input, boundaries, right=True
)
input = torch.tensor([[2, 5, 10], [6, 8, 3]])
boundaries = torch.tensor([1, 5, 7, 8, 10])
self.run_test(BucketModel(), (input, boundaries))
@skipIfUnsupportedMinOpsetVersion(9)
def test_one_hot(self):
class OneHot(torch.nn.Module):
def __init__(self, num_classes):
super().__init__()
self.num_classes = num_classes
def forward(self, x):
return torch.nn.functional.one_hot(x, self.num_classes)
x = torch.arange(10)
self.run_test(OneHot(15), (x))
class OneHot(torch.nn.Module):
def forward(self, x, num_classes):
num_classes = num_classes.to(torch.int32)
return torch.nn.functional.one_hot(x, num_classes[0])
x = torch.arange(10)
num_classes = 15 * torch.ones(1)
self.run_test(OneHot(), (x, num_classes))
@skipIfUnsupportedMinOpsetVersion(9)
def test_gather(self):
class GatherModel(torch.nn.Module):
def forward(self, input, indices):
return input.gather(1, indices)
input = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(GatherModel(), input=(input, indices))
@skipScriptTest() # Scripting error: Cannot instantiate nn module
def test_gather_constant_fold(self):
class GatherModule(torch.nn.Module):
def __init__(self):
super(GatherModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
# torch.nn.Embedding is converted to ONNX::Gather.
# Constant folding will be triggerred for constant inputs.
# This pattern is common for constant mask inputs in transformer models.
self.embed = torch.nn.Embedding(8, 3)
def forward(self, x):
# shape is of rank 0
shape = self.weight.shape[0]
m = 5 - shape
y = torch.ones(1, 4, dtype=torch.long)
return x.clamp(min=m), self.embed(y)
x = torch.randn(1)
self.run_test(GatherModule(), (x,))
class GatherModule(torch.nn.Module):
def __init__(self):
super(GatherModule, self).__init__()
self.register_buffer("weight", torch.ones(2))
def forward(self, x):
# shape is of rank 0
shape = self.weight.shape[0]
pad = [1, shape, shape, shape]
zero_pad = torch.nn.ZeroPad2d(pad)
return zero_pad(x)
x = torch.randn(1, 3, 2)
self.run_test(GatherModule(), (x,))
class GatherModule(torch.nn.Module):
def __init__(self):
super(GatherModule, self).__init__()
self.register_buffer("rb", torch.randn(1, 1, 3, 1, 1))
def forward(self, x):
x += self.rb[0]
return x
x = torch.randn(1, 3, 224, 224)
self.run_test(
GatherModule(),
(x,),
dynamic_axes={
"input": {0: "batch", 2: "height", 3: "width"},
"output": {0: "batch", 1: "class", 2: "height", 3: "width"},
},
input_names=["input"],
output_names=["output"],
)
@skipIfUnsupportedOpsetVersion([13])
@skipIfUnsupportedMinOpsetVersion(9)
def test_expand(self):
class ExpandModel(torch.nn.Module):
def forward(self, input):
return input.expand(2, 3, -1)
input = torch.randn(2, 1, 4)
self.run_test(ExpandModel(), input=(input))
class ExpandInferDimModel(torch.nn.Module):
def forward(self, input):
return input.expand(-1, input.size(0))
input = torch.randn(3, 1)
self.run_test(ExpandInferDimModel(), input=(input))
class ExpandTensorSizeModel(torch.nn.Module):
def forward(self, input, size):
return input.expand(size)
input = torch.randn(
3,
)
size = torch.tensor(-1)
self.run_test(ExpandTensorSizeModel(), input=(input, size))
@skipIfUnsupportedMinOpsetVersion(11) # index_put is supported in opsets >= 11
def test_dynamic_expand_as(self):
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = 0
return x
x = torch.ones(2, 5)
x2 = torch.randn(3, 4)
self.run_test(
Model(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1]},
test_with_inputs=[x2],
)
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = torch.tensor([1, 2, 3])
return x
x = torch.ones(2, 5, 3)
x2 = torch.randn(3, 4, 3)
self.run_test(
Model(),
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
test_with_inputs=[x2],
)
def test_multinomial(self):
class Multinomial(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 3, replacement=True)
class MultinomialNoReplacement(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 1)
weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
self.run_test(Multinomial(), (weight,))
self.run_test(MultinomialNoReplacement(), (weight,))
def _test_reduced_ops(self, op):
class ReducedOpModule(torch.nn.Module):
def forward(self, input):
return op(input, dim=-1)
if op != torch.mean: # torch.mean only supports float types
x = torch.randint(10, (4, 4), dtype=torch.uint8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int16)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedOpModule(), x)
# torch.mean only supports float types
# ORT does not support double ReduceProd for double
if op != torch.prod and op != torch.mean:
x = torch.randn(4, 5, dtype=torch.double)
self.run_test(ReducedOpModule(), x)
if op != torch.prod: # torch.prod not implemented for Half
x = torch.randn(4, 4, dtype=torch.half)
self.run_test(ReducedOpModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedOpModule(), x)
    def test_reduced_sum(self):
        """Run the shared reduced-op checks with torch.sum."""
        return self._test_reduced_ops(op=torch.sum)
    def test_reduced_mean(self):
        """Run the shared reduced-op checks with torch.mean."""
        return self._test_reduced_ops(op=torch.mean)
    def test_reduced_prod(self):
        """Run the shared reduced-op checks with torch.prod."""
        return self._test_reduced_ops(op=torch.prod)
def test_reduced_sum_dtypes(self):
class NoDimModel(torch.nn.Module):
def forward(self, input):
return input.sum(dtype=torch.float)
class DimModel(torch.nn.Module):
def forward(self, input):
return input.sum(dim=-1, dtype=torch.float)
input = torch.randn((4, 4), dtype=torch.half)
self.run_test(NoDimModel(), input)
self.run_test(DimModel(), input)
def test_reduced_min_max(self):
class ReducedMinMaxModule(torch.nn.Module):
def forward(self, input):
return torch.min(input, dim=-1)[0], torch.max(input, dim=0)[0]
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedMinMaxModule(), x)
def test_reduce_log_sum_exp(self):
class ReduceLogSumExpModel(torch.nn.Module):
def forward(self, input):
a = torch.logsumexp(input, dim=0)
b = torch.logsumexp(input, dim=(0, 1))
return a + b
x = torch.randn(4, 4, requires_grad=True)
self.run_test(ReduceLogSumExpModel(), x)
def test_softmax(self):
for i in range(-4, 3):
model = torch.nn.Softmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 4, 5, 6))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_softmax_large_values(self):
input = torch.tensor(
[[-1e12, -1e12, -1e12], [1e12, 0.0, -5.0], [3.0, 4.0, 5.0]]
)
for i in range(-2, 1):
model = torch.nn.Softmax(dim=i)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 3))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_logsoftmax(self):
for i in range(7)[2:]:
model = torch.nn.LogSoftmax(dim=i - 1)
dims = [2] * (i - 2) + [3, 4]
input = torch.ones(*dims, requires_grad=True)
self.run_test(model, input)
def test_logsoftmax_dim(self):
for i in range(-4, 3):
model = torch.nn.LogSoftmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
def test_logsoftmax_dtype(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.log_softmax(x, dim=1, dtype=torch.float64)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(Model(), x)
def test_softplus(self):
class BetaOneModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaOneModel(), x)
class BetaModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x, beta=2)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaModel(), x)
class BetaFloatModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.softplus(x, beta=1.7)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(BetaFloatModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_no_hidden(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16)
def forward(self, x):
return self.rnn(x)
input = torch.randn((10, 16, 16))
self.run_test(LSTMModel(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_proj_no_hidden(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16, proj_size=8)
def forward(self, x):
return self.rnn(x)
input = torch.randn((10, 16, 16))
with self.assertRaises(RuntimeError):
self.run_test(LSTMModel(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
def forward(self, x, h0, c0):
return self.rnn(x, (h0, c0))
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_test(LSTMModel(), (input, h0, c0))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_cell(self):
class LSTMCellModel(torch.nn.Module):
def __init__(self, bias):
super().__init__()
self.lstm_cell = torch.nn.LSTMCell(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, bias=bias
)
def forward(self, x, h0, c0):
return self.lstm_cell(x, (h0, c0))
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
for bias in [True, False]:
self.run_test(LSTMCellModel(bias), (input, h0, c0))
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_default_init_state(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
def forward(self, x):
return self.rnn(x)
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(LSTMModel(), input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_fixed_batch_size(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super(LSTMModel, self).__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE
def forward(self, input):
batch_size = input.size()[1]
h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
return self.lstm(input, (h0, c0))
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
# verify with different input of same batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(
LSTMModel(), input, fixed_batch_size=True, test_with_inputs=[input2]
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_post_fix_init_state(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super(LSTMModel, self).__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
)
self.RNN_HIDDEN_SIZE = RNN_HIDDEN_SIZE
def forward(self, input):
batch_size = input.size()[1]
h0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
c0 = torch.ones([1, batch_size, self.RNN_HIDDEN_SIZE])
return self.lstm(input, (h0, c0))
model = LSTMModel()
input = torch.randn(RNN_SEQUENCE_LENGTH, 1, RNN_INPUT_SIZE)
# verify with different input of different batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(
model,
input,
input_names=["input.1"],
dynamic_axes={"input.1": {0: "seq", 1: "batch"}},
test_with_inputs=[input2],
)
def test_lstm_constant_folding(self):
class LstmNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(LstmNet, self).__init__()
self.lstm = torch.nn.LSTM(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state: Tuple[Tensor, Tensor]):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, (h0, c0))
batch_size1 = 3
model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_no_bias(self):
class LstmNet(torch.nn.Module):
def __init__(self, num_layers, bidirectional):
super(LstmNet, self).__init__()
self.lstm = torch.nn.LSTM(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
num_layers,
bias=False,
bidirectional=bidirectional,
)
def forward(self, input, initial_state: Tuple[Tensor, Tensor]):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(num_layers, bidirectional):
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
num_directions = 2 if bidirectional else 1
model = LstmNet(num_layers, bidirectional)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
return model, (input, (h0, c0))
num_layers = [1, 1, 2, 3]
bidirectional = [True, False, True, False]
models_and_inputs = [
get_LstmNet_model_and_inputs(n, b)
for n, b in zip(num_layers, bidirectional)
]
for model, input in models_and_inputs:
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
def test_lstm_sequence(self):
class LstmNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn1 = torch.nn.LSTM(8, 8, bidirectional=True, batch_first=True)
self.linear1 = torch.nn.Linear(8 * 2, 8)
self.rnn2 = torch.nn.LSTM(8, 8, bidirectional=True, batch_first=True)
self.linear2 = torch.nn.Linear(8 * 2, 8)
def forward(self, input):
rnn_output1, _ = self.rnn1(input)
linear_output1 = self.linear1(rnn_output1)
rnn_output2, _ = self.rnn2(linear_output1)
linear_output2 = self.linear2(rnn_output2)
return linear_output2
input = torch.zeros((1, 100, 8), dtype=torch.float32)
self.run_test(
LstmNet(),
input,
input_names=["input"],
output_names=["output"],
dynamic_axes={
"input": {0: "batch_size", 1: "w", 2: "h"},
"output": {0: "batch_size", 1: "w", 2: "h"},
},
)
@skipScriptTest()
def test_rnn_no_bias(self):
def make_model(layers, packed_sequence):
batch_first = True if packed_sequence == 2 else False
model = torch.nn.RNN(
RNN_INPUT_SIZE,
RNN_HIDDEN_SIZE,
layers,
bidirectional=False,
batch_first=batch_first,
bias=False,
)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
return model
def make_input(batch_size, layers, packed_sequence):
batch_first = True if packed_sequence == 2 else False
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
h0 = torch.randn(layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
layers = [1, 3, 1, 3, 1, 3]
packed_sequence = [0, 0, 1, 1, 2, 2]
models = [make_model(l, p) for l, p in zip(layers, packed_sequence)]
inputs = [
make_input(RNN_BATCH_SIZE, l, p) for l, p in zip(layers, packed_sequence)
]
for model, input in zip(models, inputs):
self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
def test_gru_no_bias(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(
input_size,
hidden_size,
num_layers,
bidirectional=bidirectional,
bias=False,
)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
input_size = [7, 5]
hidden_size = [3, 4]
num_layers = [2, 3]
batch_size = [3, 4]
seq_len = [5, 7]
bidirectional = [True, False]
models_and_inputs = [
get_GruNet_model_and_inputs(i, h, n, b, s, bi)
for i, h, n, b, s, bi in zip(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
)
]
for model, input in models_and_inputs:
self.run_test(model, input, do_constant_folding=True)
def test_gru_constant_folding(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(
input_size, hidden_size, num_layers, bidirectional=bidirectional
)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(
input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional
):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
batch_size1 = 3
model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(8)
def test_max_tensors(self):
class MaxModel(torch.nn.Module):
def forward(self, input, other):
return torch.max(input, other)
model = MaxModel()
x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 1, requires_grad=True)
self.run_test(model, (x, y))
def test_amax_amin(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.amax(x, dim=0, keepdim=True), torch.amin(
x, dim=[0, 1], keepdim=False
)
model = Model()
x = torch.randn(4, 4)
self.run_test(model, x)
def test_aminmax(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.aminmax(x, dim=1, keepdim=True), torch.aminmax(
x, keepdim=False
)
model = Model()
x = torch.randn(3, 4)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0))
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(ArangeScript(), x, remained_onnx_input_idx=[])
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0))
self.run_test(ArangeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
self.run_test(ArangeModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end_step(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return (
torch.arange(
2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float
).view(-1, 1)
+ a
)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_step_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return (
torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1)
+ a
)
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return (
torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1)
+ a
)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test__dim_arange(self):
class DimArange(torch.nn.Module):
def forward(self, input):
return torch._dim_arange(input, 1)
x = torch.ones(5, 6)
self.run_test(DimArange(), x, input_names=["x"], dynamic_axes={"x": [0, 1]})
remained_onnx_input_idx = None if self.opset_version < 11 else []
self.run_test(DimArange(), x, remained_onnx_input_idx=remained_onnx_input_idx)
def _test_compare_ops(self, model, num_inputs):
x_float = torch.randn(1, 2, 3, 4, requires_grad=True)
x_int = torch.randint(10, (3, 4), dtype=torch.int32)
if num_inputs > 1:
y_float = torch.randn(1, 2, 3, 4, requires_grad=True)
y_int = torch.randint(10, (3, 4), dtype=torch.int32)
self.run_test(model, (x_float, y_float))
self.run_test(model, (x_float, y_int))
self.run_test(model, (x_int, y_float))
self.run_test(model, (x_int, y_int))
else:
self.run_test(model, x_float)
self.run_test(model, x_int)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_and_or_xor(self):
        """Export the bitwise operators ^, |, &, ~ on boolean tensors."""
        class MyModel(torch.nn.Module):
            def forward(self, x, y):
                return x ^ y, x | y, x & y, ~x
        x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        self.run_test(MyModel(), input=(x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_logical_and(self):
        """Export torch.logical_and across bool, int, double, and mixed dtypes."""
        class AndModel(torch.nn.Module):
            def forward(self, x, y):
                return torch.logical_and(x, y)
        x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        self.run_test(AndModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.int32)
        y = torch.randint(10, (5, 5), dtype=torch.int32)
        self.run_test(AndModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.double)
        y = torch.randint(10, (5, 5), dtype=torch.double)
        self.run_test(AndModel(), input=(x, y))
        # Mixed float32 / int64 operands exercise implicit casting in the exporter.
        x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
        y = torch.randint(10, (2, 3, 5), dtype=torch.long)
        self.run_test(AndModel(), input=(x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_logical_or(self):
        """Export torch.logical_or across bool, int, double, and mixed dtypes."""
        class OrModel(torch.nn.Module):
            def forward(self, x, y):
                return torch.logical_or(x, y)
        x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        self.run_test(OrModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.int32)
        y = torch.randint(10, (5, 5), dtype=torch.int32)
        self.run_test(OrModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.double)
        y = torch.randint(10, (5, 5), dtype=torch.double)
        self.run_test(OrModel(), input=(x, y))
        # Mixed float32 / int64 operands exercise implicit casting in the exporter.
        x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
        y = torch.randint(10, (2, 3, 5), dtype=torch.long)
        self.run_test(OrModel(), input=(x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_logical_xor(self):
        """Export torch.logical_xor across bool, int, double, and mixed dtypes."""
        class XorModel(torch.nn.Module):
            def forward(self, x, y):
                return torch.logical_xor(x, y)
        x = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        y = torch.randint(0, 2, (5, 5), dtype=torch.bool)
        self.run_test(XorModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.int32)
        y = torch.randint(10, (5, 5), dtype=torch.int32)
        self.run_test(XorModel(), input=(x, y))
        x = torch.randint(10, (5, 5), dtype=torch.double)
        y = torch.randint(10, (5, 5), dtype=torch.double)
        self.run_test(XorModel(), input=(x, y))
        # Mixed float32 / int64 operands exercise implicit casting in the exporter.
        x = torch.randint(10, (2, 3, 5), dtype=torch.float32)
        y = torch.randint(10, (2, 3, 5), dtype=torch.long)
        self.run_test(XorModel(), input=(x, y))
    @skipIfUnsupportedMinOpsetVersion(11)  # float equal added after opset 11
    def test_eq(self):
        """Export the equality (==) comparison operator."""
        class EqualModel(torch.nn.Module):
            def forward(self, input, other):
                return input == other
        self._test_compare_ops(EqualModel(), 2)
    def test_gt(self):
        """Export the greater-than (>) comparison operator."""
        class GreaterModel(torch.nn.Module):
            def forward(self, input, other):
                return input > other
        self._test_compare_ops(GreaterModel(), 2)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_ge(self):
        """Export the greater-or-equal (>=) comparison operator."""
        class GreaterOrEqualModel(torch.nn.Module):
            def forward(self, input, other):
                return input >= other
        self._test_compare_ops(GreaterOrEqualModel(), 2)
    def test_gt_scalar(self):
        """Export tensor > scalar comparison."""
        class GreaterModel(torch.nn.Module):
            def forward(self, input):
                return input > 1
        self._test_compare_ops(GreaterModel(), 1)
    def test_gt_primitive(self):
        """Export a comparison between two Python ints (no tensors involved)."""
        class GreaterModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # Plain int attribute — compared against a primitive int input.
                self.y: int = 2
            def forward(self, x: int):
                return self.y > x
        x = 3
        self.run_test(GreaterModel(), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_ge_scalar(self):
        """Export tensor >= scalar comparison."""
        class GreaterOrEqualModel(torch.nn.Module):
            def forward(self, input):
                return input >= 1
        self._test_compare_ops(GreaterOrEqualModel(), 1)
def test_lt(self):
class LessModel(torch.nn.Module):
def forward(self, input, other):
return input > other
self._test_compare_ops(LessModel(), 2)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_le(self):
        """Export the less-or-equal (<=) comparison operator."""
        class LessOrEqualModel(torch.nn.Module):
            def forward(self, input, other):
                return input <= other
        self._test_compare_ops(LessOrEqualModel(), 2)
    def test_lt_scalar(self):
        """Export tensor < scalar comparison."""
        class LessModel(torch.nn.Module):
            def forward(self, input):
                return input < 1
        self._test_compare_ops(LessModel(), 1)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_le_scalar(self):
        """Export tensor <= scalar comparison."""
        class LessOrEqualModel(torch.nn.Module):
            def forward(self, input):
                return input <= 1
        self._test_compare_ops(LessOrEqualModel(), 1)
    def test_matmul(self):
        """Export torch.matmul for 2-D float and integer operands."""
        class MatmulModel(torch.nn.Module):
            def forward(self, input, other):
                return torch.matmul(input, other)
        x = torch.randn(3, 4, requires_grad=True)
        y = torch.randn(4, 5, requires_grad=True)
        self.run_test(MatmulModel(), (x, y))
        x = torch.randint(10, (3, 4))
        y = torch.randint(10, (4, 5))
        self.run_test(MatmulModel(), (x, y))
    def test_matmul_batch(self):
        """Export batched (3-D) torch.matmul for float and integer operands."""
        class MatmulModel(torch.nn.Module):
            def forward(self, input, other):
                return torch.matmul(input, other)
        x = torch.randn(2, 3, 4, requires_grad=True)
        y = torch.randn(2, 4, 5, requires_grad=True)
        self.run_test(MatmulModel(), (x, y))
        x = torch.randint(10, (2, 3, 4))
        y = torch.randint(10, (2, 4, 5))
        self.run_test(MatmulModel(), (x, y))
    def _argmin_argmax_model(self, input):
        """Export argmin/argmax over the whole tensor, with and without keepdim."""
        class ArgminArgmaxModel(torch.nn.Module):
            def forward(self, input):
                return (
                    torch.argmin(input),
                    torch.argmax(input),
                    torch.argmin(input, keepdim=True),
                    torch.argmax(input, keepdim=True),
                )
        self.run_test(ArgminArgmaxModel(), input)
    def test_argmin_argmax(self):
        """Export argmin/argmax on a random tensor (all values distinct w.h.p.)."""
        input = torch.randn(7, 3, 5)
        self._argmin_argmax_model(input)
    # Argmin and Argmax with "select_last_index" is not supprted before opset 12
    # "select_last_index" was added in opset 12 to deal with corner case where the
    # same value appears multiple times in the tensor
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_argmin_argmax_select_last_index(self):
        """Export argmin/argmax when ties are present (needs select_last_index, opset >= 12)."""
        input = torch.tensor([[1.0, 2.0, 3.0], [1.0, 1.0, 2.0]])
        self._argmin_argmax_model(input)
        # All-ones tensor: every element ties for both min and max.
        input = torch.ones(7, 3, 5)
        self._argmin_argmax_model(input)
    def test_repeat(self):
        """Export Tensor.repeat with a data-dependent repeat count (y.shape[0])."""
        class RepeatModel(torch.nn.Module):
            def forward(self, x, y):
                x2 = x.repeat(y.shape[0], 1)
                y1 = y.view(-1, 1)
                return x2 + y1
        x = torch.tensor([1, 2, 3])
        y = torch.tensor([4, 5, 8, 9])
        self.run_test(RepeatModel(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_repeat_interleave(self):
        """Export repeat_interleave: scalar repeats, dim argument, and tensor repeats."""
        # Scalar repeats over the flattened tensor.
        class FlattenModel(torch.nn.Module):
            def forward(self, x):
                return x.repeat_interleave(2)
        x = torch.tensor([1, 2, 3])
        self.run_test(FlattenModel(), (x,))
        # Scalar repeats along an explicit dim.
        class DimsModel(torch.nn.Module):
            def forward(self, x):
                return x.repeat_interleave(4, dim=1)
        x = torch.tensor([[1, 2], [3, 4]])
        self.run_test(DimsModel(), (x,))
        # Single-element repeats tensor.
        class DimsModel2(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor([4])
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2], [3, 4]])
        self.run_test(DimsModel2(), (x,))
        # Per-row repeats tensor along dim 0.
        class RepeatsDimsModel(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor([1, 2])
                return torch.repeat_interleave(x, repeats, dim=0)
        x = torch.tensor([[1, 2], [3, 4]])
        self.run_test(RepeatsDimsModel(), (x,))
        # Per-column repeats tensor along dim 1.
        class RepeatsDimsModel2(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor([1, 2])
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2], [3, 4]])
        self.run_test(RepeatsDimsModel2(), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_repeat_interleave_noop(self):
        """Export repeat_interleave with repeats=1, which is an identity op."""
        class Model(torch.nn.Module):
            def forward(self, x):
                return x.repeat_interleave(1, dim=1)
        x = torch.randn(4, 1, 8)
        self.run_test(Model(), (x,))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_dynamic_repeat_interleave(self):
        """Export repeat_interleave with dynamic input axes and dynamic repeats."""
        # Constant scalar repeats, dynamic width axis.
        class SingleDynamicModel(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor(4)
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        another_x = torch.tensor([[7, 8], [5, 6]])
        self.run_test(
            SingleDynamicModel(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": {1: "w"}},
        )
        # Same, but using a negative dim index.
        class NegDynamicModel(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor(4)
                return torch.repeat_interleave(x, repeats, dim=-1)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        another_x = torch.tensor([[7, 8], [5, 6]])
        self.run_test(
            NegDynamicModel(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": {1: "w"}},
        )
        # Float inputs, dynamic height axis.
        class SingleDynamicModelFloat(torch.nn.Module):
            def forward(self, x):
                repeats = torch.tensor([4])
                return torch.repeat_interleave(x, repeats, dim=0)
        x = torch.tensor([[1.1, 2.1], [3.1, 4.1]])
        another_x = torch.tensor([[7.1, 8.1], [5.1, 6.1]])
        self.run_test(
            SingleDynamicModelFloat(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": {0: "h"}},
        )
        # Repeats supplied as a model input; both data and repeats dynamic.
        class DynamicRepeatsModel(torch.nn.Module):
            def forward(self, x, repeats):
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        another_x = torch.tensor([[7, 8], [5, 6]])
        repeats = torch.tensor([2])
        another_repeats = torch.tensor([4])
        self.run_test(
            DynamicRepeatsModel(),
            (x, repeats),
            test_with_inputs=[(another_x, another_repeats)],
            input_names=["input_1", "repeats_1"],
            dynamic_axes={"input_1": {1: "w"}, "repeats_1": {0: "r"}},
        )
        # Repeats input dynamic while the data input stays static.
        class DynamicRepeatsModel2(torch.nn.Module):
            def forward(self, x, repeats):
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        repeats = torch.tensor([2])
        another_repeats = torch.tensor([4])
        self.run_test(
            DynamicRepeatsModel2(),
            (x, repeats),
            test_with_inputs=[(x, another_repeats)],
            input_names=["input_1", "repeats_1"],
            dynamic_axes={"repeats_1": {0: "r"}},
        )
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_multiple_dynamic_repeat_interleave(self):
        """Export repeat_interleave with a multi-element dynamic repeats tensor."""
        # Per-column repeats along dim 1.
        class DynamicRepeatsModel(torch.nn.Module):
            def forward(self, x, repeats):
                return torch.repeat_interleave(x, repeats, dim=1)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        repeats = torch.tensor([2, 3, 4])
        another_repeats = torch.tensor([4, 3, 2])
        self.run_test(
            DynamicRepeatsModel(),
            (x, repeats),
            test_with_inputs=[(x, another_repeats)],
            input_names=["input_1", "repeats_1"],
            dynamic_axes={"repeats_1": {0: "r"}},
        )
        # Per-row repeats along dim 0.
        class DynamicRepeatsModel2(torch.nn.Module):
            def forward(self, x, repeats):
                return torch.repeat_interleave(x, repeats, dim=0)
        x = torch.tensor([[1, 2, 4], [3, 4, 7]])
        repeats = torch.tensor([2, 3])
        another_repeats = torch.tensor([4, 3])
        self.run_test(
            DynamicRepeatsModel2(),
            (x, repeats),
            test_with_inputs=[(x, another_repeats)],
            input_names=["input_1", "repeats_1"],
            dynamic_axes={"repeats_1": {0: "r"}},
        )
    def test_view(self):
        """Export Tensor.view with a constant target shape."""
        class ViewModel(torch.nn.Module):
            def forward(self, input):
                return input.view(4, 24)
        x = torch.randint(10, (4, 2, 3, 4), dtype=torch.int32)
        self.run_test(ViewModel(), x)
    def test_view_dynamic(self):
        """Export Tensor.view where the target shape is taken from another tensor."""
        class ViewModel(torch.nn.Module):
            def forward(self, input, other):
                return input.view(other.shape)
        x = torch.randn(2, 3, 4)
        shape = torch.randn(6, 4)
        self.run_test(
            ViewModel(),
            (x, shape),
            input_names=["x", "shape"],
            dynamic_axes={"x": [0, 1, 2], "shape": [0, 1]},
        )
        # With static shapes only the data input should remain in the ONNX graph.
        self.run_test(ViewModel(), (x, shape), remained_onnx_input_idx=[0])
    def test_view_dynamic_zero_dim(self):
        """Export chained view(-1, 2).view(1, -1), including a zero-size input."""
        class ViewModel(torch.nn.Module):
            def forward(self, input):
                input = input.view(-1, 2)
                return input.view(1, -1)
        x = torch.ones(2)
        # Empty tensor checks that the dynamic reshape handles size 0.
        another_x = torch.empty((0,))
        self.run_test(
            ViewModel(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={
                "input_1": [
                    0,
                ]
            },
        )
    def test_view_as(self):
        """Export Tensor.view_as (reshape to another tensor's shape)."""
        class ViewModel(torch.nn.Module):
            def forward(self, input, other):
                return input.view_as(other)
        x = torch.randn(2, 3, 4)
        y = torch.randn(6, 4)
        self.run_test(ViewModel(), (x, y))
    def test_linear(self):
        """Export nn.Linear (module reuse) and functional linear with rank-2/3 inputs."""
        class LinearModel(torch.nn.Module):
            def __init__(self):
                super(LinearModel, self).__init__()
                self.fc = torch.nn.Linear(16, 16)
            def forward(self, x):
                # The same layer is applied twice to check shared-weight export.
                out = self.fc(x)
                out = self.fc(out)
                return out
        x = torch.randn(3, 16)
        self.run_test(LinearModel(), (x,))
        class LinearModel(torch.nn.Module):
            def forward(self, input, weight, bias):
                return torch.nn.functional.linear(input, weight, bias)
        # input of rank 2
        x = torch.randn(2, 2)
        y = torch.randn(2, 2)
        z = torch.randn(1)
        self.run_test(LinearModel(), (x, y, z))
        # input of rank 3
        x = torch.randn(3, 3, 3)
        y = torch.randn(3, 3)
        z = torch.randn(1)
        self.run_test(LinearModel(), (x, y, z))
    @skipScriptTest()
    def test_weight_norm(self):
        """Export weight-normalized Linear and Conv1d layers for several dim choices."""
        # addmm for 3-d inputs converts to onnx::MatMul
        model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
        x = torch.randn(3, 4, 5, requires_grad=True)
        self.run_test(model, x)
        # addmm for 2-d inputs converts to onnx::Gemm
        model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
        x = torch.randn(4, 5, requires_grad=True)
        self.run_test(model, x)
        model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3))
        x = torch.randn(1, 1, 5, requires_grad=True)
        self.run_test(model, x)
        # Negative dim index.
        model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3), dim=-2)
        x = torch.randn(1, 1, 5, requires_grad=True)
        self.run_test(model, x)
        # Explicit parameter name.
        model = torch.nn.utils.weight_norm(torch.nn.Conv1d(3, 6, 3), name="weight")
        x = torch.randn(3, 3, 5, requires_grad=True)
        self.run_test(model, x)
    @skipScriptTest()
    def test_weight_norm_nodim(self):
        """Export weight norm with dim=None (normalize over the whole weight)."""
        # addmm for 3-d inputs converts to onnx::MatMul
        model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
        x = torch.randn(3, 4, 5, requires_grad=True)
        self.run_test(model, x)
        # addmm for 2-d inputs converts to onnx::Gemm
        model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
        x = torch.randn(4, 5, requires_grad=True)
        self.run_test(model, x)
    def test_flatten(self):
        """Export torch.flatten over the whole tensor, including a 1-D input."""
        class FlattenModel(torch.nn.Module):
            def forward(self, input):
                return torch.flatten(input)
        x = torch.randint(10, (1, 2, 3, 4))
        self.run_test(FlattenModel(), x)
        # 1-D input: flatten is a no-op reshape.
        x = torch.randn(4)
        self.run_test(FlattenModel(), x)
    def test_flatten2d(self):
        """Export torch.flatten with start_dim=1 (keep batch dim)."""
        class FlattenModel(torch.nn.Module):
            def forward(self, input):
                return torch.flatten(input, 1)
        x = torch.randint(10, (1, 2, 3, 4))
        self.run_test(FlattenModel(), x)
    def test_flatten2d_neg(self):
        """Export torch.flatten with negative end_dim indices."""
        class FlattenModel(torch.nn.Module):
            def forward(self, x):
                return (
                    torch.flatten(x, 1, -1),
                    torch.flatten(x, 0, -2),
                    torch.flatten(x, 1, -2),
                )
        x = torch.randint(10, (1, 2, 3, 4))
        self.run_test(FlattenModel(), x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_flatten_dynamic_axes(self):
        """Export torch.flatten with a dynamic batch axis (two different batch sizes)."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return torch.flatten(x, start_dim=2, end_dim=3)
        batch_size = 3
        x = torch.randn(batch_size, 5, 4, 5)
        y = torch.randn(5, 5, 4, 5)
        model = MyModule()
        self.run_test(
            model,
            x,
            test_with_inputs=[y],
            input_names=["input"],
            output_names=["output"],
            dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_getitem(self):
        """Export list indexing with a runtime tensor index, including negative indices."""
        class GetItemModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y, z, ind):
                # this will create prim::ListConstruct(x, y, z) + aten::__getitem__
                arr = [x, y, z]
                return arr[ind]
        x = torch.randn(3, 4, 5)
        y = torch.randn(1, 4, 5)
        z = torch.randn(2, 4, 5)
        ind = torch.tensor(1, dtype=torch.long)
        self.run_test(GetItemModel(), (x, y, z, ind))
        # Negative index into the constructed list.
        ind = torch.tensor(-2, dtype=torch.long)
        self.run_test(GetItemModel(), (x, y, z, ind))
    def test_item(self):
        """Export Tensor.item() extraction of a Python scalar (scripted only)."""
        class M(torch.nn.Module):
            def forward(self, x, y, i: int):
                return int(x[y[i]].item())
        x = torch.arange(6, dtype=torch.float)
        y = torch.tensor([0, 1, 2, 3, 4], dtype=torch.long)
        i = 3
        self.run_test(torch.jit.script(M()), (x, y, i))
    @skipScriptTest()  # torch.nonzero(x, as_tuple=True) is not scriptable.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_nonzero(self):
        """Export Tensor.nonzero in both matrix and tuple-of-indices forms."""
        class NonzeroModel(torch.nn.Module):
            def forward(self, x):
                return x.nonzero(), x.nonzero(as_tuple=True)
        # index_fill_ plants zeros so the nonzero result is non-trivial.
        x = torch.randn(60).index_fill_(0, torch.randint(0, 60, (20,)), 0).view(3, 4, 5)
        self.run_test(NonzeroModel(), (x,))
    def test_unbind(self):
        """Export Tensor.unbind along dim 0, a positive dim, and a negative dim."""
        class UnbindModel(torch.nn.Module):
            def forward(self, input):
                _, out, _ = input.unbind()
                return out
        x = torch.randn(3, 4, 5)
        self.run_test(UnbindModel(), x)
        class UnbindModel2(torch.nn.Module):
            def forward(self, input):
                _, out, _, _ = input.unbind(1)
                return out
        x = torch.randn(3, 4, 5)
        self.run_test(UnbindModel2(), x)
        class UnbindModel3(torch.nn.Module):
            def forward(self, input):
                _, out, _, _ = input.unbind(-2)
                return out
        x = torch.randn(3, 4, 5)
        self.run_test(UnbindModel3(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_len(self):
        """Export len() over an unbound tensor list with a dynamic leading axis."""
        class LenModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return len(input.unbind()) + input
        x = torch.randn(4, 5)
        self.run_test(
            LenModel(),
            x,
            input_names=["input"],
            dynamic_axes={"input": {0: "seq"}},
            test_with_inputs=(torch.randn(5, 5),),
        )
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_len_list(self):
        """Export len() of a shape list; the graph depends only on rank, not data."""
        class LenListModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.ones(len(input.shape))
        x = torch.randn(4, 5)
        # No ONNX inputs should remain — the output is a function of rank alone.
        self.run_test(LenListModel(), x, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_unbind_dynamic(self):
        """Export indexing directly into the result of a scripted unbind."""
        class UnbindModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return input.unbind()[1]
        x = torch.randn(3, 4, 5)
        self.run_test(UnbindModel(), x)
        class UnbindModel2(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return input.unbind(-1)[1]
        x = torch.randn(3, 4, 5)
        self.run_test(UnbindModel2(), x)
    @skipScriptTest()  # scripting tests run for opsets > 11. See: test_split_script
    def test_split(self):
        """Export Tensor.split with explicit section sizes (traced path)."""
        class SplitModel(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 2]), input.split([3, 2])[0]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel(), x)
        class SplitModel2(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel2(), x)
        class SplitModel3(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 2])
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel3(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_split_script(self):
        """Export Tensor.split with explicit section sizes (scripting-capable opsets)."""
        class SplitModel(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 2]), input.split([3, 2])[0]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel(), x)
        class SplitModel2(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 1], -2), input.split([2, 2], -2)[-1]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel2(), x)
        class SplitModel3(torch.nn.Module):
            def forward(self, input):
                return input.split([2, 1, 2])
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel3(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_split_size_as_list(self):
        """Export split when the section sizes arrive as a runtime list argument."""
        class SplitModel(torch.nn.Module):
            def forward(self, input, split_sizes: List[int]):
                out = []
                split_list: List[Tensor] = input.split(split_sizes)
                for ob in split_list:
                    out.append(ob)
                return torch.cat(out, dim=0)
        x = torch.randn(6, 4, 3)
        split_sizes = [torch.tensor(2), torch.tensor(4)]
        self.run_test(SplitModel(), (x, split_sizes))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_split_size_with_slice(self):
        """Export split where section sizes come from other inputs' dynamic shapes."""
        class SplitModule(torch.nn.Module):
            def forward(self, x, y, t):
                splits = (x.size(1), y.size(1))
                out, out2 = torch.split(t, splits, dim=1)
                return out, out2
        x = torch.randn(2, 3)
        y = torch.randn(2, 4)
        t = torch.randn(2, 7)
        self.run_test(
            SplitModule(),
            (x, y, t),
            input_names=["x", "y", "t"],
            dynamic_axes={"x": [0, 1], "y": [0, 1], "t": [0, 1]},
        )
        # With static shapes x and y are only shape providers; just t remains.
        self.run_test(SplitModule(), (x, y, t), remained_onnx_input_idx=[2])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_split_dynamic(self):
        """Export scripted split with a fixed chunk size and direct result indexing."""
        class SplitModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return input.split(2)[1]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel(), x)
        class SplitModel2(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return input.split(2, -3)[1]
        x = torch.randn(5, 4, 3)
        self.run_test(SplitModel2(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_split_dynamic_axes(self):
        """Export split along the last dim with a dynamic batch axis."""
        class Split(torch.nn.Module):
            def forward(self, x):
                return x.split(1, dim=-1)
        x = torch.randn(4, 384, 2)
        input_names = ["logits"]
        self.run_test(
            Split(),
            x,
            input_names=input_names,
            dynamic_axes={input_names[0]: {0: "batch"}},
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_chunk(self):
        """Export torch.chunk with positive and negative dims over varying dim sizes."""
        class ChunkModel(torch.nn.Module):
            def __init__(self, dim=1):
                super(ChunkModel, self).__init__()
                self.dim = dim
            def forward(self, x):
                return torch.chunk(x, 3, dim=self.dim)
        model = ChunkModel()
        model.eval()
        model_neg_dim = ChunkModel(-1)
        model_neg_dim.eval()
        x = torch.randn(1, 18)
        # Dim sizes 13..15 are not multiples of 3, so the last chunk is ragged.
        for dim_size_ in range(13, 16):
            y = torch.randn(1, dim_size_)
            self.run_test(
                model,
                x,
                test_with_inputs=[y],
                input_names=["x"],
                dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
            )
            self.run_test(
                model_neg_dim,
                x,
                test_with_inputs=[y],
                input_names=["x"],
                dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
            )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dynamic_chunk(self):
        """Export torch.chunk where the chunk count itself comes from a dynamic dim."""
        class ChunkModel(torch.nn.Module):
            def __init__(self, dim=1):
                super(ChunkModel, self).__init__()
                self.dim = dim
            def forward(self, x):
                # Number of chunks = runtime batch size.
                return torch.chunk(x, x.size(0), dim=self.dim)
        model = ChunkModel()
        model.eval()
        model_neg_dim = ChunkModel(-1)
        model_neg_dim.eval()
        x = torch.randn(3, 18)
        for dim_size_ in range(13, 16):
            y = torch.randn(3, dim_size_)
            self.run_test(
                model,
                x,
                test_with_inputs=[y],
                input_names=["x"],
                dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
            )
            self.run_test(
                model_neg_dim,
                x,
                test_with_inputs=[y],
                input_names=["x"],
                dynamic_axes={"x": {0: "batch_size", 1: "dims"}},
            )
    def test_concat(self):
        """Export torch.cat along dim 0 with differently sized operands."""
        class ConcatModel(torch.nn.Module):
            def forward(self, x, y, z):
                return torch.cat((x, y, z))
        x = torch.randn(3, 4, 5)
        y = torch.randn(1, 4, 5)
        z = torch.randn(2, 4, 5)
        self.run_test(ConcatModel(), (x, y, z))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_concat_dynamic(self):
        """Export torch.cat over a dynamically unbound tensor list (scripted)."""
        class ConcatDynamicModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.cat(x.unbind())
        x = torch.randn(4, 5, 6)
        self.run_test(ConcatDynamicModel(), x)
    def test_stack(self):
        """Export torch.stack along dim 1."""
        class StackModel(torch.nn.Module):
            def forward(self, x, y, z):
                return torch.stack((x, y, z), 1)
        x = torch.randn(3, 4, 5)
        y = torch.randn(3, 4, 5)
        z = torch.randn(3, 4, 5)
        self.run_test(StackModel(), (x, y, z))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_stack_dynamic(self):
        """Export torch.stack over a dynamically unbound tensor list (scripted)."""
        class StackDynamicModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.stack(x.unbind(), 1)
        x = torch.randn(4, 5, 6)
        self.run_test(StackDynamicModel(), x)
    def test_loop_dynamic(self):
        """Export a scripted for-loop whose trip count depends on an input dim."""
        class LoopModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(x.size(2)):
                    x = x + i
                return x
        model = LoopModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        self.run_test(model, inputs)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_loop_nested(self):
        """Export a for-loop containing a while-loop (nested ONNX Loop nodes)."""
        class NestedLoopsModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(5):
                    a = 0
                    while a < 4:
                        a += 1
                    x = x + a
                return x
        model = NestedLoopsModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        self.run_test(model, inputs)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_loop_with_list(self):
        """Export list append, concat, and += mutation patterns inside a scripted loop."""
        class ListLoopModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                res = []
                res1 = []
                arr = x.split([3, 4, 1, 1, 2, 3, 2], 0)
                res2 = torch.zeros(3, 4, dtype=torch.long)
                res3 = []
                res4 = []
                for i in range(len(arr)):
                    # Forward and reverse traversal, via append, +, and += forms.
                    res.append(arr[i].sum(0, False))
                    res1.append(arr[-1 - i].sum(0, False))
                    res2 += 1
                    res3 = res3 + [arr[i].sum(0, False)]
                    res4 += [arr[-1 - i].sum(0, False)]
                return res, res1, res2, torch.stack(res3), torch.stack(res4)
        model = ListLoopModel()
        inputs = torch.randn(16)
        self.run_test(model, inputs)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_loop_transpose(self):
        """Export an accumulating loop whose body contains a transpose."""
        class LoopModel(torch.nn.Module):
            def forward(self, x):
                res = torch.zeros_like(x[0])
                for i in range(x.size(0)):
                    res += x[0].transpose(0, 1)
                return res
        model = torch.jit.script(LoopModel())
        x = torch.randn(5, 3, 3)
        self.run_test(model, x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_loop_multi_dim(self):
        """Export a loop iterating over tensor slices (flip + narrow) with index chasing."""
        class LoopMultiDimModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                for x_ in torch.flip(x.narrow(0, 0, 7), [0]):
                    y = x_[0][y]
                return y
        model = LoopMultiDimModel()
        x = torch.randint(0, 5, (8, 1, 17), dtype=torch.long)
        y = torch.ones(1, dtype=torch.long)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list(self):
        """Export the full set of list mutations: append, pop, insert, +=, and +."""
        class ListModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                tensors = x.unbind()
                res = []
                res.append(tensors[0])
                res.append(tensors[1])
                res.pop(1)
                res.insert(0, tensors[1])
                res.append(tensors[2])
                res += [tensors[3], tensors[4]]
                res = res + [tensors[5]]
                # Only the final length matters for the output.
                return torch.ones(len(res))
        model = ListModel()
        inputs = torch.randn(16, 1)
        self.run_test(model, inputs)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_append(self):
        """Export list growth via += inside a loop, returning the list itself."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    res += [torch.matmul(x[i], y)]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_append_nested(self):
        """Export list growth inside doubly nested loops."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    for j in range(x.size(1)):
                        res += [torch.matmul(x[i][j], y)]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(14)  # Need onnx::Identity of sequence in opset 14
    def test_list_append_nested_2(self):
        """Export conditional nested-loop list appends plus cross-list aliasing reads."""
        class ListModel(torch.nn.Module):
            def forward(self, x):
                res = []
                res_replicate = []
                for i in range(x.size(0)):
                    # Inner loop only runs once the list has grown past 2 items.
                    if len(res) > 2:
                        for j in range(x.size(1)):
                            res.append(x[i][j])
                        res_replicate.append(res[-1])
                        res.append(res_replicate[-1])
                return res, res_replicate
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        self.run_test(model, (x,))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_append_nested_mixed_dtype(self):
        """Export nested-loop appends of boolean comparison results (mixed branches)."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    for j in range(x.size(1)):
                        if i == j:
                            res.append(x == y)
                        else:
                            res.append(x != y)
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        y = torch.randn(3, 4)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_pop(self):
        """Export a trailing list.pop() after loop-driven appends."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    res += [torch.matmul(x[i], y)]
                res.pop()
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_pop_nested(self):
        """Export list.pop() performed inside the inner of two nested loops."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    for j in range(x.size(1)):
                        res += [torch.matmul(x[i][j], y)]
                        res.pop()
                    res += [torch.matmul(x[i][0], y)]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_del(self):
        """Export `del list[index]` after loop-driven appends."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    res += [torch.matmul(x[i], y)]
                del res[2]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_del_nested(self):
        """Export `del list[i]` with a loop-variable index inside nested loops."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    for j in range(x.size(1)):
                        res += [torch.matmul(x[i][j], y)]
                    del res[i]
                    res += [torch.matmul(x[i][0], y)]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_set(self):
        """Export list item assignment with a runtime tensor index."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    res.append(x[i])
                res[y] = x[y]
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(12, 4)
        y = torch.tensor(2, dtype=torch.long)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_idx_sum(self):
        """Export list indexing where the index is computed (sum of an arange slice)."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                indices = torch.arange(x.size(0))
                res = []
                for i in range(x.size(0)):
                    res.append(x[i])
                return res[torch.sum(indices[:y])]
        model = torch.jit.script(ListModel())
        x = torch.randn(12, 4)
        y = torch.tensor(2, dtype=torch.long)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_tensor_factories(self):
        """Export torch.zeros/torch.ones built from a runtime input shape."""
        class TensorFactory(torch.nn.Module):
            def forward(self, x):
                return torch.zeros(x.size()) + torch.ones(x.size())
        x = torch.randn(2, 3, 4)
        self.run_test(
            TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        # With a static shape the graph folds to constants and keeps no inputs.
        self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_tensor_factories_script(self):
        """Export scripted torch.zeros/torch.ones with explicit dtype from x.shape."""
        class TensorFactory(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.zeros(x.shape, dtype=torch.float) + torch.ones(
                    x.shape, dtype=torch.float
                )
        x = torch.randn(2, 3, 4)
        self.run_test(
            TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        # With a static shape the graph folds to constants and keeps no inputs.
        self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_tensor_like_factories_script(self):
        """Export zeros_like/ones_like with explicit dtype, layout, and device kwargs."""
        class TensorFactory(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                zeros = torch.zeros_like(
                    x,
                    dtype=torch.float,
                    layout=torch.strided,
                    device=torch.device("cpu"),
                )
                ones = torch.ones_like(
                    x,
                    dtype=torch.float,
                    layout=torch.strided,
                    device=torch.device("cpu"),
                )
                return zeros + ones
        x = torch.randn(2, 3, 4)
        self.run_test(
            TensorFactory(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        # With a static shape the graph folds to constants and keeps no inputs.
        self.run_test(TensorFactory(), x, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_eye(self):
        """Export torch.eye with constant and shape-derived sizes and several dtypes."""
        class TensorFactory(torch.nn.Module):
            def forward(self, x):
                return (
                    torch.eye(x.size()[1], 3),
                    torch.eye(4, 4, dtype=torch.long),
                    torch.eye(x.size()[1], 2, dtype=torch.long),
                    torch.eye(x.shape[0]),
                    torch.eye(x.shape[0], dtype=torch.float64),
                )
        x = torch.randn(2, 3, 4)
        # A second, differently shaped input verifies truly dynamic eye sizes.
        another_x = torch.randn(5, 6, 7)
        self.run_test(
            TensorFactory(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2]},
        )
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_diagonal(self):
        """Export torch.diagonal: default, +/- offsets, custom dims, and out-of-range offsets."""
        class DiagonalModel(torch.nn.Module):
            def forward(self, x):
                return torch.diagonal(x)
        x = torch.randn(2, 4, 5, 2)
        # Other test inputs to test dynamic behavior
        another_x = torch.randn(5, 6, 7, 8)
        self.run_test(
            DiagonalModel(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2, 3]},
        )
        class DiagonalModelNegOffset(torch.nn.Module):
            def forward(self, x):
                return torch.diagonal(x, offset=-1)
        x = torch.randn(2, 4, 5, 2)
        # Other test inputs to test dynamic behavior
        another_x = torch.randn(5, 6, 7, 8)
        self.run_test(
            DiagonalModelNegOffset(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2, 3]},
        )
        class DiagonalModelPosOffset(torch.nn.Module):
            def forward(self, x):
                return torch.diagonal(x, offset=1)
        x = torch.randn(2, 4, 5, 2)
        # Other test inputs to test dynamic behavior
        another_x = torch.randn(5, 6, 7, 8)
        self.run_test(
            DiagonalModelPosOffset(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2, 3]},
        )
        class DiagonalModelWithDims(torch.nn.Module):
            def forward(self, x):
                return torch.diagonal(x, offset=-1, dim1=1, dim2=2)
        x = torch.randn(2, 4, 5, 2)
        # Other test inputs to test dynamic behavior
        another_x = torch.randn(5, 6, 7, 8)
        self.run_test(
            DiagonalModelWithDims(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2, 3]},
        )
        class DiagonalModelOffsetOverrun(torch.nn.Module):
            def forward(self, x):
                # Offsets past the matrix bounds yield empty / truncated diagonals.
                return torch.diagonal(x, offset=-2), torch.diagonal(x, offset=5)
        x = torch.randn(2, 4, 5, 2)
        # Other test inputs to test dynamic behavior
        another_x = torch.randn(5, 6, 7, 8)
        self.run_test(
            DiagonalModelOffsetOverrun(),
            x,
            test_with_inputs=[another_x],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1, 2, 3]},
        )
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_inplace_zero(self):
        """Export the in-place zero_() op, returning both the result and the input alias."""
        class Zero_(torch.nn.Module):
            def forward(self, x):
                return x.zero_(), x
        x = torch.randn(2, 3, 4)
        self.run_test(Zero_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
        # With static shapes the output is a constant; no ONNX inputs remain.
        self.run_test(Zero_(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_zeros(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.shape[1:2]), x.new_zeros(
x.shape[2:], dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(Zero_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Zero_(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_ones(self):
class OnesModel(torch.nn.Module):
def forward(self, x):
return x.new_ones(x.shape[1:2]), x.new_ones(
x.shape[2:], dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(OnesModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(OnesModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest() # torch.zeros/torch.ones with size tensor of dim != 0 not scriptable.
def test_zeros_ones_with_tensor_input(self):
class ZeroAndOnes(torch.nn.Module):
def forward(self, x):
return torch.zeros(x, 1), torch.ones(x, 1)
x = torch.tensor([2])
self.run_test(ZeroAndOnes(), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_tolist(self):
        """Export of ``Tensor.tolist`` inside a scripted module."""
        class List(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                res: List[int] = input.tolist()
                return res
        self.run_test(List(), (torch.randint(100, (1,)),))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_list_pass(self):
        """``new_zeros`` driven by concatenations of shape slices, full sizes,
        explicit int lists, and ``list(shape)`` — with dynamic axes and with the
        inputs pruned from the exported graph (``remained_onnx_input_idx=[]``)."""
        class Slice(torch.nn.Module):
            def forward(self, x, y):
                return x.new_zeros(x.shape[2:] + y.shape[1:])
        x = torch.randn(2, 3, 4, 5)
        y = torch.randn(1, 2, 3, 4)
        self.run_test(
            Slice(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1, 2, 3]},
        )
        self.run_test(Slice(), (x, y), remained_onnx_input_idx=[])
        class Size(torch.nn.Module):
            def forward(self, x, y):
                return x.new_zeros(x.shape + y.shape)
        x = torch.randn(2, 3, 4)
        y = torch.randn(1, 2, 3)
        self.run_test(
            Size(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
        )
        self.run_test(Size(), (x, y), remained_onnx_input_idx=[])
        class Array(torch.nn.Module):
            def forward(self, x, y):
                arr1 = [x.shape[0], x.shape[1], 2]
                arr2 = [y.shape[0], y.shape[1]]
                return x.new_zeros(arr1 + arr2)
        x = torch.randn(2, 3, 4)
        y = torch.randn(1, 2, 3)
        self.run_test(
            Array(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
        )
        self.run_test(Array(), (x, y), remained_onnx_input_idx=[])
        class List(torch.nn.Module):
            def forward(self, x, y):
                l1 = list(x.shape)
                l2 = list(y.shape)
                return x.new_zeros(l1 + l2)
        x = torch.randn(2, 3, 4)
        y = torch.randn(1, 2, 3)
        self.run_test(
            List(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2], "y": [0, 1, 2]},
        )
        self.run_test(List(), (x, y), remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_empty(self):
class Emtpy(torch.nn.Module):
def forward(self, x):
return (
x.new_empty(x.shape[0]).fill_(0),
x.new_empty(x.shape[0], dtype=torch.long) * 0,
)
x = torch.randn(2, 3, 4)
self.run_test(Emtpy(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Emtpy(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_full(self):
class Full(torch.nn.Module):
def forward(self, x):
return x.new_full(x.shape[1:2], 5), x.new_full(
x.shape[0:1], 1.3, dtype=torch.long
)
x = torch.randn(2, 3, 4)
self.run_test(Full(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Full(), x, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_inplace_list(self):
        """In-place ops (``add_``, ``fill_``) used directly inside a list that
        feeds ``torch.cat`` in a scripted module."""
        class Arithmetic(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                return torch.cat([x.add_(3), y.fill_(0)])
        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        self.run_test(
            Arithmetic(),
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1], "y": [0, 1]},
        )
        self.run_test(Arithmetic(), (x, y), remained_onnx_input_idx=[0])
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_fill(self):
class Fill_(torch.nn.Module):
def forward(self, x):
return x.fill_(3), x
x = torch.randn(2, 3, 4)
self.run_test(Fill_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Fill_(), x, remained_onnx_input_idx=[])
    def test_inplace_arithmetic(self):
        """Chained in-place arithmetic (``add_`` then ``mul_``) in a scripted
        module; y.mul_(x) must see the already-updated x."""
        class Arithmetic(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, y):
                x.add_(3)
                y.mul_(x)
                return x, y
        x = torch.randn(2, 3, 4)
        y = torch.randn(2, 3, 4)
        self.run_test(Arithmetic(), (x, y))
def test_inplace_arithmetic_half(self):
class InplaceAddModel(torch.nn.Module):
def forward(self, x, y):
return x.add_(y)
class InplaceMulModel(torch.nn.Module):
def forward(self, x, y):
return x.mul_(y)
x = torch.randn(2, 2, dtype=torch.half)
y = torch.randn(2, 2, dtype=torch.float)
self.run_test(InplaceAddModel(), (x, y), rtol=1e-2, atol=1e-2)
self.run_test(InplaceMulModel(), (x, y), rtol=1e-2, atol=1e-2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_with_loop(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.ones(
12,
)
for i in range(10):
a.add_(
torch.ones(
12,
)
)
return a + x
m = M()
x = torch.randn(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_with_loop_2(self):
class M(torch.nn.Module):
def forward(self, x):
_bias = torch.ones(
12,
)
a = torch.ones(
12,
) # used in loop, altered.
a_ref = a # not used in loop, should be altered.
b = x.clone() # used in loop, not be altered.
b_ref = b # not used in loop, should not be altered.
for i in range(10):
if i == 3:
for j in range(5):
a += _bias
_bias.add_(
torch.ones(
12,
)
)
b = b + torch.ones(
12,
)
_bias.add_(
torch.ones(
12,
)
)
a += _bias
# TODO: value for a_ref is incorrect.
# a_ref += torch.ones(12,)
b_ref += torch.ones(
12,
)
return _bias + x, a, b, b_ref
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_inplace_attr_with_loop(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self._bias = torch.arange(
12,
)
def forward(self, x):
self._bias = torch.arange(
12,
)
for i in range(10):
if i == 3:
for j in range(5):
self._bias += torch.arange(
12,
)
return self._bias + x
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_inplace_attr_copy_with_loop(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self._bias = torch.arange(
12,
)
def forward(self, x):
self._bias = torch.arange(
12,
)
for i in range(10):
if i == 3:
for j in range(5):
self._bias.copy_(
torch.arange(
12,
)
)
self._bias.copy_(
self._bias
+ torch.arange(
12,
)
)
self._bias.copy_(
self._bias
+ torch.arange(
12,
)
)
return self._bias + x
m = M()
x = torch.zeros(
12,
)
self.run_test(torch.jit.script(M()), (x))
@skipIfUnsupportedMinOpsetVersion(14) # Need onnx::Identity of sequence in opset 14
def test_inplace_sequence_with_loop(self):
class M(torch.nn.Module):
def process(self, beam_hyps: List[Tensor], done: Tensor, x):
batch_size = x.shape[0]
for i in range(batch_size):
if done[i]:
continue
beam_idx = 0
for _, token in enumerate(x[i]):
beam_hyps.append(token)
beam_idx += 1
if beam_idx == 6:
break
done[i] = len(beam_hyps) > 4
return beam_hyps, done
def forward(self, x):
beam_hyps: List[Tensor] = []
batch_size = x.shape[0]
cur_len = 0
max_len = x.shape[1]
done = torch.zeros(batch_size, dtype=torch.bool)
while cur_len < max_len:
beam_hyps, done = self.process(beam_hyps, done, x[:, 0, :])
cur_len = cur_len + 1
return beam_hyps
m = torch.jit.script(M())
x = torch.randn(8, 4, 3)
self.run_test(torch.jit.script(M()), (x))
@skipScriptTest() # Sort with dynamic dim not supported in ONNX
def test_sort(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=True))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
@skipScriptTest() # Sort with dynamic dim not supported in ONNX
def test_sort_ascending(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=False))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
class MaskedFillModel(torch.nn.Module):
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
return x.masked_fill(mask, 2)
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_test(MaskedFillModel(), x)
class MaskedFillModel2(torch.nn.Module):
def forward(self, x):
return x.masked_fill(x > 3, -1)
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_test(MaskedFillModel2(), x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_masked_fill_inplace(self):
        """In-place ``masked_fill_`` in scripted modules: constant uint8 mask
        and a mask computed from the input itself."""
        class MaskedFillModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
                x.masked_fill_(mask, 2)
                return x
        x = torch.zeros(4, 2, 3, requires_grad=True)
        self.run_test(MaskedFillModel(), x)
        class MaskedFillModel2(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                x.masked_fill_(x > 3, -1)
                return x
        x = torch.arange(16).view(2, 2, 4).to(torch.float32)
        self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_scatter(self):
class MaskedScatterModel(torch.nn.Module):
def forward(self, x):
return torch.masked_scatter(x, x.ge(0.5), torch.ones(100, 100) * 5)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedScatterModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_select(self):
class MaskedSelectModel(torch.nn.Module):
def forward(self, x):
return torch.masked_select(x, x.ge(0.5))
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedSelectModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_to_masked_fill(self):
class MaskedFillModel(torch.nn.Module):
def forward(self, input_mask, some_const):
mask = input_mask.clone()
mask[mask != some_const] = 1
mask[mask == some_const] = 0
return mask
mask = torch.randn(2, 2, 2, requires_grad=True)
constant = torch.tensor(5, dtype=torch.float)
self.run_test(MaskedFillModel(), (mask, constant))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_to_masked_scatter(self):
class MaskedScatterModel(torch.nn.Module):
def forward(self, input_mask, some_const):
mask = input_mask.clone()
mask[mask != some_const] = torch.ones(8)
return mask
mask = torch.randn(2, 2, 2, requires_grad=True)
constant = torch.tensor(5, dtype=torch.float)
self.run_test(MaskedScatterModel(), (mask, constant))
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_shuffle(self):
class PixelShuffle(torch.nn.Module):
def forward(self, x):
return torch.pixel_shuffle(x, upscale_factor=2)
x = torch.randn(2, 16, 4, 3, requires_grad=True)
y = torch.randn(4, 32, 8, 4, requires_grad=True)
self.run_test(PixelShuffle(), x)
self.run_test(
PixelShuffle(),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
test_with_inputs=[y],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_unshuffle(self):
class PixelUnshuffle(torch.nn.Module):
def forward(self, x):
return torch.pixel_unshuffle(x, downscale_factor=2)
x = torch.randn(2, 16, 4, 6, requires_grad=True)
y = torch.randn(4, 32, 8, 4, requires_grad=True)
self.run_test(PixelUnshuffle(), x)
self.run_test(
PixelUnshuffle(),
x,
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
test_with_inputs=[y],
)
@skipIfUnsupportedMinOpsetVersion(9)
def test_reciprocal(self):
class ReciprocalModel(torch.nn.Module):
def forward(self, x):
return torch.reciprocal(x)
model = ReciprocalModel()
x = torch.tensor([2, 4])
self.run_test(model, x.to(torch.long))
self.run_test(model, x.to(torch.float))
self.run_test(model, x.to(torch.double))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_scalar_type(self):
        """Scalar/tensor type-promotion scenarios: arithmetic with Python ints,
        mixed-dtype comparisons, matmul chains, full-from-scalar, and
        half/float concatenation."""
        class ArithmeticModel(torch.nn.Module):
            def forward(self, x):
                return x.size(0) * 2 * x, 2 - x
        x = torch.ones(2, 3, dtype=torch.float32)
        self.run_test(ArithmeticModel(), x)
        class ComparisonModel(torch.nn.Module):
            def forward(self, x, y):
                a = torch.tensor([12.0])
                return x.lt(1.5) & y.le(2) & x.le(1), x.gt(y), x.lt(y), a.ge(x.size(0))
        x = torch.ones(2, 3, dtype=torch.int32)
        y = torch.ones(2, 3, dtype=torch.float32)
        self.run_test(ComparisonModel(), (x, y))
        class MatMulModel(torch.nn.Module):
            def forward(self, x):
                return torch.mm(x, x) + x + torch.mm(x, x) + x
        x = torch.ones(3, 3)
        self.run_test(MatMulModel(), x)
        class AddMMModel(torch.nn.Module):
            def forward(self, x):
                return torch.mm(x, x) + x
        x = torch.ones(3, 3)
        self.run_test(AddMMModel(), x)
        class FullModel(torch.nn.Module):
            # add is used for exporting full
            def forward(self, x):
                return torch.full((3, 4), x)
        x = torch.tensor(12.0)
        self.run_test(FullModel(), x)
        class CatModel(torch.nn.Module):
            def forward(self, fp16, fp32):
                return torch.cat([fp16, fp32])
        fp16 = Tensor([0.5])
        fp16 = fp16.half()
        fp32 = Tensor([1.5])
        self.run_test(CatModel(), (fp16, fp32))
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_like(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x):
return torch.full_like(x, 1.3, dtype=torch.int)
x = torch.tensor(12)
self.run_test(FullLikeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_like_value(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x, y):
out = y + 2
return torch.full_like(x, out)
x = torch.tensor(12)
y = torch.tensor(2)
self.run_test(FullLikeModel(), (x, y))
def test_l1_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=1, dim=-1, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_l2_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=2, dim=-2, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=0, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm_keepdim(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=(0, 1), keepdim=True)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_unfold(self):
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=2, step=2)
x = torch.randn(4, 2, 3, requires_grad=True)
y = torch.randn(2, 1, 3, requires_grad=True)
self.run_test(
UnfoldModel(),
x,
dynamic_axes={"x": [0, 1]},
input_names=["x"],
test_with_inputs=[y],
)
    def test_unfold_infer_shape(self):
        """``unfold`` applied to a conv output, so its input shape must be
        inferred during scripting/export."""
        class UnfoldModule(torch.jit.ScriptModule):
            def __init__(self):
                super(UnfoldModule, self).__init__()
                self.conv = torch.nn.Conv1d(3, 1, 3, stride=2)
            @torch.jit.script_method
            def forward(self, x):
                x = self.conv(x)
                return x.unfold(dimension=2, size=2, step=2)
        x = torch.randn(32, 3, 64)
        self.run_test(UnfoldModule(), x)
@skipIfUnsupportedMinOpsetVersion(12)
def test_unfold_dynamic_inputs(self):
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=x.shape[1], step=x.shape[1] - 1)
x = torch.randn(4, 2, 4, requires_grad=True)
self.run_test(UnfoldModel(), x)
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=x.shape[1], step=1)
x = torch.randn(4, 2, 4, requires_grad=True)
self.run_test(UnfoldModel(), x)
@skipIfUnsupportedMinOpsetVersion(9) # MatMul long inputs is added in ONNX opset 9.
def test_mv(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.mv(input, other)
x = torch.randn(4, 5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (4, 5))
y = torch.randint(10, (5,))
self.run_test(MatmulModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9) # MatMul long inputs is added in ONNX opset 9.
def test_dot(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.dot(input, other)
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (5,))
y = torch.randint(10, (5,))
self.run_test(MatmulModel(), (x, y))
@skipScriptTest() # SpectralNorm not TorchScript compatible.
def test_spectral_norm(self):
m = torch.nn.utils.spectral_norm(torch.nn.Linear(2, 4))
x = torch.randn(6, 2)
self.run_test(m, (x,))
def test_prelu(self):
class PReluModel(torch.nn.Module):
def __init__(self):
super(PReluModel, self).__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
return self.prelu(x)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 4, 5)
self.run_test(
PReluModel(),
x,
input_names=["x"],
dynamic_axes={"x": [1, 2]},
test_with_inputs=[y],
)
def test_prelu_scalar(self):
x = torch.scalar_tensor(1.0)
self.run_test(torch.nn.PReLU(), x, input_names=["x"])
def test_relu6(self):
class Relu6Model(torch.nn.Module):
def __init__(self):
super(Relu6Model, self).__init__()
self.relu6 = torch.nn.ReLU6()
def forward(self, x):
return self.relu6(x)
x = torch.randn(2, 3, 4) * 100.0
y = torch.randn(2, 4, 5) * 100.0
self.run_test(
Relu6Model(),
x,
input_names=["x"],
dynamic_axes={"x": [1, 2]},
test_with_inputs=[y],
)
def test_silu(self):
class SiLUModel(torch.nn.Module):
def __init__(self):
super(SiLUModel, self).__init__()
self.silu = torch.nn.SiLU()
def forward(self, x):
return self.silu(x)
x = torch.randn(2, 3, 4)
self.run_test(SiLUModel(), (x))
@skipIfUnsupportedMinOpsetVersion(14)
def test_tril(self):
class trilModel(torch.nn.Module):
def forward(self, x):
return torch.tril(x)
x = torch.randn(2, 3, 4)
self.run_test(trilModel(), (x))
class trilModelwithDiagonal(torch.nn.Module):
def forward(self, x):
return torch.tril(x, diagonal=1)
x = torch.randn(2, 3, 4)
self.run_test(trilModelwithDiagonal(), (x))
class trilModelwithNegDiagonal(torch.nn.Module):
def forward(self, x):
return torch.tril(x, diagonal=-1)
x = torch.randn(2, 3, 4)
self.run_test(trilModelwithNegDiagonal(), (x))
@skipIfUnsupportedMinOpsetVersion(14)
def test_triu(self):
class triuModel(torch.nn.Module):
def forward(self, x):
return torch.triu(x)
x = torch.randn(2, 3, 4)
self.run_test(triuModel(), (x))
class triuModelwithDiagonal(torch.nn.Module):
def forward(self, x):
return torch.triu(x, diagonal=1)
x = torch.randn(2, 3, 4)
self.run_test(triuModelwithDiagonal(), (x))
class trilModelwithNegDiagonal(torch.nn.Module):
def forward(self, x):
return torch.tril(x, diagonal=-1)
x = torch.randn(2, 3, 4)
self.run_test(trilModelwithNegDiagonal(), (x))
def test_mish(self):
class MishModel(torch.nn.Module):
def __init__(self):
super(MishModel, self).__init__()
self.mish = torch.nn.Mish()
def forward(self, x):
return self.mish(x)
x = torch.randn(2, 3, 4)
self.run_test(MishModel(), (x))
def test_remainder(self):
class RemainderModel(torch.nn.Module):
def forward(self, input, other):
return torch.remainder(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(RemainderModel(), (x, y))
x = torch.tensor([7, 6, -7, -6], dtype=torch.long)
y = torch.tensor([2], dtype=torch.long)
self.run_test(RemainderModel(), (x, y))
x = x.to(torch.float)
self.run_test(RemainderModel(), (x, y))
y = y.to(torch.float)
self.run_test(RemainderModel(), (x, y))
x = x.to(torch.int32)
self.run_test(RemainderModel(), (x, y))
def test_remainder_scalar(self):
class RemainderModel(torch.nn.Module):
def __init__(self, scalar=2.55):
super().__init__()
self.scalar = scalar
def forward(self, input):
return torch.remainder(input, self.scalar)
x = torch.randint(10, (2, 3))
self.run_test(RemainderModel(), x)
x = torch.tensor([7, 6, -7, -6], dtype=torch.long)
self.run_test(RemainderModel(2), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod(self):
class FModModel(torch.nn.Module):
def forward(self, input, other):
return torch.fmod(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(FModModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod_scalar(self):
class FModModel(torch.nn.Module):
def forward(self, input):
return torch.fmod(input, 2.55)
x = torch.randint(10, (2, 3))
self.run_test(FModModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_glu(self):
class GluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.glu(x)
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GluModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="none")
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GeluModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tanh_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x, approximate="tanh")
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GeluModel(), x)
def test_add_inplace(self):
class InplaceAddModel(torch.nn.Module):
def forward(self, x):
x += 12
return x
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(InplaceAddModel(), x)
def test_addcmul(self):
class AddcmulModel(torch.nn.Module):
def forward(self, x, t1, t2):
return torch.addcmul(x, t1, t2), torch.addcmul(x, t1, t2, value=2.2)
x = torch.randn(1, 3)
t1 = torch.randn(3, 1)
t2 = torch.randn(1, 3)
self.run_test(AddcmulModel(), (x, t1, t2))
def test_rsqrt(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.randn(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
def test_rsqrt_zeros(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.zeros(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(
x, sorted=True, return_inverse=False, return_counts=True
)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique_along_dim(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(
x, dim=0, sorted=True, return_inverse=True, return_counts=False
)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0)
x = torch.randn(2, 3, 4)
model = CumSum()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum_with_cast(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0, dtype=torch.float32)
model = CumSum()
x = torch.tensor([2, 3, 4], dtype=torch.int32)
self.run_test(model, x)
x = torch.tensor([False, True, True])
self.run_test(model, x)
@skipScriptTest() # error in propagate as assign input shape
@skipIfUnsupportedMinOpsetVersion(10)
def test_embedding_bag(self):
model = torch.nn.EmbeddingBag(10, 5, mode="sum", scale_grad_by_freq=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode="sum", include_last_offset=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode="max")
input = torch.randint(10, (7, 5))
self.run_test(model, (input))
@skipIfUnsupportedMinOpsetVersion(11)
def test_embedding_bag_1d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, offset, weights):
return torch.nn.functional.embedding_bag(
input,
embedding_matrix,
offsets=offset,
mode="sum",
per_sample_weights=weights,
)
model = EmbeddingModel()
x = torch.randint(7, (6,))
w = torch.randn(
6,
)
offset = torch.tensor([0, 2, 5])
embedding_matrix = torch.rand(10, 15)
self.run_test(model, (embedding_matrix, x, offset, w))
@skipIfUnsupportedMinOpsetVersion(11)
def test_embedding_bag_2d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(
input, embedding_matrix, mode="sum", per_sample_weights=weights
)
embedding_matrix = torch.rand(10, 15)
model = EmbeddingModel()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
x2 = torch.randint(7, (4, 3))
w2 = torch.randn(4, 3)
self.run_test(
model,
(embedding_matrix, x, w),
input_names=["embed", "x", "w"],
dynamic_axes={"x": [0], "w": [0]},
test_with_inputs=[(embedding_matrix, x2, w2)],
)
@skipScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(11)
@unittest.skip(
"Due to ONNX Loop shape inference issue. "
"https://msdata.visualstudio.com/Vienna/_workitems/edit/1352001"
)
def test_embedding_bag_dynamic_input(self):
class EmbeddingModel1D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights, offsets):
return torch.nn.functional.embedding_bag(
input,
embedding_matrix,
offsets=offsets,
mode="sum",
per_sample_weights=weights,
)
model = EmbeddingModel1D()
x = torch.randint(7, (6,))
w = torch.randn(
6,
)
offsets = torch.tensor([0, 2, 5], dtype=torch.long)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (2,))
w2 = torch.randn(
2,
)
embedding_matrix2 = torch.rand(12, 25)
offsets2 = torch.tensor(
[
0,
],
dtype=torch.long,
)
self.run_test(
model,
(embedding_matrix, x, w, offsets),
test_with_inputs=[(embedding_matrix2, x2, w2, offsets2)],
input_names=["embedding_matrix", "x", "offsets", "w"],
dynamic_axes={
"embedding_matrix": [0, 1],
"x": [0],
"offsets": [0],
"w": [0],
},
)
class EmbeddingModel2D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(
input, embedding_matrix, mode="sum", per_sample_weights=weights
)
model = EmbeddingModel2D()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (3, 5))
w2 = torch.randn(3, 5)
embedding_matrix2 = torch.rand(12, 25)
self.run_test(
model,
(embedding_matrix, x, w),
test_with_inputs=[(embedding_matrix2, x2, w2)],
input_names=["embedding_matrix", "x", "w"],
dynamic_axes={"embedding_matrix": [0, 1], "x": [0, 1], "w": [0, 1]},
)
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.randn(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.randn(5, requires_grad=True)
self.run_test(Meshgrid(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid_scalar(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.ones(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.tensor(2.0)
self.run_test(Meshgrid(), (x, y, z))
def test_baddbmm(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2):
return torch.baddbmm(
input, batch1, batch2, alpha=torch.tensor(5), beta=3.5
)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
model = MyModule()
self.run_test(model, (x, batch1, batch2))
def test_baddbmm_dynamic(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2, alpha, beta):
return torch.baddbmm(input, batch1, batch2, alpha=alpha, beta=beta)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
alpha = torch.tensor(5)
beta = torch.tensor(3.5)
model = MyModule()
self.run_test(model, (x, batch1, batch2, alpha, beta))
def test_numel(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return input.numel() * input
x = torch.randn(2, 3, 5)
x2 = torch.randn(4, 5, 6)
model = MyModule()
self.run_test(
model,
(x,),
input_names=["x"],
dynamic_axes={"x": [0, 1, 2]},
test_with_inputs=[(x2,)],
)
def test_numel_empty(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return input.numel() * input
x = torch.randn(0)
x2 = torch.randn(4)
model = MyModule()
self.run_test(
model,
(x,),
input_names=["x"],
dynamic_axes={"x": [0]},
test_with_inputs=[(x2,)],
)
def test_dtype(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
return input.to(dtype=other.dtype) + other
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(MyModel(), (x, y))
def test_dtype_eq(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
if input.dtype == other.dtype:
return input + other
return input
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(MyModel(), (x, y))
def test_cast_to(self):
class MyModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
return input.to(other) + other
x = torch.randn(2, 3, 4)
y = torch.tensor([1], dtype=torch.int64)
model = MyModule()
self.run_test(model, (x, y))
def test_cast_to_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input, other):
return torch.cat((input.to(other), other), 0)
x = torch.randn(2, 3, 4)
y = torch.zeros([2, 3, 4], dtype=torch.bool)
model = MyModule()
self.run_test(model, (x, y))
# ONNX supports bfloat16 for opsets >= 13
@skipIfUnsupportedMinOpsetVersion(13)
def test_cast_type_as_with_bfloat16(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.ones((3, 4), dtype=torch.bfloat16)
x = x.type_as(y)
return x.to(dtype=torch.float16)
x = torch.ones(3, 4, dtype=torch.float16)
model = MyModule()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_type_as(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.tensor([1.0])
return x.type_as(y)
a = torch.tensor([True, False], dtype=torch.bool)
b = torch.randn(3, 4, dtype=torch.double)
c = torch.ones((2, 2), dtype=torch.int64)
model = MyModule()
self.run_test(model, a)
self.run_test(model, b)
self.run_test(model, c)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ones_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input):
true = torch.ones(input.shape, dtype=torch.bool)
return input.to(true) & true
x = torch.randn(2, 3, 4)
model = MyModule()
self.run_test(model, x)
def test_log(self):
class Log(torch.nn.Module):
def forward(self, input):
return torch.log(input)
x = torch.rand(2, 3, 4)
model = Log()
self.run_test(model, x)
def test_log1p(self):
class Log1p(torch.nn.Module):
def forward(self, input):
return torch.log1p(input)
x = torch.rand(2, 3, 4)
model = Log1p()
self.run_test(model, x)
def test_log10(self):
class Log10(torch.nn.Module):
def forward(self, input):
return torch.log10(input)
x = torch.rand(2, 3, 4)
model = Log10()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_round(self):
class Round(torch.nn.Module):
def forward(self, x):
return torch.round(x)
x = torch.tensor([0.9920, -1.0362, -1.5000, 3.5000], requires_grad=True)
self.run_test(Round(), x)
def test_constant_pad(self):
model = torch.nn.ConstantPad1d(2, 3.5)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5)
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
# Dynamic padding is added in opset 11
@skipIfUnsupportedMinOpsetVersion(11)
def test_pad_types(self):
# Test for different pad integer types
class Pad(torch.nn.Module):
def forward(self, x, pad: List[int]):
return torch.nn.functional.pad(x, pad)
x = torch.randn(2, 2, 4, 4)
y = pad = [2, 4]
self.run_test(Pad(), (x, y))
y = pad = [
torch.tensor(2, dtype=torch.int64),
torch.tensor(4, dtype=torch.int64),
]
self.run_test(Pad(), (x, y))
    @skipIfUnsupportedMaxOpsetVersion(10)
    @skipScriptTest()  # TODO: the logic in symbolic_opset9 doesn't handle script
    def test_unsupported_pad(self):
        """Before opset 11, non-constant pad sizes must fail with a clear error."""
        class Pad(torch.nn.Module):
            def forward(self, x, pad: List[int]):
                return torch.nn.functional.pad(x, pad)
        x = torch.randn(2, 2, 4, 4)
        y = [2, 4]
        with self.assertRaisesRegex(
            RuntimeError,
            (
                "Unsupported: ONNX export of Pad.*"
                + "The sizes of the padding must be constant"
            ),
        ):
            self.run_test(Pad(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_if_fold(self):
    """Test export of if-branches whose conditions depend only on static
    tensor properties (rank, dtype, numel) and so can be folded away at
    export time. Each model variant below exercises a different condition
    shape (==, !=, <, <=, >=, compound `and`, early return)."""
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() == 2:
                y = y + 4
                y = y + 2
            else:
                y = y - 1
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.numel() > 1:
                y = y + 4
            else:
                y = y + 2
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    # early return in the else branch
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() != 3:
                y = y + 4
                y = y + 2
            else:
                return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() >= 1:
                y = y + 4
            else:
                y = y - 1
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() <= 1:
                y = y + 4
            else:
                y = y + 2
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    # compound condition combining rank and dtype checks
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() < 3 and y.dtype == torch.int:
                y = y + 4
                y = y + 2
            else:
                return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.dim() == 3 and y.dtype == torch.int:
                y = y + 4
                y = y + 2
            else:
                y = y + 1
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    class IfFoldModel(torch.nn.Module):
        def forward(self, y):
            if y.numel() != 0 and y.dim() == 2:
                y = y + 4
                y = y + 2
            else:
                return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), x)
    # conditions comparing properties of two inputs
    class IfFoldModel(torch.nn.Module):
        def forward(self, x, y):
            if x.numel() == y.numel():
                y = x + y
            else:
                y = y - x
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    y = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), (x, y))
    class IfFoldModel(torch.nn.Module):
        def forward(self, x, y):
            if x.numel() != y.numel():
                y = x + y
            else:
                y = y - x
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    y = torch.ones((3, 4), dtype=torch.int)
    self.run_test(IfFoldModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_uninitialized(self):
    """Test export of nested conditionals where one branch returns early,
    leaving the other branch's updates "uninitialized" on that path."""
    class UninitializedModel(torch.nn.Module):
        def forward(self, y):
            if y.shape[1] < 5:
                if y.size(0) == 1:
                    y = y + 4
                else:
                    return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(UninitializedModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_uninitialized_dynamic(self):
    """Same as test_uninitialized, but with dynamic axes so the branch
    condition cannot be resolved statically; re-runs with a second shape."""
    class UninitializedModel(torch.nn.Module):
        def forward(self, y):
            if y.shape[1] < 5:
                if y.size(0) == 1:
                    y = y + 4
                else:
                    return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    # second input with a different shape exercises the other branch
    y = torch.ones((6, 7), dtype=torch.int)
    self.run_test(
        UninitializedModel(),
        x,
        test_with_inputs=[y],
        input_names=["input_1"],
        dynamic_axes={"input_1": [0, 1]},
    )
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList(self):
    """Test early-return branches whose output is a list of tensors
    (ONNX sequence type); requires sequence Identity (opset >= 14)."""
    class UninitializedTensorListModel(torch.nn.Module):
        def forward(self, x):
            if x[0].shape[0] < 5:
                if x.size(0) == 1:
                    x = x + 4
                else:
                    return [x]
            return [x]
    x = torch.ones((3, 4), dtype=torch.int)
    # scripted explicitly so the data-dependent control flow is preserved
    self.run_test(torch.jit.script(UninitializedTensorListModel()), x)
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList_dynamic(self):
    """Like test_uninitialized_tensorList, but returning list(x) with
    dynamic axes on the input."""
    class UninitializedTensorListModel(torch.nn.Module):
        def forward(self, x):
            if x[0].shape[0] < 5:
                if x.size(0) == 1:
                    x += x
                else:
                    return list(x)
            return list(x)
    x = torch.ones((3, 4), dtype=torch.double)
    self.run_test(
        torch.jit.script(UninitializedTensorListModel()),
        x,
        input_names=["input_1"],
        dynamic_axes={"input_1": [0, 1]},
    )
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_intList(self):
    """Test early-return branches whose output is a list of ints built
    from an input-dependent range."""
    class UninitializedListModel(torch.nn.Module):
        def forward(self, x):
            y = list(range(x.size(0)))
            if y[0] < 5:
                # if x.size(0) != 3, ORT will throw type error.
                if x.size(0) == 3:
                    y.append(10)
                else:
                    return y
            return y
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(
        torch.jit.script(UninitializedListModel()),
        x,
        input_names=["input_1"],
        dynamic_axes={"input_1": [0, 1]},
    )
# onnx::Identity of sequence supported for ONNX opset >= 14
@skipIfUnsupportedMinOpsetVersion(14)
def test_uninitialized_tensorList_shape(self):
    """Test branches that build a tensor list via list(x) + append versus
    a literal [x, x], with dynamic input shapes."""
    class UninitializedModel(torch.nn.Module):
        def forward(self, x):
            if x.shape[1] < 5:
                if x.size(0) == 1:
                    x = x + 4
                else:
                    x_list = list(x)
                    x_list.append(x)
                    return x_list
            return [x, x]
    x = torch.ones((3, 4), dtype=torch.int)
    # second shape drives execution down the other branch
    y = torch.ones((4, 6), dtype=torch.int)
    self.run_test(
        torch.jit.script(UninitializedModel()),
        x,
        test_with_inputs=[y],
        input_names=["input_1"],
        dynamic_axes={"input_1": [0, 1]},
    )
# Sequence type as loop-carried dependencies only supported for ONNX opset >= 13
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequance_loopcarried(self):
    """Test a tensor list grown inside a loop (sequence as a loop-carried
    dependency) that is then stacked and transposed."""
    class SequanceLoopModel(torch.nn.Module):
        def forward(self, x):
            outputs = []
            for i in range(3):
                outputs += [x]
            return torch.stack(outputs).transpose(0, 1)
    x = torch.ones((3, 4), dtype=torch.int)
    self.run_test(torch.jit.script(SequanceLoopModel()), x)
def test_reflection_pad(self):
    """Test ONNX export of ReflectionPad1d/2d."""
    model = torch.nn.ReflectionPad1d(2)
    x = torch.randn(2, 4, 4)
    self.run_test(model, x)
    # asymmetric 2-D padding: (left, right, top, bottom)
    model = torch.nn.ReflectionPad2d((3, 0, 2, 1))
    x = torch.randn(2, 2, 4, 4)
    self.run_test(model, x)
def test_replication_pad(self):
    """Test ONNX export of ReplicationPad1d/2d."""
    model = torch.nn.ReplicationPad1d(2)
    x = torch.randn(2, 4, 4)
    self.run_test(model, x)
    # asymmetric 2-D padding: (left, right, top, bottom)
    model = torch.nn.ReplicationPad2d((3, 0, 2, 1))
    x = torch.randn(2, 2, 4, 4)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_im2col(self):
    """Test export of F.unfold (im2col) over several kernel-size,
    dilation, padding, and stride combinations."""
    class Unfold(torch.nn.Module):
        def forward(self, input):
            return (
                torch.nn.functional.unfold(
                    input, kernel_size=(10, 15), dilation=2, padding=5, stride=3
                ),
                torch.nn.functional.unfold(
                    input, kernel_size=(2, 2), dilation=1, padding=0, stride=3
                ),
                torch.nn.functional.unfold(
                    input, kernel_size=(1, 1), dilation=5, padding=2, stride=3
                ),
            )
    x = torch.rand(1, 1, 200, 100)
    self.run_test(Unfold(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_det(self):
    """Test ONNX export of torch.linalg.det on batched square matrices."""
    class Det(torch.nn.Module):
        def forward(self, x):
            return torch.linalg.det(x)
    # batch of 2x3 5x5 square matrices
    x = torch.randn(2, 3, 5, 5)
    self.run_test(Det(), x)
def test_linalg_norm(self):
    """Test torch.linalg.norm export across ord values and dim variants:
    single dim, multi dim, no dim (default), and 1-D/2-D inputs."""
    class LinalgSingleDimModel(torch.nn.Module):
        def __init__(self, ord_val):
            super(LinalgSingleDimModel, self).__init__()
            self.ord = ord_val
        def forward(self, x):
            return torch.linalg.norm(x, ord=self.ord, dim=1)
    x = torch.randn(2, 3, 5, 5)
    self.run_test(LinalgSingleDimModel(None), x)
    self.run_test(LinalgSingleDimModel(2), x)
    self.run_test(LinalgSingleDimModel(float("inf")), x)
    self.run_test(LinalgSingleDimModel(-float("inf")), x)
    self.run_test(LinalgSingleDimModel(-4), x)
    self.run_test(LinalgSingleDimModel(1.5), x)
    # matrix norms over a pair of dimensions
    class LinalgMultiDimModel(torch.nn.Module):
        def __init__(self, ord_val):
            super(LinalgMultiDimModel, self).__init__()
            self.ord = ord_val
        def forward(self, x):
            return torch.linalg.norm(x, ord=self.ord, dim=(0, 2))
    x = torch.randn(2, 3, 5, 5)
    self.run_test(LinalgMultiDimModel("fro"), x)
    self.run_test(LinalgMultiDimModel(float("inf")), x)
    self.run_test(LinalgMultiDimModel(-float("inf")), x)
    self.run_test(LinalgMultiDimModel(1), x)
    self.run_test(LinalgMultiDimModel(-1), x)
    # default ord and dim, over inputs of rank 4, 2, and 1
    class LinalgNoDimNoOrdModel(torch.nn.Module):
        def forward(self, x):
            return torch.linalg.norm(x)
    x = torch.randn(2, 3, 5, 5)
    self.run_test(LinalgNoDimNoOrdModel(), x)
    y = torch.randn(2, 3)
    self.run_test(LinalgNoDimNoOrdModel(), y)
    z = torch.randn(2)
    self.run_test(LinalgNoDimNoOrdModel(), z)
    # vector norms on a 1-D input with explicit ord
    class LinalgNoDim1DModel(torch.nn.Module):
        def __init__(self, ord_val):
            super(LinalgNoDim1DModel, self).__init__()
            self.ord = ord_val
        def forward(self, x):
            return torch.linalg.norm(x, ord=self.ord)
    x = torch.randn(2)
    self.run_test(LinalgNoDim1DModel(None), x)
    self.run_test(LinalgNoDim1DModel(2), x)
    self.run_test(LinalgNoDim1DModel(float("inf")), x)
    self.run_test(LinalgNoDim1DModel(-float("inf")), x)
    self.run_test(LinalgNoDim1DModel(-4), x)
    self.run_test(LinalgNoDim1DModel(1.5), x)
    # matrix norms on a 2-D input with explicit ord
    class LinalgNoDim2DModel(torch.nn.Module):
        def __init__(self, ord_val):
            super(LinalgNoDim2DModel, self).__init__()
            self.ord = ord_val
        def forward(self, x):
            return torch.linalg.norm(x, ord=self.ord)
    x = torch.randn(2, 3)
    self.run_test(LinalgNoDim2DModel("fro"), x)
    self.run_test(LinalgNoDim2DModel(float("inf")), x)
    self.run_test(LinalgNoDim2DModel(-float("inf")), x)
    self.run_test(LinalgNoDim2DModel(1), x)
    self.run_test(LinalgNoDim2DModel(-1), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_linalg_vector_norm_zero(self):
    """Test torch.linalg.vector_norm with ord=0 (count of nonzeros)."""
    class LinalgVectorNormModel(torch.nn.Module):
        def __init__(self, ord_val):
            super(LinalgVectorNormModel, self).__init__()
            self.ord = ord_val
        def forward(self, x):
            return torch.linalg.vector_norm(x, ord=self.ord)
    x = torch.randn(2, 3, 5, 5)
    self.run_test(LinalgVectorNormModel(0), x)
def test_linalg_vector_norm(self):
    """Test torch.linalg.vector_norm over the cross product of ord values
    and (dim, keepdim) combinations."""
    class LinalgVectorNormModel(torch.nn.Module):
        def __init__(self, ord_val, dim_info):
            super(LinalgVectorNormModel, self).__init__()
            self.ord = ord_val
            self.dim, self.keepdim = dim_info
        def forward(self, x):
            return torch.linalg.vector_norm(
                x, ord=self.ord, dim=self.dim, keepdim=self.keepdim
            )
    x = torch.randn(2, 3, 5, 5)
    ord_options = [2, float("inf"), -float("inf"), -4, 1.5]
    dim_options = [(None, False), (1, False), ((1, 2), False), ((1, 2), True)]
    for ord_val in ord_options:
        for dim_info in dim_options:
            self.run_test(LinalgVectorNormModel(ord_val, dim_info), x)
def test_linalg_matrix_norm(self):
    """Test torch.linalg.matrix_norm over ord values, custom dim pairs,
    and keepdim."""
    class LinalgMatrixNormModel(torch.nn.Module):
        def __init__(self, ord_val, dim_val=(-2, -1), keepdim_val=False):
            super(LinalgMatrixNormModel, self).__init__()
            self.ord = ord_val
            self.dim = dim_val
            self.keepdim = keepdim_val
        def forward(self, x):
            return torch.linalg.matrix_norm(
                x, ord=self.ord, dim=self.dim, keepdim=self.keepdim
            )
    x = torch.randn(2, 3, 5, 5)
    ord_options = ["fro", float("inf"), -float("inf"), 1, -1]
    for ord_val in ord_options:
        self.run_test(LinalgMatrixNormModel(ord_val), x)
        self.run_test(LinalgMatrixNormModel(ord_val, (0, 2)), x)
        self.run_test(LinalgMatrixNormModel(ord_val, (0, 2), True), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_linalg_cross(self):
    """Test torch.linalg.cross with explicit and default dim; the inputs
    have broadcastable shapes."""
    class Cross(torch.nn.Module):
        def forward(self, x, y):
            return torch.linalg.cross(x, y, dim=1), torch.linalg.cross(x, y)
    x = torch.randn(5, 3, 2, 3)
    y = torch.randn(1, 3, 1, 3)
    self.run_test(Cross(), input=(x, y))
# This test checks output scalar type in the ONNX graph should not be null
# https://github.com/pytorch/pytorch/issues/28607
@skipIfUnsupportedMinOpsetVersion(10)
def test_trace_script(self):
    """Test a traced module that calls into a scripted helper (mixed
    tracing/scripting) with a tensor-valued slice offset."""
    @torch.jit.script
    def center_slice_helper(input, h_offset):
        return input[:, h_offset:]
    class CenterCrop(torch.nn.Module):
        def forward(self, input):
            return center_slice_helper(input, torch.tensor(input.shape[1] - 1))
    x = torch.randn(3, 4)
    self.run_test(CenterCrop(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_logdet(self):
    """Test ONNX export of torch.logdet on batched square matrices."""
    class LogDet(torch.nn.Module):
        def forward(self, x):
            return torch.logdet(x)
    x = torch.randn(2, 3, 5, 5)
    self.run_test(LogDet(), x)
def test_dim(self):
    """Test export of Tensor.dim() used as a scalar multiplier, including
    an empty (0-element) input."""
    class DimModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            out = input * 2
            out *= out.dim()
            return out
    empty_input = torch.randn(0, requires_grad=True)
    multi_dim_input = torch.randn(1, 2, 3, requires_grad=True)
    self.run_test(DimModel(), empty_input)
    self.run_test(DimModel(), multi_dim_input)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dim_1(self):
    """Test a scripted loop over tensors that calls
    torchvision.ops.clip_boxes_to_image, with a dynamic batch axis."""
    class M(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, poses):
            boxes = torch.zeros([poses.shape[0], 2, 4])
            batch_boxes = []
            for kp_boxes in boxes:
                kp_boxes = torchvision.ops.clip_boxes_to_image(kp_boxes, (2, 3))
                batch_boxes.append(kp_boxes)
            return batch_boxes
    dummy_inputs = torch.rand(2, 2, 3)
    self.run_test(M(), (dummy_inputs,), input_names=["x"], dynamic_axes={"x": [0]})
@skipIfUnsupportedMinOpsetVersion(12)
def test_outer(self):
    """Test torch.outer export, including mixed-dtype input pairs that
    exercise type promotion."""
    class Outer(torch.nn.Module):
        def forward(self, x, y):
            return torch.outer(x, y)
    x = torch.arange(1, 5)
    y = torch.arange(1, 4)
    self.run_test(Outer(), input=(x, y))
    x = torch.arange(1, 6).to(dtype=torch.float32)
    y = torch.arange(1, 4).to(dtype=torch.long)
    self.run_test(Outer(), input=(x, y))
    x = torch.arange(2, 5).to(dtype=torch.float32)
    y = torch.arange(2, 4).to(dtype=torch.float64)
    self.run_test(Outer(), input=(x, y))
    x = torch.arange(3, 6).to(dtype=torch.int32)
    y = torch.arange(4, 7).to(dtype=torch.long)
    self.run_test(Outer(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_einsum(self):
    """Test torch.einsum export for batch-diagonal, batch-matmul,
    inner-product, and transpose equations (float and bool inputs)."""
    class EinsumModelBatchDiagonal(torch.nn.Module):
        def forward(self, x):
            eqn = "...ii ->...i"
            return torch.einsum(eqn, x)
    for x in [torch.randn(3, 5, 5), torch.randn(3, 5, 5).to(dtype=torch.bool)]:
        self.run_test(EinsumModelBatchDiagonal(), input=(x,))
    class EinsumModelBatchMatmul(torch.nn.Module):
        def forward(self, x, y):
            eqn = "bij, bjk -> bik"
            return torch.einsum(eqn, x, y)
    x = torch.randn(5, 2, 3)
    y = torch.randn(5, 3, 4)
    self.run_test(EinsumModelBatchMatmul(), input=(x, y))
    class EinsumModelInnerProd(torch.nn.Module):
        def forward(self, x, y):
            eqn = "i,i"
            return torch.einsum(eqn, x, y)
    x = torch.randn(5)
    y = torch.randn(5)
    self.run_test(EinsumModelInnerProd(), input=(x, y))
    class EinsumModelTranspose(torch.nn.Module):
        def forward(self, x):
            eqn = "ij->ji"
            return torch.einsum(eqn, x)
    for x in [torch.randn(3, 4), torch.randn(3, 4).to(dtype=torch.bool)]:
        self.run_test(EinsumModelTranspose(), input=(x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cosine_similarity(self):
    """Test ONNX export of torch.nn.CosineSimilarity."""
    x = torch.randn(5, 3, 2)
    y = torch.randn(5, 3, 2)
    self.run_test(torch.nn.CosineSimilarity(dim=2), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_pairwise_distance(self):
    """Test ONNX export of torch.nn.PairwiseDistance (p=2)."""
    x = torch.randn(5, 3, 2)
    y = torch.randn(5, 3, 2)
    self.run_test(torch.nn.PairwiseDistance(p=2.0), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cross(self):
    """Test torch.cross export with explicit and default dim."""
    class Cross(torch.nn.Module):
        def forward(self, x, y):
            return torch.cross(x, y, dim=3), torch.cross(x, y)
    x = torch.randn(5, 3, 2, 3)
    y = torch.randn(5, 3, 2, 3)
    self.run_test(Cross(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cdist(self):
    """Test ONNX export of torch.cdist (batched pairwise distances)."""
    class Cdist(torch.nn.Module):
        def forward(self, x, y):
            return torch.cdist(x, y)
    x = torch.randn(5, 3, 3)
    y = torch.randn(5, 2, 3)
    self.run_test(Cdist(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_crossentropyloss(self):
    """Drive _crossentropyloss over 1-D, 2-D, and 3-D targets, for both
    the default ignore_index (-100) and a custom one."""
    for ignore_index in [-100, 1]:
        x = torch.randn(3, 5)
        y = torch.empty(3, dtype=torch.long).random_(5)
        # mark some targets as ignored
        y[y == 1] = ignore_index
        self._crossentropyloss(x, y, ignore_index)
        x = torch.randn(3, 5, 2)
        y = torch.empty(3, 2, dtype=torch.long).random_(5)
        y[y == 1] = ignore_index
        self._crossentropyloss(x, y, ignore_index)
        x = torch.randn(3, 5, 2, 7)
        y = torch.empty(3, 2, 7, dtype=torch.long).random_(5)
        y[y == 1] = ignore_index
        self._crossentropyloss(x, y, ignore_index)
def _crossentropyloss(self, x, y, ignore_index):
    """Exercise CrossEntropyLoss export over every reduction mode
    ("none", "sum", "mean"), each with and without per-class weights.

    When ignore_index is -100 (the documented CrossEntropyLoss default)
    the kwarg is omitted so the default-argument export path is covered.
    """
    class CrossEntropyLossNone(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossNone, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss(reduction="none")
            else:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="none", ignore_index=ignore_index
                )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossNone(ignore_index), input=(x, y))
    class CrossEntropyLossNoneWeight(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossNoneWeight, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="none", weight=torch.randn(5)
                )
            else:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="none",
                    weight=torch.randn(5),
                    ignore_index=ignore_index,
                )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossNoneWeight(ignore_index), input=(x, y))
    class CrossEntropyLossSum(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossSum, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss(reduction="sum")
            else:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="sum", ignore_index=ignore_index
                )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossSum(ignore_index), input=(x, y))
    class CrossEntropyLossSumWeight(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossSumWeight, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="sum", weight=torch.randn(5)
                )
            else:
                self.loss = torch.nn.CrossEntropyLoss(
                    reduction="sum",
                    weight=torch.randn(5),
                    ignore_index=ignore_index,
                )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossSumWeight(ignore_index), input=(x, y))
    class CrossEntropyLossMean(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossMean, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss()
            else:
                self.loss = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossMean(ignore_index), input=(x, y))
    class CrossEntropyLossMeanWeight(torch.nn.Module):
        def __init__(self, ignore_index):
            super(CrossEntropyLossMeanWeight, self).__init__()
            if ignore_index == -100:
                self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5))
            else:
                self.loss = torch.nn.CrossEntropyLoss(
                    weight=torch.randn(5), ignore_index=ignore_index
                )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(CrossEntropyLossMeanWeight(ignore_index), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_kldiv_loss(self):
    """Drive _kldiv_loss with 1-D, 3-D, and 4-D input/target pairs."""
    x = torch.randn(5)
    y = torch.randn(5)
    self._kldiv_loss(x, y)
    x = torch.randn(2, 3, 5)
    y = torch.randn(2, 3, 5)
    self._kldiv_loss(x, y)
    x = torch.randn(2, 3, 5, 7)
    y = torch.randn(2, 3, 5, 7)
    self._kldiv_loss(x, y)
def _kldiv_loss(self, x, y):
    """Exercise KLDivLoss export across reduction modes ("none", "mean",
    "sum", "batchmean") with both log_target settings."""
    class KLDivLossNone(torch.nn.Module):
        def __init__(self):
            super(KLDivLossNone, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction="none", log_target=True)
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(KLDivLossNone(), input=(x, y))
    class KLDivLossMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossMean, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction="mean", log_target=False)
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(KLDivLossMean(), input=(x, y))
    class KLDivLossSum(torch.nn.Module):
        def __init__(self):
            super(KLDivLossSum, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction="sum", log_target=True)
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(KLDivLossSum(), input=(x, y))
    class KLDivLossBatchMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossBatchMean, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction="batchmean", log_target=False)
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(KLDivLossBatchMean(), input=(x, y))
    class KLDivLossMiniBatchMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossMiniBatchMean, self).__init__()
            self.loss = torch.nn.KLDivLoss(
                reduction="batchmean", size_average=False, log_target=True
            )
        def forward(self, input, target):
            return self.loss(input, target)
    self.run_test(KLDivLossMiniBatchMean(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss(self):
    """Test NLLLoss (reduction="none") with LogSoftmax on 1-D targets."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="none")
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(2 * input), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16)
    target = torch.empty(N, dtype=torch.long).random_(0, C)
    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_none(self):
    """Test NLLLoss (reduction="none") on 2-D targets via a conv head."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="none")
            # C is captured from the enclosing test body (defined below,
            # before NLLModel is instantiated)
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean(self):
    """Test NLLLoss (reduction="mean") on 2-D targets via a conv head."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="mean")
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_sum(self):
    """Test NLLLoss (reduction="sum") on 2-D targets via a conv head."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="sum")
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_weights(self):
    """Test NLLLoss (reduction="mean") with per-class weights."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="mean", weight=torch.randn(C))
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_ignore_index(self):
    """Test NLLLoss (reduction="mean") with a custom ignore_index=1."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction="mean", ignore_index=1)
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_dynamic_ignore_index(self):
    """Test F.nll_loss export when ignore_index is computed at runtime.

    ``ignore_index`` is derived from an input tensor's size, so it cannot
    be folded into the exported graph as a constant.
    """
    import torch.nn.functional as F
    # NOTE: dead helpers (linear_combination, reduce_loss) and an unused
    # local ``n = preds.size()[-1]`` were removed; they never contributed
    # to the exported graph or the assertion.
    class LabelSmoothingCrossEntropy(torch.nn.Module):
        def __init__(self, epsilon: float = 0.1, reduction="mean"):
            super().__init__()
            self.epsilon = epsilon
            self.reduction = reduction
        def forward(self, preds, target, start_position):
            log_preds = F.log_softmax(preds, dim=-1)
            # ignore_index depends on an input's shape -> dynamic at export
            ignore_index = start_position.size(1)
            nll = F.nll_loss(
                log_preds,
                target,
                reduction=self.reduction,
                ignore_index=ignore_index,
            )
            return nll + start_position.float()
    N = 5
    preds = torch.randn(N, 16)
    target = torch.randint(5, (N,))
    start_position = torch.randint(10, (N, N))
    self.run_test(LabelSmoothingCrossEntropy(), (preds, target, start_position))
@skipIfUnsupportedMinOpsetVersion(12)
def test_nllloss_2d_mean_ignore_index_weights(self):
    """Test NLLLoss with both per-class weights and custom ignore_index."""
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(
                reduction="mean", weight=torch.randn(C), ignore_index=1
            )
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)
        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output
    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
def test_binary_cross_entropy_with_logits(self):
    """Drive the BCE-with-logits helpers: plain, with weight, with
    pos_weight, and with both weight and pos_weight."""
    x = torch.randn(5)
    y = torch.empty(5).random_(2)
    self._bce_logits(x, y)
    x = torch.randn(3, 4)
    y = torch.empty(3, 4).random_(2)
    weight = torch.tensor([3])
    self._bce_logits_wegiht(x, y, weight)
    x = torch.randn(3, 2, 4)
    y = torch.empty(3, 2, 4).random_(2)
    pos_weight = torch.empty([2, 4]).random_(2)
    self._bce_logits_posweight(x, y, pos_weight)
    x = torch.randn(3, 3, 4)
    y = torch.empty(3, 3, 4).random_(2)
    weight = torch.tensor([3])
    pos_weight = torch.empty([3, 4]).random_(2)
    self._bce_logits_loss_weight_posweight(x, y, weight, pos_weight)
def _bce_logits(self, x, y):
    """Exercise binary_cross_entropy_with_logits export for all three
    reduction modes without weights."""
    class BCEWithLogitsLossNone(torch.nn.Module):
        def forward(self, input, target):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, reduction="none"
            )
    self.run_test(BCEWithLogitsLossNone(), input=(x, y))
    class BCEWithLogitsLossMean(torch.nn.Module):
        def forward(self, input, target):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, reduction="mean"
            )
    self.run_test(BCEWithLogitsLossMean(), input=(x, y))
    class BCEWithLogitsLossSum(torch.nn.Module):
        def forward(self, input, target):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, reduction="sum"
            )
    self.run_test(BCEWithLogitsLossSum(), input=(x, y))
def _bce_logits_wegiht(self, x, y, weight):
    """Exercise binary_cross_entropy_with_logits export with a weight
    tensor, for all three reduction modes."""
    class BCEWithLogitsLossWegihtNone(torch.nn.Module):
        def forward(self, input, target, weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, weight=weight, reduction="none"
            )
    self.run_test(BCEWithLogitsLossWegihtNone(), input=(x, y, weight))
    class BCEWithLogitsLossWegihtMean(torch.nn.Module):
        def forward(self, input, target, weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, weight=weight, reduction="mean"
            )
    self.run_test(BCEWithLogitsLossWegihtMean(), input=(x, y, weight))
    class BCEWithLogitsLossWegihtSum(torch.nn.Module):
        def forward(self, input, target, weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, weight=weight, reduction="sum"
            )
    self.run_test(BCEWithLogitsLossWegihtSum(), input=(x, y, weight))
def _bce_logits_posweight(self, x, y, pos_weight):
    """Exercise binary_cross_entropy_with_logits export with a
    pos_weight tensor, for all three reduction modes."""
    class BCEWithLogitsLossPosWegihtNone(torch.nn.Module):
        def forward(self, input, target, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, pos_weight=pos_weight, reduction="none"
            )
    self.run_test(BCEWithLogitsLossPosWegihtNone(), input=(x, y, pos_weight))
    class BCEWithLogitsLossPosWegihtMean(torch.nn.Module):
        def forward(self, input, target, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, pos_weight=pos_weight, reduction="mean"
            )
    self.run_test(BCEWithLogitsLossPosWegihtMean(), input=(x, y, pos_weight))
    class BCEWithLogitsLossPosWegihtSum(torch.nn.Module):
        def forward(self, input, target, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, pos_weight=pos_weight, reduction="sum"
            )
    self.run_test(BCEWithLogitsLossPosWegihtSum(), input=(x, y, pos_weight))
def _bce_logits_loss_weight_posweight(self, x, y, weight, pos_weight):
    """Exercise binary_cross_entropy_with_logits export with both weight
    and pos_weight tensors, for all three reduction modes."""
    class BCEWithLogitsLossWeightPosweightNone(torch.nn.Module):
        def forward(self, input, target, weight, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input,
                target,
                weight=weight,
                pos_weight=pos_weight,
                reduction="none",
            )
    self.run_test(
        BCEWithLogitsLossWeightPosweightNone(), input=(x, y, weight, pos_weight)
    )
    class BCEWithLogitsLossWeightPosweightMean(torch.nn.Module):
        def forward(self, input, target, weight, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input,
                target,
                weight=weight,
                pos_weight=pos_weight,
                reduction="mean",
            )
    self.run_test(
        BCEWithLogitsLossWeightPosweightMean(), input=(x, y, weight, pos_weight)
    )
    class BCEWithLogitsLossWeightPosweightSum(torch.nn.Module):
        def forward(self, input, target, weight, pos_weight):
            return torch.nn.functional.binary_cross_entropy_with_logits(
                input, target, weight=weight, pos_weight=pos_weight, reduction="sum"
            )
    self.run_test(
        BCEWithLogitsLossWeightPosweightSum(), input=(x, y, weight, pos_weight)
    )
def test_torch_mm(self):
    """Test ONNX export of torch.mm (2-D matrix multiply)."""
    class M(torch.nn.Module):
        def forward(self, mat1, mat2):
            mm = torch.mm(mat1, mat2)
            return mm
    mat1 = torch.randn(2, 3)
    mat2 = torch.randn(3, 3)
    self.run_test(M(), input=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(
    9
)  # Because where op is not supported for opset < 9.
def test_where_with_bool_tensor(self):
    """Test torch.where with a bool condition derived from a comparison."""
    class M(torch.nn.Module):
        def forward(self, mat1, mat2):
            out = torch.where(mat1 > 0, mat1, mat2)
            return out
    mat1 = torch.randn(2, 3)
    mat2 = torch.ones(2, 3)
    self.run_test(M(), input=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(
    9
)  # Because where op is not supported for opset < 9.
def test_where_with_byte_tensor(self):
    """Test torch.where with a uint8 (byte) condition tensor."""
    class M(torch.nn.Module):
        def forward(self, cond, mat1, mat2):
            out = torch.where(cond, mat1, mat2)
            return out
    cond = torch.ones(2, 3, dtype=torch.uint8)
    # make the condition non-trivial so both branches are selected
    cond[1, 2] = 0
    mat1 = torch.randn(2, 3)
    mat2 = torch.ones(2, 3)
    self.run_test(M(), input=(cond, mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(10)  # ONNX IsInf op is added in opset 10.
def test_isinf(self):
    """Test Tensor.isinf export on data containing inf and nan."""
    class M(torch.nn.Module):
        def forward(self, x):
            return x.isinf()
    x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), float("inf")]])
    self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(10)
def test_isfinite(self):
    """Test Tensor.isfinite export on data containing +/-inf and nan."""
    class M(torch.nn.Module):
        def forward(self, x):
            return x.isfinite()
    x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
    self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)  # ONNX IsNaN op is added in opset 9.
def test_isnan(self):
    """Test Tensor.isnan export on data containing inf and nan."""
    class M(torch.nn.Module):
        def forward(self, x):
            return x.isnan()
    x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), float("inf")]])
    self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(
    10
)  # ONNX IsNaN, IsInf op is added in opset 9, 10 respectively.
def test_nan_to_num(self):
    """Test Tensor.nan_to_num with default and explicit replacement
    values, on float, int, and half inputs."""
    class NoParams(torch.nn.Module):
        def forward(self, x):
            return x.nan_to_num()
    x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
    xint = torch.ones((2, 4), dtype=torch.int)
    xhalf = torch.ones((2, 4), dtype=torch.half)
    self.run_test(NoParams(), (x,))
    self.run_test(NoParams(), (xint,))
    self.run_test(NoParams(), (xhalf,))
    class WithParams(torch.nn.Module):
        def forward(self, x):
            return x.nan_to_num(nan=2.3, posinf=4.5, neginf=6.7)
    x = torch.tensor([[1, 2, float("inf")], [2, float("nan"), -float("inf")]])
    self.run_test(WithParams(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_maximum_minimum(self):
    """Test torch.maximum/minimum export with NaN-containing,
    broadcastable inputs."""
    class ModelWithNan(torch.nn.Module):
        def forward(self, x, y):
            return torch.maximum(x, y), torch.minimum(x, y)
    x = torch.tensor([-2, -2, float("nan")])
    y = torch.rand(1, 3)
    self.run_test(ModelWithNan(), (x, y))
@skipIfUnsupportedMinOpsetVersion(12)
def test_minimum_dtypes(self):
    """Test torch.minimum export across mixed-dtype input pairs that
    require type promotion (half/float, half/int16, int16/int32,
    int/bool-valued)."""
    class MinimumModel(torch.nn.Module):
        def forward(self, x, y):
            return torch.minimum(x, y)
    x = torch.randn((5, 5), dtype=torch.float16)
    y = torch.randn((5, 5), dtype=torch.float)
    self.run_test(MinimumModel(), (x, y))
    x = torch.randn((5, 5), dtype=torch.float16)
    y = torch.randint(10, (5, 5), dtype=torch.int16)
    self.run_test(MinimumModel(), (x, y))
    x = torch.randint(10, (5, 5), dtype=torch.int16)
    y = torch.randint(10, (5, 5), dtype=torch.int32)
    self.run_test(MinimumModel(), (x, y))
    x = torch.randint(10, (5, 5), dtype=torch.int)
    y = torch.full_like(x, True)
    self.run_test(MinimumModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_any(self):
    """Test Tensor.any export: full reduction, dim, and dim+keepdim."""
    class M(torch.nn.Module):
        def forward(self, x):
            return x.any()
    x = torch.tensor([[True, False], [False, False]])
    self.run_test(M(), (x,))
    class MDim(torch.nn.Module):
        def forward(self, x):
            return x.any(dim=1)
    x = torch.rand(3, 4).bool()
    self.run_test(MDim(), (x,))
    class MKeepdim(torch.nn.Module):
        def forward(self, x):
            return x.any(dim=1, keepdim=True)
    x = torch.rand(3, 4).bool()
    self.run_test(MKeepdim(), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_all(self):
        """Export Tensor.all: full reduction, dim reduction, and keepdim variants."""

        class M(torch.nn.Module):
            def forward(self, x):
                return x.all()

        x = torch.tensor([[True, False], [False, False]])
        self.run_test(M(), (x,))

        class MDim(torch.nn.Module):
            def forward(self, x):
                return x.all(dim=1)

        x = torch.rand(3, 4).bool()
        self.run_test(MDim(), (x,))

        class MKeepdim(torch.nn.Module):
            def forward(self, x):
                return x.all(dim=1, keepdim=True)

        x = torch.rand(3, 4).bool()
        self.run_test(MKeepdim(), (x,))
def test_dropout(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.dropout = torch.nn.Dropout(0.3)
def forward(self, x):
dropout = self.dropout(x)
return dropout
x = torch.randn(10, 3, 53)
self.run_test(M(), (x))
    def test_shape_constant_fold(self):
        """Export a model whose output depends on a buffer's shape.

        The buffer shape is a compile-time constant, so the exporter can
        constant-fold it; the test verifies numeric agreement with ORT.
        """

        class ShapeModule(torch.nn.Module):
            def __init__(self):
                super(ShapeModule, self).__init__()
                self.register_buffer("weight", torch.ones(5))

            def forward(self, x):
                shape = self.weight.shape[0]
                return x + shape

        x = torch.randn(2, 5)
        self.run_test(ShapeModule(), (x,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu(self):
class Celu(torch.nn.Module):
def __init__(self):
super(Celu, self).__init__()
self.celu = torch.nn.CELU(alpha=1.0)
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_celu_default(self):
        """Export nn.CELU with its default alpha."""

        class Celu(torch.nn.Module):
            def __init__(self):
                super(Celu, self).__init__()
                self.celu = torch.nn.CELU()

            def forward(self, input):
                return self.celu(input)

        input = torch.randn(2)
        self.run_test(Celu(), (input,))
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_celu_alpha(self):
        """Export nn.CELU with a non-default alpha (2.0)."""

        class Celu(torch.nn.Module):
            def __init__(self):
                super(Celu, self).__init__()
                self.celu = torch.nn.CELU(alpha=2.0)

            def forward(self, input):
                return self.celu(input)

        input = torch.randn(2)
        self.run_test(Celu(), (input,))
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_celu_cast(self):
        """Export nn.CELU on a float64 input (exercises dtype casting in export)."""

        class Celu(torch.nn.Module):
            def __init__(self):
                super(Celu, self).__init__()
                self.celu = torch.nn.CELU()

            def forward(self, input):
                return self.celu(input)

        input = torch.randn(2, 5, 7, dtype=torch.float64)
        self.run_test(Celu(), (input,))
    def test_lower_tuple(self):
        """Export tuple construction/unpacking lowered through loops and branches.

        The nested loops and data-dependent branch deliberately shuffle tuple
        elements through rebinding so the exporter's tuple-lowering pass is
        exercised; do not simplify the statement order.
        """

        class TupleModule(torch.nn.Module):
            def forward(self, input1: Tensor, input2: Tensor, input3: Tensor) -> Tensor:
                a = (input1, input2)
                b = a
                c = (input1, input2, input3)
                for i in range(5):
                    d = a[0]
                    for j in range(2):
                        e, f = a
                        a = (d, f)
                        f = c[2]
                        if f.size(0) != input1.size(-1):
                            g = b[1]
                            b = (g, f)
                        else:
                            k = c[1:]
                            b = (f, k[0])
                    m, n = b
                    c = (input1, n, m)
                p, q, r = c
                return p + q + r

        input1 = torch.randn(2)
        input2 = torch.randn(2)
        input3 = torch.randn(2)
        self.run_test(TupleModule(), (input1, input2, input3))
    def test_lower_tuple_2(self):
        """Export a tuple that is repeatedly unpacked and repacked inside a loop."""

        class TupleModule(torch.nn.Module):
            def forward(self, input1: Tensor, input2: Tensor) -> Tuple[Tensor, Tensor]:
                a = (input1, input2)
                for x in range(5):
                    c, d = a
                    a = (c, d)
                return a

        input1 = torch.randn(2)
        input2 = torch.randn(2)
        self.run_test(TupleModule(), (input1, input2))
    def test_lower_tuple_3(self):
        """Export tuple-typed inputs/outputs with a shape-dependent branch in a loop."""

        class TupleModule(torch.nn.Module):
            def forward(
                self,
                input1: Tuple[Tensor, Tensor],
                input2: Tuple[Tensor, Tensor],
            ) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
                a = input1
                b = input2
                for x in range(5):
                    c, d = a
                    e, f = b
                    if c.shape[0] == e.shape[0]:
                        e = e + c
                    else:
                        f = f + d
                    a = (e, f)
                    b = (c, d)
                return a, b

        input1 = (torch.randn(2), torch.randn(2))
        input2 = (torch.randn(2), torch.randn(2))
        self.run_test(TupleModule(), (input1, input2))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_where(self):
        """Export three-argument torch.where with broadcasting inputs."""

        class Model(torch.nn.Module):
            def forward(self, cond, input, other):
                return torch.where(cond, input, other)

        # Shapes (2,3,4), (2,1,4), (2,3,1) exercise broadcast on every axis.
        x = torch.randint(0, 1, (2, 3, 4), dtype=torch.bool)
        y = torch.randn(2, 1, 4)
        z = torch.ones(2, 3, 1)
        self.run_test(Model(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest() # scripting tests run for opsets > 11. See: test_where_condition_script
def test_where_condition(self):
class Model1(torch.nn.Module):
def forward(self, input):
return torch.stack(torch.where(input > 0.5), dim=1)
x = torch.randint(0, 2, (2, 3, 4), dtype=bool)
self.run_test(Model1(), (x))
class Model2(torch.nn.Module):
def forward(self, input, other):
return torch.stack(torch.where(input > other), dim=1)
x = torch.randint(0, 1, (2, 3, 4), dtype=bool)
y = torch.randint(1, 2, (2, 3, 4), dtype=bool)
self.run_test(Model2(), (x, y))
@skipIfUnsupportedOpsetVersion([13])
@skipIfUnsupportedMinOpsetVersion(11)
def test_where_condition_script(self):
class Model1(torch.nn.Module):
def forward(self, input):
return torch.stack(torch.where(input > 0.5), dim=1)
x = torch.randint(0, 2, (2, 3, 4), dtype=bool)
self.run_test(Model1(), (x))
class Model2(torch.nn.Module):
def forward(self, input, other):
return torch.stack(torch.where(input > other), dim=1)
x = torch.randint(0, 1, (2, 3, 4), dtype=bool)
y = torch.randint(1, 2, (2, 3, 4), dtype=bool)
self.run_test(Model2(), (x, y))
    def test_empty_branch(self):
        """Export a scripted model whose if/else contains empty (pass) branches."""

        class EmptyBranchModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                out = input + 1
                if out.dim() > 2:
                    if out.dim() > 3:
                        out += 3
                    else:
                        pass
                else:
                    pass
                return out

        x = torch.randn(1, 2, 3, requires_grad=True)
        self.run_test(EmptyBranchModel(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_derive_index_scripting(self):
        """Scripted export of loops whose range is derived from len(x).

        Covers negative/positive bounds with strides -2, 2, -3, 3; each
        MyModule indexes x with the derived index and accumulates products.
        """

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(len(x) - 1, -len(x), -2):
                    y = x[idx]
                    j += [x * y]
                return j

        x = torch.randn(5, 13)
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(-len(x), len(x) - 1, 2):
                    y = x[idx]
                    j += [x * y]
                return j

        x = torch.randn(5, 13)
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(len(x) - 1, -len(x), -3):
                    y = x[idx]
                    j += [x * y]
                return j

        # Reuses the x defined above for the remaining stride variants.
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(-len(x), len(x) - 1, 3):
                    y = x[idx]
                    j += [x * y]
                return j

        self.run_test(MyModule(), x)
    @skipScriptTest()  # Scripting fails for add lists for opsets < 11. Check test_derive_index_scripting
    def test_derive_index(self):
        """Traced export of loops whose range is derived from len(x).

        Same stride coverage (-2, 2, -3, 3) as test_derive_index_scripting,
        but run in tracing mode (scripting skipped for opsets < 11).
        """

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(len(x) - 1, -len(x), -2):
                    y = x[idx]
                    j += [x * y]
                return j

        x = torch.randn(5, 13)
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(-len(x), len(x) - 1, 2):
                    y = x[idx]
                    j += [x * y]
                return j

        x = torch.randn(5, 13)
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(len(x) - 1, -len(x), -3):
                    y = x[idx]
                    j += [x * y]
                return j

        # Reuses the x defined above for the remaining stride variants.
        self.run_test(MyModule(), x)

        class MyModule(torch.nn.Module):
            def forward(self, x: Tensor):
                j = []
                for idx in range(-len(x), len(x) - 1, 3):
                    y = x[idx]
                    j += [x * y]
                return j

        self.run_test(MyModule(), x)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_if_transpose(self):
        """Export a scripted branch whose two arms return differently-transposed tensors.

        Output axes are declared dynamic because the branch makes the output
        shape data-dependent.
        """

        class IfModel(torch.nn.Module):
            def forward(self, x):
                x = x.transpose(0, 1)
                if x.size(0) == 2:
                    return x.transpose(0, 1)
                else:
                    return x

        x = torch.randn(2, 3)
        self.run_test(
            torch.jit.script(IfModel()),
            x,
            output_names=["output_1"],
            dynamic_axes={"output_1": [0, 1]},
        )
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_if_list(self):
        """Export a scripted branch that appends different tensors to a list."""

        class IfModel(torch.nn.Module):
            def forward(self, x, y, cond):
                res = []
                if cond:
                    res = res + [x]
                else:
                    res = res + [y]
                return res

        x = torch.randn(2, 3)
        y = torch.randn(3, 3)
        cond = torch.tensor(1, dtype=torch.bool)
        self.run_test(torch.jit.script(IfModel()), (x, y, cond))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_if_view(self):
        """Export a scripted branch where one arm reshapes via view.

        The output's middle axis is data-dependent, so it is declared dynamic.
        """

        class IfModel(torch.nn.Module):
            def forward(self, x, y, cond):
                bs, seq = y.shape[:2]
                if cond:
                    res = x.view(bs, seq, -1)
                else:
                    res = y
                return res.transpose(1, 2)

        x = torch.randn(2, 16, 2, 2)
        y = torch.randn(2, 16, 8)
        cond = torch.tensor(1, dtype=torch.bool)
        self.run_test(
            torch.jit.script(IfModel()),
            (x, y, cond),
            output_names=["output_1"],
            dynamic_axes={"output_1": [1]},
        )
    def test_onnx_proto_checker(self):
        """Verify that a corrupted ONNX proto (ir_version=0) fails the C++ checker."""

        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()

            def forward(self, x):
                return 2 * x

        x = torch.randn(1, 2, 3, requires_grad=True)
        f = io.BytesIO()
        torch.onnx._export(Model(), x, f)
        model = onnx.load(f)
        # Deliberately invalidate the proto; the checker must reject it.
        model.ir_version = 0

        def check_proto():
            torch._C._check_onnx_proto(model.SerializeToString())

        self.assertRaises(RuntimeError, check_proto)
    @skipScriptTest(min_opset_version=11)  # dynamic split support added in 11
    def test_split_tensor_scalar(self):
        """Export torch.split with a split size taken from the input's own shape."""

        class SplitModel(torch.nn.Module):
            def forward(self, x):
                return torch.split(x, x.size(1))

        x = torch.randn(1, 2, 3, requires_grad=True)
        self.run_test(SplitModel(), x)
    def test_split_tensor_multi(self):
        """torch.split with a float tensor of split sizes must raise TypeError."""

        class SplitModel(torch.nn.Module):
            def forward(self, x):
                return torch.split(x, torch.ones(3))

        x = torch.randn(1, 2, 3, requires_grad=True)

        def run_model():
            # Calling the class with x (not forward) — expected to raise TypeError.
            SplitModel(x)

        self.assertRaises(TypeError, run_model)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_embedding(self):
        """Export functional embedding with and without padding_idx.

        Inputs are forced to contain the padding index (1) so the padding
        path is actually exercised; also runs once in TRAINING mode.
        """

        class EmbedModel(torch.nn.Module):
            def forward(self, input, emb):
                return torch.nn.functional.embedding(input, emb, padding_idx=1)

        model = EmbedModel()
        x = torch.randint(4, (4,))
        x[2] = x[0] = 1  # guarantee padding_idx values appear in the input
        embedding_matrix = torch.rand(10, 3)
        self.run_test(model, (x, embedding_matrix))

        x = torch.randint(4, (4, 3, 2))
        x[2] = 1
        x[0][1] = 1
        self.run_test(model, (x, embedding_matrix))
        self.run_test(
            model, (x, embedding_matrix), training=torch.onnx.TrainingMode.TRAINING
        )

        class EmbedModelWithoutPaddingIdx(torch.nn.Module):
            def forward(self, input, emb):
                return torch.nn.functional.embedding(input, emb)

        model = EmbedModelWithoutPaddingIdx()
        x = torch.randint(4, (4, 3, 2))
        self.run_test(model, (x, embedding_matrix))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_embedding_module(self):
        """Export nn.Embedding modules with and without padding_idx.

        emb2's padding row is overwritten with ones to check that the
        exporter preserves the stored weights rather than re-zeroing them.
        """

        class EmbedModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 3, padding_idx=1)
                self.emb2 = torch.nn.Embedding(4, 3, padding_idx=1)
                with torch.no_grad():
                    self.emb2.weight[1] = torch.ones(3)

            def forward(self, input):
                return self.emb(input), self.emb2(input)

        model = EmbedModel()
        x = torch.randint(4, (4,))
        x[2] = x[0] = 1  # guarantee padding_idx values appear in the input
        self.run_test(model, (x,))

        x = torch.randint(4, (4, 3, 2))
        x[2] = 1
        x[0][1] = 1
        self.run_test(model, (x,))

        class EmbedModelWithoutPaddingIdx(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.emb = torch.nn.Embedding(4, 3)

            def forward(self, input):
                return self.emb(input)

        model = EmbedModelWithoutPaddingIdx()
        x = torch.randint(4, (4, 3, 2))
        self.run_test(model, (x,))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_embedding_renorm(self):
        """Export nn.Embedding with max_norm renormalization (L2 and L1 norms)."""
        n, d = 7, 5
        embedding = torch.nn.Embedding(n, d, max_norm=0.2)
        idx = torch.tensor([2, 1])
        self.run_test(embedding, idx)

        # norm_type=1.0 switches the renorm from L2 to L1.
        embedding = torch.nn.Embedding(n, d, max_norm=0.5, norm_type=1.0)
        idx = torch.tensor([4, 3, 4, 2])  # repeated index exercises in-place renorm
        self.run_test(embedding, idx)
def _dispatch_rnn_test(self, name, *args, **kwargs):
if name == "elman":
self._elman_rnn_test(*args, **kwargs)
if name == "lstm":
self._lstm_test(*args, **kwargs)
if name == "gru":
self._gru_test(*args, **kwargs)
    def _elman_rnn_test(
        self,
        layers,
        nonlinearity,
        bidirectional,
        initial_state,
        packed_sequence,
        dropout,
    ):
        """Parameterized Elman RNN export test.

        Args:
            layers: number of stacked RNN layers.
            nonlinearity: 'tanh' or 'relu' passed to torch.nn.RNN.
            bidirectional: whether the RNN is bidirectional.
            initial_state: if truthy, an h0 tensor is fed to the model.
            packed_sequence: 0 = plain input, nonzero = packed; 2 also sets
                batch_first=True.
            dropout: dropout passed to torch.nn.RNN.
        """

        class ElmanWithStateModel(torch.nn.Module):
            # NOTE(review): the `bidirect` parameter is accepted but unused —
            # the RNN below reads the enclosing `bidirectional` via closure.
            # Callers pass bidirect=bidirectional, so behavior is unaffected.
            def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):
                super(ElmanWithStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.RNN(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    layers,
                    nonlinearity=nonlinearity,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input: PackedSequence, hx=None):
                return self.inner_model(input, hx)

        class ElmanWithoutStateModel(torch.nn.Module):
            # NOTE(review): `bidirect` unused here as well — see above.
            def __init__(self, layers, nonlinearity, bidirect, dropout, batch_first):
                super(ElmanWithoutStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.RNN(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    layers,
                    nonlinearity=nonlinearity,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input: PackedSequence):
                return self.inner_model(input)

        batch_first = packed_sequence == 2

        if initial_state:
            model = ElmanWithStateModel(
                layers=layers,
                bidirect=bidirectional,
                nonlinearity=nonlinearity,
                dropout=dropout,
                batch_first=batch_first,
            )
            if packed_sequence:
                model = RnnModelWithPackedSequenceWithState(model, batch_first)
        else:
            model = ElmanWithStateModel(
                layers=layers,
                bidirect=bidirectional,
                nonlinearity=nonlinearity,
                dropout=dropout,
                batch_first=batch_first,
            )
            if packed_sequence:
                model = RnnModelWithPackedSequenceWithoutState(model, batch_first)

        def make_input(batch_size):
            # Random per-sequence lengths, sorted descending as required by
            # pack_padded_sequence-style consumers.
            seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
            inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
            inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
            inputs = [inputs]
            directions = 2 if bidirectional else 1

            if initial_state:
                h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
                inputs.append(h0)
            if packed_sequence != 0:
                inputs.append(torch.IntTensor(seq_lengths))
            if len(inputs) == 1:
                input = inputs[0]
            else:
                input = tuple(inputs)
            return input

        input = make_input(RNN_BATCH_SIZE)
        self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

        # test that the model still runs with a different batch size
        other_input = make_input(RNN_BATCH_SIZE + 1)
        self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
    def _lstm_test(
        self, layers, bidirectional, initial_state, packed_sequence, dropout
    ):
        """Parameterized LSTM export test.

        Args:
            layers: number of stacked LSTM layers.
            bidirectional: whether the LSTM is bidirectional.
            initial_state: if truthy, an (h0, c0) pair is fed to the model.
            packed_sequence: 0 = plain input, nonzero = packed; 2 also sets
                batch_first=True.
            dropout: dropout passed to the LSTM.
        """
        batch_first = packed_sequence == 2

        if packed_sequence:
            model = LstmFlatteningResultWithSeqLength(
                RNN_INPUT_SIZE,
                RNN_HIDDEN_SIZE,
                layers,
                bidirectional,
                dropout,
                batch_first,
            )
            if initial_state:
                model = RnnModelWithPackedSequenceWithState(model, batch_first)
            else:
                model = RnnModelWithPackedSequenceWithoutState(model, batch_first)
        else:
            model = LstmFlatteningResultWithoutSeqLength(
                RNN_INPUT_SIZE,
                RNN_HIDDEN_SIZE,
                layers,
                bidirectional,
                dropout,
                batch_first,
            )

        def make_input(batch_size):
            # Random per-sequence lengths, sorted descending for packing.
            seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
            inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
            inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
            inputs = [inputs]

            directions = 2 if bidirectional else 1

            if initial_state:
                # LSTM takes a (hidden, cell) state tuple.
                h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
                c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
                inputs.append((h0, c0))
            if packed_sequence != 0:
                inputs.append(torch.IntTensor(seq_lengths))
            if len(inputs) == 1:
                input = inputs[0]
            else:
                input = tuple(inputs)
            return input

        input = make_input(RNN_BATCH_SIZE)
        self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

        # test that the model still runs with a different batch size
        other_input = make_input(RNN_BATCH_SIZE + 1)
        self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
    def _gru_test(self, layers, bidirectional, initial_state, packed_sequence, dropout):
        """Parameterized GRU export test.

        Selects one of four wrapper models based on whether the input is a
        PackedSequence and whether an initial hidden state is provided.

        Args:
            layers: number of stacked GRU layers.
            bidirectional: whether the GRU is bidirectional.
            initial_state: if truthy, an h0 tensor is fed to the model.
            packed_sequence: 0 = plain input, nonzero = packed; 2 also sets
                batch_first=True.
            dropout: dropout passed to torch.nn.GRU.
        """

        class GRUWithStateModel(torch.nn.Module):
            # NOTE(review): the `bidirect` parameter is accepted but unused —
            # the GRU below reads the enclosing `bidirectional` via closure.
            # Callers pass bidirect=bidirectional, so behavior is unaffected.
            def __init__(self, layers, bidirect, dropout, batch_first):
                super(GRUWithStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.GRU(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    num_layers=layers,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input: PackedSequence, hx):
                return self.inner_model(input, hx)

        class GRUWithoutStateModel(torch.nn.Module):
            # NOTE(review): `bidirect` unused here as well — see above.
            def __init__(self, layers, bidirect, dropout, batch_first):
                super(GRUWithoutStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.GRU(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    num_layers=layers,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input: PackedSequence):
                return self.inner_model(input)

        class GRUNoSeqLengthWithoutStateModel(torch.nn.Module):
            # NOTE(review): `bidirect` unused here as well — see above.
            def __init__(self, layers, bidirect, dropout, batch_first):
                super(GRUNoSeqLengthWithoutStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.GRU(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    num_layers=layers,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input):
                return self.inner_model(input)

        class GRUNoSeqLengthWithStateModel(torch.nn.Module):
            # NOTE(review): `bidirect` unused here as well — see above.
            def __init__(self, layers, bidirect, dropout, batch_first):
                super(GRUNoSeqLengthWithStateModel, self).__init__()
                self.batch_first = batch_first
                self.inner_model = torch.nn.GRU(
                    RNN_INPUT_SIZE,
                    RNN_HIDDEN_SIZE,
                    num_layers=layers,
                    bidirectional=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

            def forward(self, input, hx):
                return self.inner_model(input, hx)

        batch_first = packed_sequence == 2

        if packed_sequence:
            if initial_state:
                model = GRUWithStateModel(
                    layers=layers,
                    bidirect=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )
                model = RnnModelWithPackedSequenceWithState(model, batch_first)
            else:
                model = GRUWithoutStateModel(
                    layers=layers,
                    bidirect=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )
                model = RnnModelWithPackedSequenceWithoutState(model, batch_first)
        else:
            if initial_state:
                model = GRUNoSeqLengthWithStateModel(
                    layers=layers,
                    bidirect=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )
            else:
                model = GRUNoSeqLengthWithoutStateModel(
                    layers=layers,
                    bidirect=bidirectional,
                    dropout=dropout,
                    batch_first=batch_first,
                )

        def make_input(batch_size):
            # Random per-sequence lengths, sorted descending for packing.
            seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
            seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
            inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
            inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
            inputs = [inputs]

            directions = 2 if bidirectional else 1

            if initial_state:
                h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
                inputs.append(h0)
            if packed_sequence != 0:
                inputs.append(torch.IntTensor(seq_lengths))
            if len(inputs) == 1:
                input = inputs[0]
            else:
                input = tuple(inputs)
            return input

        input = make_input(RNN_BATCH_SIZE)
        self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

        # test that the model still runs with a different batch size
        other_input = make_input(RNN_BATCH_SIZE + 1)
        self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
    @skipScriptTest()  # TODO: https://msdata.visualstudio.com/Vienna/_workitems/edit/1253950
    def test_transformer_encoder(self):
        """Export a 3-layer TransformerEncoder (dropout 0 for determinism)."""
        from torch.nn import TransformerEncoder, TransformerEncoderLayer

        class MyModule(torch.nn.Module):
            def __init__(self, ninp, nhead, nhid, dropout, nlayers):
                super(MyModule, self).__init__()
                encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
                self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)

            def forward(self, input):
                return self.transformer_encoder(input)

        x = torch.rand(10, 32, 512)
        self.run_test(MyModule(512, 8, 2048, 0.0, 3), (x,), atol=1e-6)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fake_quantize_per_tensor(self):
class FakeQuantizePerTensorModel(torch.nn.Module):
def forward(self, input):
scale = 1.0 / 127
zero_point = 0
quant_min = -128
quant_max = 127
return torch.fake_quantize_per_tensor_affine(
input, scale, zero_point, quant_min, quant_max
)
x = torch.randn(6, 4, 3, 3)
self.run_test(FakeQuantizePerTensorModel(), (x))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_fake_quantize_per_tensor_dynamic_scale_zeropoint(self):
        """Export fake_quantize_per_tensor_affine with scale/zero-point as inputs."""

        class FakeQuantizePerTensorModel(torch.nn.Module):
            def forward(self, input, scale, zero_point):
                quant_min = -128
                quant_max = 127
                return torch.fake_quantize_per_tensor_affine(
                    input, scale, zero_point, quant_min, quant_max
                )

        x = torch.randn(6, 4, 3, 3)
        scale = torch.tensor(1.0 / 127)
        zero_point = torch.tensor(0)
        self.run_test(FakeQuantizePerTensorModel(), (x, scale, zero_point))
@skipIfUnsupportedMinOpsetVersion(13)
def test_fake_quantize_per_channel(self):
class FakeQuantizePerChannelModel(torch.nn.Module):
def forward(self, input):
amax = torch.ones(4)
scale = amax / 127.0
zero_point = torch.zeros_like(amax, dtype=torch.int)
# Quantize twice to test differnet branches
y = torch.fake_quantize_per_channel_affine(
input, scale, zero_point, 1, 0, 255
)
return torch.fake_quantize_per_channel_affine(
y, scale, zero_point, 1, -128, 127
)
x = torch.randn(6, 4, 3, 3)
self.run_test(FakeQuantizePerChannelModel(), (x))
    @skipIfUnsupportedMinOpsetVersion(13)
    @skipScriptTest()  # RuntimeError: Can't redefine method: forward on class: __torch__.torch.nn.modules.linear.Linear
    def test_fake_quantize_activation(self):
        """Export a QAT-prepared Linear and check activation range clamping in ONNX."""
        from torch import quantization

        m = torch.nn.Linear(1, 1)
        m.qconfig = quantization.QConfig(
            activation=quantization.default_fake_quant,
            weight=quantization.default_per_channel_weight_fake_quant,
        )
        quantization.prepare_qat(m.train(), inplace=True)
        m.apply(quantization.enable_observer)
        m.apply(quantization.enable_fake_quant)
        for module in m.modules():
            if isinstance(module, quantization.FakeQuantize):
                module.calculate_qparams()

        m.apply(quantization.disable_observer)
        m.eval()

        # Fake quantize activation is a special case, as it restricts quantized range to be (0, 127),
        # while standard 8bit quantization range is (-128, 127) or (0, 255).
        # Set fixed weight, bias and inputs to test if ONNX handles the overflow correctly.
        m.weight = torch.nn.Parameter(torch.tensor([[1.0], [1.0], [1.0]]))
        m.bias = torch.nn.Parameter(torch.tensor([0.0]))
        x = torch.tensor([[150.0], [127.0], [-5.0]])
        self.run_test(m, x)
    def test_batchnorm_training(self):
        """Export stacked BatchNorm2d/Conv2d layers in TRAINING and PRESERVE modes."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.bn1 = torch.nn.BatchNorm2d(3, affine=False)
                self.cv1 = torch.nn.Conv2d(3, 3, 10)
                self.bn2 = torch.nn.BatchNorm2d(3, affine=True)
                self.cv2 = torch.nn.Conv2d(3, 3, 10)
                self.bn3 = torch.nn.BatchNorm2d(3, affine=False)

            def forward(self, x):
                x = self.bn1(x)
                x = self.cv1(x)
                x = self.bn2(x)
                x = self.cv2(x)
                x = self.bn3(x)
                return x

        x = torch.randn(10, 3, 20, 20) * 2
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
        # PRESERVE after .train() should behave like TRAINING export.
        model_export.train()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_batchnorm_training_mode_fix_layer(self):
        """Training-mode export where one BatchNorm layer (bn3) is pinned to eval."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.bn1 = torch.nn.BatchNorm2d(3, affine=True)
                self.cv1 = torch.nn.Conv2d(3, 3, 10)
                self.bn2 = torch.nn.BatchNorm2d(3, affine=False)
                self.cv2 = torch.nn.Conv2d(3, 3, 10)
                self.bn3 = torch.nn.BatchNorm2d(3, affine=True)
                self.bn3.eval()  # this layer must stay in eval during training export

            def forward(self, x):
                x = self.bn1(x)
                x = self.cv1(x)
                x = self.bn2(x)
                x = self.cv2(x)
                x = self.bn3(x)
                return x

        x = torch.randn(10, 3, 128, 128)
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
        model_export.train()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_batchnorm_eval_mode_train_layer(self):
        """Eval-mode export where one BatchNorm layer (bn3) is pinned to train."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.bn1 = torch.nn.BatchNorm2d(3, affine=True)
                self.cv1 = torch.nn.Conv2d(3, 3, 10)
                self.bn2 = torch.nn.BatchNorm2d(3, affine=False)
                self.cv2 = torch.nn.Conv2d(3, 3, 10)
                self.bn3 = torch.nn.BatchNorm2d(3, affine=True)
                self.bn3.train()  # this layer must stay in train during eval export

            def forward(self, x):
                x = self.bn1(x)
                x = self.cv1(x)
                x = self.bn2(x)
                x = self.cv2(x)
                x = self.bn3(x)
                return x

        x = torch.randn(10, 3, 128, 128)
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.EVAL,
            rtol=1e-3,
            atol=1e-5,
        )
        model_export.eval()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_instancenorm_training(self):
        """Export stacked InstanceNorm2d/Conv2d layers in TRAINING and PRESERVE modes."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.in1 = torch.nn.InstanceNorm2d(3, affine=True)
                self.cv1 = torch.nn.Conv2d(3, 3, 10)
                self.in2 = torch.nn.InstanceNorm2d(3, affine=False)
                self.cv2 = torch.nn.Conv2d(3, 3, 10)
                self.in3 = torch.nn.InstanceNorm2d(3, affine=True)

            def forward(self, x):
                x = self.in1(x)
                x = self.cv1(x)
                x = self.in2(x)
                x = self.cv2(x)
                x = self.in3(x)
                return x

        x = torch.randn(10, 3, 128, 128)
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
        model_export.train()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_instancenorm_training_mode_fix_layer(self):
        """Training-mode export where one InstanceNorm layer (in3) is pinned to eval."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.in1 = torch.nn.InstanceNorm2d(3, affine=True)
                self.cv1 = torch.nn.Conv2d(3, 3, 10)
                self.in2 = torch.nn.InstanceNorm2d(3, affine=False)
                self.cv2 = torch.nn.Conv2d(3, 3, 10)
                self.in3 = torch.nn.InstanceNorm2d(3, affine=True)
                self.in3.eval()  # this layer must stay in eval during training export

            def forward(self, x):
                x = self.in1(x)
                x = self.cv1(x)
                x = self.in2(x)
                x = self.cv2(x)
                x = self.in3(x)
                return x

        x = torch.randn(10, 3, 128, 128)
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
        model_export.train()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_instancenorm_eval_mode_train_layer(self):
        """Eval-mode export where one InstanceNorm layer (in3) is pinned to train."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.in1 = torch.nn.InstanceNorm2d(8, affine=True)
                self.cv1 = torch.nn.Conv2d(8, 8, 10)
                self.in2 = torch.nn.InstanceNorm2d(8, affine=False)
                self.cv2 = torch.nn.Conv2d(8, 8, 10)
                self.in3 = torch.nn.InstanceNorm2d(8, affine=True)
                self.in3.train()  # this layer must stay in train during eval export

            def forward(self, x):
                x = self.in1(x)
                x = self.cv1(x)
                x = self.in2(x)
                x = self.cv2(x)
                x = self.in3(x)
                return x

        x = torch.randn(10, 8, 128, 128)
        model_export = MyModule()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.EVAL,
            rtol=1e-3,
            atol=1e-5,
        )
        model_export.eval()
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.PRESERVE,
            rtol=1e-3,
            atol=1e-5,
        )
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_dropout_training(self):
        """Export Dropout in TRAINING mode and check the ORT output differs from the input.

        Dropout output is random, so this asserts inequality rather than an
        exact value — first for the eager module, then for its scripted form.
        """

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.dropout = torch.nn.Dropout(0.4)

            def forward(self, x):
                dropout = self.dropout(x)
                return dropout

        model = MyModule()
        x = torch.randn(10)
        model.train()

        ort_sess = convert_to_onnx(
            model,
            input=(x,),
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.TRAINING,
        )
        ort_outs = run_ort(ort_sess, (x,))
        assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))

        script_model = torch.jit.script(model)
        output = model(x)  # NOTE(review): `output` is never used afterwards
        ort_sess = convert_to_onnx(
            script_model,
            input=(x,),
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.TRAINING,
        )
        ort_outs = run_ort(ort_sess, (x,))
        assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_dropout_training_zero(self):
        """Compare PyTorch vs ORT dropout zero-ratios in TRAINING mode.

        Since dropout masks are random, the test compares the fraction of
        zeroed elements (within 1% tolerance) rather than exact values, for
        both the eager and the scripted model.
        """

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.dropout = torch.nn.Dropout(0.5)

            def forward(self, x):
                dropout = self.dropout(x)
                return dropout

        model = MyModule()

        # ensure there are no zeros in the input
        x = torch.randn(10, 3, 128, 128)
        y = x.numpy()
        y_mask = np.where(y == 0, 1, y)
        input = torch.from_numpy(y_mask)
        nb_elements = torch.numel(input)

        model.train()
        ort_sess = convert_to_onnx(
            model,
            input=(x,),
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.TRAINING,
        )
        ort_outs = run_ort(ort_sess, (x,))

        y = model(input)
        output = y.cpu().numpy()
        ort_mask = np.where(ort_outs[0] != 0, 1, 0)
        pyt_mask = np.where(output != 0, 1, 0)

        # Fractions of surviving elements should match the dropout rate closely.
        ratio_pytorch = np.sum(pyt_mask) / nb_elements
        ratio_ort = np.sum(ort_mask) / nb_elements
        np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)

        script_model = torch.jit.script(model)
        y = model(input)
        output = y.cpu().numpy()
        ort_sess = convert_to_onnx(
            script_model,
            input=(x,),
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.TRAINING,
        )
        ort_outs = run_ort(ort_sess, (x,))
        ort_mask = np.where(ort_outs[0] != 0, 1, 0)
        pyt_mask = np.where(output != 0, 1, 0)

        ratio_pytorch = np.sum(pyt_mask) / nb_elements
        ratio_ort = np.sum(ort_mask) / nb_elements
        np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)
    def test_conv_bn(self):
        """Export a Conv2d→BatchNorm2d pair in both EVAL and TRAINING modes."""

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv = torch.nn.Conv2d(
                    3, 16, kernel_size=1, stride=2, padding=3, bias=True
                )
                self.bn = torch.nn.BatchNorm2d(16, affine=True)

            def forward(self, x):
                x = self.conv(x)
                bn = self.bn(x)
                return bn

        model_export = MyModule()
        x = torch.randn(10, 3, 128, 128)
        self.run_test(model_export, (x,), training=torch.onnx.TrainingMode.EVAL)
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
    def test_multiple_conv_bn(self):
        """Export a ResNet-style stack of conv/bn/relu/maxpool layers.

        bn2 is applied twice (after conv2 and conv3), so its running stats
        are updated twice per forward pass in training mode.
        """

        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv1 = torch.nn.Conv2d(
                    3, 64, kernel_size=7, stride=2, padding=3, bias=False
                )
                self.conv2 = torch.nn.Conv2d(
                    64, 2, kernel_size=1, stride=1, padding=0, bias=False
                )
                self.conv3 = torch.nn.Conv2d(
                    2, 2, kernel_size=3, stride=1, padding=1, bias=False
                )
                self.bn = torch.nn.BatchNorm2d(64)
                self.bn2 = torch.nn.BatchNorm2d(2)
                self.relu = torch.nn.ReLU(inplace=True)
                self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

            def forward(self, x):
                x = self.conv1(x)
                x = self.bn(x)
                x = self.relu(x)
                x = self.maxpool(x)
                x = self.conv2(x)
                x = self.bn2(x)
                x = self.relu(x)
                x = self.conv3(x)
                x = self.bn2(x)
                x = self.relu(x)
                return x

        model_export = MyModule()
        x = torch.randn(2, 3, 224, 224)
        self.run_test(
            model_export,
            (x,),
            training=torch.onnx.TrainingMode.TRAINING,
            rtol=1e-3,
            atol=1e-5,
        )
        self.run_test(model_export, (x,), training=torch.onnx.TrainingMode.EVAL)
    def test_script_custom_class_error(self):
        """Exporting a scripted model holding a plain-Python custom class must raise."""

        class BoxCoder(object):
            def __init__(self, bbox_xform_clip: float) -> None:
                self.bbox_xform_clip = bbox_xform_clip

            def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
                boxes = torch.cat(boxes, dim=0)
                pred_ctr_x = (
                    torch.clamp(rel_codes[:, 0::4], max=self.bbox_xform_clip)
                    * boxes[:, 2]
                )
                return pred_ctr_x

        class MyModule(torch.nn.Module):
            __annotations__ = {
                "box_coder": BoxCoder,
            }

            def __init__(self):
                super(MyModule, self).__init__()
                self.box_coder = BoxCoder(1.4)

            def forward(self, box_regression: Tensor, proposals: List[Tensor]):
                return self.box_coder.decode(box_regression, proposals)

        model = torch.jit.script(MyModule())
        box_regression = torch.randn([4, 4])
        proposal = [torch.randn(2, 4), torch.randn(2, 4)]

        # Export of the custom (non-TorchScript) class member should fail.
        with self.assertRaises(RuntimeError) as cm:
            convert_to_onnx(model, input=(box_regression, proposal))
    def test_initializer_sequence(self):
        """Exported initializer order must match state_dict() and named_parameters()."""

        class MyModule(torch.nn.Module):
            def __init__(self, input_size, hidden_size, num_classes):
                super(MyModule, self).__init__()
                self.fc1 = torch.nn.Linear(input_size, hidden_size)
                self.relu = torch.nn.ReLU()
                self.fc2 = torch.nn.Linear(hidden_size, num_classes)

            def forward(self, x):
                out = self.fc1(x)
                out = self.relu(out)
                out = self.fc2(out)
                return out

        test_model = MyModule(3, 4, 10)
        state_dict_list = [k for (k, v) in test_model.state_dict().items()]
        named_params_list = [k for (k, v) in test_model.named_parameters()]

        x = torch.randn(32, 3)
        f = io.BytesIO()
        # Constant folding disabled so initializers survive export unchanged.
        torch.onnx._export(test_model, (x,), f, do_constant_folding=False)
        loaded_model = onnx.load_from_string(f.getvalue())

        actual_list = [p.name for p in loaded_model.graph.initializer]
        assert actual_list == state_dict_list, (
            "Initializers' sequence is not as same as state_dict(). Expected: ("
            + ", ".join(state_dict_list)
            + "). Actual:("
            + ", ".join(actual_list)
            + ")."
        )
        assert actual_list == named_params_list, (
            "Initializers' sequence is not as same as named_parameters(). Expected: ("
            + ", ".join(named_params_list)
            + "). Actual:("
            + ", ".join(actual_list)
            + ")."
        )
    def test_initializer_sequence_script_model(self):
        """Same ordering check as test_initializer_sequence, but for a scripted model."""
        def list_is_expected(short_list, long_list) -> bool:
            # True when each short_list[i] is a substring of long_list[i] —
            # scripted initializer names may carry extra prefixes/suffixes.
            if len(short_list) > len(long_list):
                return False
            for i in range(len(short_list)):
                if short_list[i] not in long_list[i]:
                    return False
            return True
        def loop(x, y):
            for i in range(int(y)):
                x = x + i
            return x
        class MyModule(torch.nn.Module):
            def __init__(self, input_size, hidden_size, num_classes):
                super(MyModule, self).__init__()
                self.fc1 = torch.nn.Linear(input_size, hidden_size)
                self.relu = torch.nn.ReLU()
                self.fc2 = torch.nn.Linear(hidden_size, num_classes)
            def forward(self, x, y):
                x = loop(x, y)
                out = self.fc1(x)
                out = self.relu(out)
                out = self.fc2(out)
                return out
        test_model = torch.jit.script(MyModule(3, 4, 10))
        state_dict_list = [k for (k, v) in test_model.state_dict().items()]
        named_params_list = [k for (k, v) in test_model.named_parameters()]
        x = torch.ones(2, 3, dtype=torch.float)
        y = torch.tensor(5, dtype=torch.long)
        f = io.BytesIO()
        torch.onnx.export(test_model, (x, y), f, do_constant_folding=False)
        loaded_model = onnx.load_from_string(f.getvalue())
        actual_list = [p.name for p in loaded_model.graph.initializer]
        assert list_is_expected(state_dict_list, actual_list), (
            "ScriptModel - Initializers' sequence is not as same as state_dict(). Expected: ("
            + ", ".join(state_dict_list)
            + "). Actual:("
            + ", ".join(actual_list)
            + ")."
        )
        assert list_is_expected(named_params_list, actual_list), (
            "ScriptModel - Initializers' sequence is not as same as named_parameters(). Expected: ("
            + ", ".join(named_params_list)
            + "). Actual:("
            + ", ".join(actual_list)
            + ")."
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_nms(self):
        """Export/run torchvision ops.nms with IoU threshold 0.5."""
        num_boxes = 100
        boxes = torch.rand(num_boxes, 4)
        # Make boxes well-formed: (x2, y2) >= (x1, y1).
        boxes[:, 2:] += boxes[:, :2]
        scores = torch.randn(num_boxes)
        class Module(torch.nn.Module):
            def forward(self, boxes, scores):
                return ops.nms(boxes, scores, 0.5)
        self.run_test(Module(), (boxes, scores))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_batched_nms(self):
        """Export/run torchvision ops.batched_nms with per-box category indices."""
        num_boxes = 100
        boxes = torch.rand(num_boxes, 4)
        # Make boxes well-formed: (x2, y2) >= (x1, y1).
        boxes[:, 2:] += boxes[:, :2]
        scores = torch.randn(num_boxes)
        idxs = torch.randint(0, 5, size=(num_boxes,))
        class Module(torch.nn.Module):
            def forward(self, boxes, scores, idxs):
                return ops.batched_nms(boxes, scores, idxs, 0.5)
        self.run_test(Module(), (boxes, scores, idxs))
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_clip_boxes_to_image(self):
        """Export ops.boxes.clip_boxes_to_image with a dynamic image size."""
        boxes = torch.randn(5, 4) * 500
        boxes[:, 2:] += boxes[:, :2]
        # `size` tensors are only used for their .shape, not their values.
        size = torch.randn(200, 300)
        size_2 = torch.randn(300, 400)
        class Module(torch.nn.Module):
            def forward(self, boxes, size):
                shape = (size.shape[0], size.shape[1])
                return ops.boxes.clip_boxes_to_image(boxes, shape)
        self.run_test(
            Module(),
            (boxes, size),
            input_names=["boxes", "size"],
            dynamic_axes={"size": [0, 1]},
            test_with_inputs=[(boxes, size), (boxes, size_2)],
        )
    @skipIfUnsupportedMaxOpsetVersion(15)  # TODO: Opset 16 RoiAlign result mismatch
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_roi_align(self):
        """Export ops.RoIAlign (unaligned) with a single ROI."""
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        # ROI format: (batch_index, x1, y1, x2, y2).
        single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
        model = ops.RoIAlign((5, 5), 1.0, 2)
        self.run_test(model, (x, single_roi))
    @skipIfUnsupportedMaxOpsetVersion(15)  # TODO: Opset 16 RoiAlign result mismatch
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_roi_align_aligned(self):
        """Export ops.RoIAlign with aligned=True over several scales/sampling ratios."""
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32)
        model1 = ops.RoIAlign((5, 5), 1.0, 2, aligned=True)
        self.run_test(model1, (x, single_roi))
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model2 = ops.RoIAlign((5, 5), 0.5, 3, aligned=True)
        self.run_test(model2, (x, single_roi))
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        model3 = ops.RoIAlign((5, 5), 1.8, 2, aligned=True)
        self.run_test(model3, (x, single_roi))
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32)
        # sampling_ratio=0 means adaptive sampling.
        model4 = ops.RoIAlign((2, 2), 2.5, 0, aligned=True)
        self.run_test(model4, (x, single_roi))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_roi_pool(self):
        """Export ops.RoIPool with a single ROI and a 5x5 output window."""
        x = torch.rand(1, 1, 10, 10, dtype=torch.float32)
        rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)
        pool_h = 5
        pool_w = 5
        model = ops.RoIPool((pool_h, pool_w), 2.0)
        self.run_test(model, (x, rois))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_resize_images(self):
        """Export GeneralizedRCNNTransform.resize with fully dynamic image dims."""
        class TransformModule(torch.nn.Module):
            def __init__(self):
                super(TransformModule, self).__init__()
                self.transform = _init_test_generalized_rcnn_transform()
            def forward(self, images):
                # resize returns (image, target); only the image is checked.
                return self.transform.resize(images, None)[0]
        input = torch.rand(3, 10, 20)
        input_test = torch.rand(3, 100, 150)
        self.run_test(
            TransformModule(),
            (input,),
            input_names=["input1"],
            dynamic_axes={"input1": [0, 1, 2]},
            test_with_inputs=[(input,), (input_test,)],
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_transform_images(self):
        """Export the full GeneralizedRCNNTransform over a list of images."""
        class TransformModule(torch.nn.Module):
            def __init__(self):
                super(TransformModule, self).__init__()
                self.transform = _init_test_generalized_rcnn_transform()
            def forward(self, images: List[Tensor]):
                # transform returns (ImageList, targets); check the batched tensor.
                return self.transform(images)[0].tensors
        input = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
        input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200)
        self.run_test(
            TransformModule(), (input,), test_with_inputs=[(input,), (input_test,)]
        )
    def get_features(self, images):
        """Build a fake FPN-style feature pyramid (strides 4..64) sized from `images`."""
        s0, s1 = images.shape[-2:]
        features = [
            ("0", torch.rand(2, 256, s0 // 4, s1 // 4)),
            ("1", torch.rand(2, 256, s0 // 8, s1 // 8)),
            ("2", torch.rand(2, 256, s0 // 16, s1 // 16)),
            ("3", torch.rand(2, 256, s0 // 32, s1 // 32)),
            ("4", torch.rand(2, 256, s0 // 64, s1 // 64)),
        ]
        features = OrderedDict(features)
        return features
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_rpn(self):
        """Export a RegionProposalNetwork over a dict of FPN features."""
        set_rng_seed(0)
        class RPNModule(torch.nn.Module):
            def __init__(self):
                super(RPNModule, self).__init__()
                self.rpn = _init_test_rpn()
            def forward(self, images, features: Dict[str, Tensor]):
                images_m = ImageList(
                    images, [(i.shape[-1], i.shape[-2]) for i in images]
                )
                return self.rpn(images_m, features)
        images = torch.rand(2, 3, 150, 150)
        features = self.get_features(images)
        images2 = torch.rand(2, 3, 80, 80)
        test_features = self.get_features(images2)
        model = RPNModule()
        model.eval()
        # Warm-up call before export.
        model(images, features)
        self.run_test(
            model,
            (images, features),
            input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
            dynamic_axes={
                "input1": [0, 1, 2, 3],
                "input2": [0, 1, 2, 3],
                "input3": [0, 1, 2, 3],
                "input4": [0, 1, 2, 3],
                "input5": [0, 1, 2, 3],
                "input6": [0, 1, 2, 3],
            },
            test_with_inputs=[(images, features), (images2, test_features)],
            dict_check=False,
        )
    @skipIfUnsupportedMaxOpsetVersion(15)  # TODO: Opset 16 RoiAlign result mismatch
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_multi_scale_roi_align(self):
        """Export ops.MultiScaleRoIAlign over a two-level feature dict."""
        class TransformModule(torch.nn.Module):
            def __init__(self):
                super(TransformModule, self).__init__()
                self.model = ops.MultiScaleRoIAlign(["feat1", "feat2"], 3, 2)
                self.image_sizes = [(512, 512)]
            def forward(self, input: Dict[str, Tensor], boxes: List[Tensor]) -> Tensor:
                return self.model(input, boxes, self.image_sizes)
        i = OrderedDict()
        i["feat1"] = torch.rand(1, 5, 64, 64)
        i["feat2"] = torch.rand(1, 5, 16, 16)
        boxes = torch.rand(6, 4) * 256
        boxes[:, 2:] += boxes[:, :2]
        i1 = OrderedDict()
        i1["feat1"] = torch.rand(1, 5, 64, 64)
        i1["feat2"] = torch.rand(1, 5, 16, 16)
        boxes1 = torch.rand(6, 4) * 256
        boxes1[:, 2:] += boxes1[:, :2]
        self.run_test(
            TransformModule(),
            (
                i,
                [boxes],
            ),
            test_with_inputs=[
                (
                    i,
                    [boxes],
                ),
                (
                    i1,
                    [boxes1],
                ),
            ],
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    @skipScriptTest()
    def test_roi_heads(self):
        """Export the Faster R-CNN RPN + RoI heads + postprocess pipeline."""
        class RoiHeadsModule(torch.nn.Module):
            def __init__(self):
                super(RoiHeadsModule, self).__init__()
                self.transform = _init_test_generalized_rcnn_transform()
                self.rpn = _init_test_rpn()
                self.roi_heads = _init_test_roi_heads_faster_rcnn()
            def forward(self, images, features: Dict[str, Tensor]):
                original_image_sizes = [
                    (img.shape[-1], img.shape[-2]) for img in images
                ]
                images_m = ImageList(
                    images, [(i.shape[-1], i.shape[-2]) for i in images]
                )
                proposals, _ = self.rpn(images_m, features)
                detections, _ = self.roi_heads(
                    features, proposals, images_m.image_sizes
                )
                detections = self.transform.postprocess(
                    detections, images_m.image_sizes, original_image_sizes
                )
                return detections
        images = torch.rand(2, 3, 100, 100)
        features = self.get_features(images)
        images2 = torch.rand(2, 3, 150, 150)
        test_features = self.get_features(images2)
        model = RoiHeadsModule()
        model.eval()
        # Warm-up call before export.
        model(images, features)
        self.run_test(
            model,
            (images, features),
            input_names=["input1", "input2", "input3", "input4", "input5", "input6"],
            dynamic_axes={
                "input1": [0, 1, 2, 3],
                "input2": [0, 1, 2, 3],
                "input3": [0, 1, 2, 3],
                "input4": [0, 1, 2, 3],
                "input5": [0, 1, 2, 3],
                "input6": [0, 1, 2, 3],
            },
            test_with_inputs=[(images, features), (images2, test_features)],
            dict_check=False,
        )
    def test_set_(self):
        """Export Tensor.set_ (x takes over y's storage); only y remains an ONNX input."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                x.set_(y)
                return x
        x = torch.ones(2, 3)
        y = torch.randn(4, 6)
        self.run_test(M(), (x, y), remained_onnx_input_idx=[1])
        y2 = torch.randn(5, 2)
        self.run_test(
            M(),
            (x, y),
            remained_onnx_input_idx=[1],
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1], "y": [0, 1]},
            test_with_inputs=[(y, y2)],
        )
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_set_attr_modules(self):
        """Export nested modules that reassign self attributes inside forward()."""
        class InnerModule2(torch.nn.Module):
            def __init__(self, embedding_dim):
                super().__init__()
                self.weights = InnerModule2.get_embedding(embedding_dim)
                self.register_buffer("_float_tensor", torch.FloatTensor(1))
                self.const = 2
            @staticmethod
            def get_embedding(embedding_dim: int):
                emb = 4 / ((embedding_dim // 2) - 1)
                emb = torch.exp(
                    torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
                )
                return emb
            def forward(self, input, incremental_state: Optional[Tensor] = None):
                bsz, seq_len = input.shape[0], input.shape[1]
                self.const = 3
                # NOTE(review): self.weights is always set in __init__, so this
                # branch appears dead; it also references InnerModule (not
                # InnerModule2) and a nonexistent self.embedding_dim —
                # presumably kept only to exercise scripting of this pattern.
                if self.weights is None:
                    self.weights = InnerModule.get_embedding(self.embedding_dim)
                self.weights = self.weights.to(self._float_tensor)
                self.weights = self.weights * self.const
                if incremental_state is not None:
                    pos = seq_len
                    return self.weights[1 + pos, :].expand(bsz, 1, -1)
                return self.weights.index_select(
                    0, torch.ones((bsz * seq_len), dtype=torch.int64)
                ).view(bsz, seq_len, -1)
        class InnerModule(torch.nn.Module):
            def __init__(self, embedding_dim):
                super().__init__()
                self.weights = InnerModule.get_embedding(embedding_dim)
                self.module = InnerModule2(embedding_dim=8)
            @staticmethod
            def get_embedding(embedding_dim: int):
                emb = 4 / ((embedding_dim // 2) - 1)
                emb = torch.exp(
                    torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
                )
                return emb
            def forward(self, x):
                return self.module(x) + self.weights
        class Module(torch.nn.Module):
            def __init__(self):
                super(Module, self).__init__()
                self.module = InnerModule(embedding_dim=8)
            def forward(self, x):
                return self.module(x)
        x = torch.randn(3, 256)
        self.run_test(Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(Module(), (x,), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_set_attr_modules_2(self):
        """Export a module that overwrites both a scalar and a tensor attribute in forward()."""
        class InnerModule(torch.nn.Module):
            def __init__(self, embedding_dim):
                super().__init__()
                self.embedding_dim = embedding_dim
                self.const = 2.5
                self.weights = InnerModule.get_embedding(self.embedding_dim)
                self.register_buffer("_float_tensor", torch.FloatTensor(1))
            @staticmethod
            def get_embedding(embedding_dim: int):
                emb = 4 / ((embedding_dim // 2) - 1)
                emb = torch.exp(
                    torch.arange((embedding_dim // 2), dtype=torch.float) * -emb
                )
                return emb
            def forward(self, input, incremental_state: Optional[Tensor] = None):
                bsz, seq_len = input.shape[0], input.shape[1]
                # Attribute mutation inside forward is the pattern under test.
                self.const = 1.5
                self.weights = InnerModule.get_embedding(self.embedding_dim)
                return (
                    self.weights.index_select(
                        0, torch.ones((bsz * seq_len), dtype=torch.int64)
                    ).view(bsz, seq_len, -1)
                ) * self.const
        class Module(torch.nn.Module):
            def __init__(self):
                super(Module, self).__init__()
                self.module = InnerModule(embedding_dim=8)
            def forward(self, x):
                return self.module(x)
        x = torch.randn(3, 256)
        self.run_test(Module(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(Module(), (x,), remained_onnx_input_idx=[])
    def test_set_attr(self):
        """Export a scripted module that reassigns a submodule's weight and a bool flag."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv = torch.nn.Conv1d(3, 10, 2)
                self.b = False
            def forward(self, box_regression, weight):
                self.b = True
                self.conv.weight = weight
                w = torch.softmax(self.conv.weight, dim=0)
                self.conv.weight = w + w
                if self.b:
                    return box_regression + self.conv.weight
                else:
                    return box_regression - self.conv.weight
        model = torch.jit.script(MyModule())
        weight = torch.ones(3, 2)
        box_regression = torch.randn(3, 2)
        self.run_test(model, (box_regression, weight))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_2(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors):
if self.conv.bias is not None:
b = self.conv.bias
assert b is not None
self.conv.bias = anchors + b
elif self.conv.weight is not None:
self.conv.weight = torch.randn(3, 10)
self.conv.bias = self.conv.weight[:]
def forward(self, anchors) -> Optional[Tensor]:
self.set_cell_anchors(anchors)
return self.conv.bias
model = torch.jit.script(MyModule())
anchors = torch.ones(3, 10, 3)
self.run_test(model, (anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_3(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors, boxes):
self.conv.weight = torch.ones(3, 10)
if self.conv.bias is not None:
self.conv.bias = torch.randn(3, 10, 3)
self.conv.weight = anchors + self.conv.weight
boxes[:] = torch.zeros(2, 3)
def forward(self, anchors) -> Tuple[Tensor, Tensor]:
boxes = torch.ones(2, 2, 3)
self.set_cell_anchors(anchors, boxes)
if self.conv.bias is not None:
return self.conv.weight, boxes
return anchors, boxes
model = torch.jit.script(MyModule())
anchors = torch.rand(3, 10)
self.run_test(model, (anchors))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_set_attr_4(self):
        """Export attribute reassignment in both branches of an if/else helper."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv = torch.nn.Conv1d(10, 3, 3)
                self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
            def set_cell_anchors(self, anchors):
                self.conv.weight = torch.zeros(10, 3)
                if self.conv.bias is not None:
                    w = self.conv.bias
                    assert w is not None
                    self.conv.bias = anchors + w
                else:
                    self.conv.bias = torch.ones(3, 10, 3)
            def forward(self, feature_maps, anchors) -> Tuple[Tensor, Tensor]:
                self.set_cell_anchors(anchors)
                result = []
                if self.conv.bias is not None:
                    a = self.conv.bias
                    assert a is not None
                    result += [a]
                result += [feature_maps]
                return result[0], result[1]
        model = torch.jit.script(MyModule())
        x = torch.rand(5, 11, 30)
        anchors = torch.ones(3, 10, 3)
        self.run_test(model, (x, anchors))
@skipIfUnsupportedMinOpsetVersion(11)
def test_set_attr_5(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = torch.nn.Conv1d(10, 3, 3)
self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
def set_cell_anchors(self, anchors):
self.conv.weight = torch.arange(10)
for i in range(10):
if i == 3:
for j in range(10):
w = self.conv.weight
self.conv.weight = torch.arange(10) + w
self.conv.weight = self.conv.weight + torch.arange(10)
# NOTE: `is not None` and `assert` is for passing torchscript.
if self.conv.bias is not None:
a = self.conv.bias
assert a is not None
self.conv.bias = anchors + a
def forward(self, anchors):
self.set_cell_anchors(anchors)
return self.conv.weight, self.conv.bias
model = torch.jit.script(MyModule())
anchors = torch.ones(3, 10, 3)
self.run_test(model, (anchors))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_set_attr_in_loop(self):
        """Export attribute reassignment and indexed in-place writes inside nested loops."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv = torch.nn.Conv1d(10, 3, 3)
                self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
                self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
            def set_cell_anchors(self, anchors, boxes):
                self.conv.weight = torch.randn(3, 10)
                for i in range(self.conv.weight.size(0)):
                    for j in range(10):
                        self.conv.bias = torch.randn(3, 10, 3)
                        self.conv.weight = anchors * i
                        boxes[j] += torch.ones(3, 3)
            def forward(self, anchors) -> Tuple[Tensor, Tensor]:
                boxes = torch.ones(10, 3, 3)
                self.set_cell_anchors(anchors, boxes)
                if self.conv.bias is not None:
                    return self.conv.weight, boxes
                return anchors, boxes
        model = torch.jit.script(MyModule())
        anchors = torch.rand(10)
        self.run_test(model, anchors)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_set_attr_in_loop_with_list(self):
        """Export appends to a list-typed module attribute inside nested loops."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.conv = torch.nn.Conv1d(10, 3, 3)
                self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10))
                self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3))
                self.boxes: List[Tensor] = [
                    torch.ones(1)
                ]  # Workaround placeholder for TorchScript
            def set_cell_anchors(self, anchors):
                self.conv.weight = torch.randn(3, 10)
                for i in range(self.conv.weight.size(0)):
                    for j in range(10):
                        self.conv.bias = torch.randn(3, 10, 3)
                        self.conv.weight = anchors * i
                        self.boxes.append(torch.ones(3, 3))
            def forward(self, anchors) -> Tuple[Tensor, List[Tensor]]:
                # Reset the placeholder before accumulating.
                self.boxes = []
                self.set_cell_anchors(anchors)
                if self.conv.bias is not None:
                    return self.conv.weight, self.boxes
                return anchors, self.boxes
        model = torch.jit.script(MyModule())
        anchors = torch.rand(10)
        self.run_test(model, anchors)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_if(self):
        """Export slice-assignment (index_put) inside both branches of a scripted if."""
        @torch.jit.script
        def check_init(
            input_data: Tensor, hidden_size: int, prev_state: Tensor
        ) -> Tuple[Tensor, Tensor]:
            batch_size = input_data.size(0)
            spatial_size_0 = input_data.size(2)
            spatial_size_1 = input_data.size(3)
            # generate empty prev_state, if None is provided
            state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
            state = torch.zeros(state_size, device=input_data.device)
            state_copy = torch.zeros(state_size, device=input_data.device)
            if prev_state.size(0) == 0:
                state[:] = (
                    torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    + state[:]
                )
                state_copy[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 2
                )
                state_copy[:] = (
                    torch.zeros(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 2
                )
            else:
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 4
                )
            return state, state_copy
        class Example(torch.nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.hidden_size = hidden_size
            def forward(self, input_data, prev_state):
                prev_state = check_init(input_data, self.hidden_size, prev_state)
                return prev_state[0], prev_state[1]
        model = Example(10)
        random_data = torch.rand((1, 5, 30, 30))
        # Empty state selects the first branch of check_init.
        empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["random_data", "empty_tensor"],
            dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
        )
        self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_if_2(self):
        """Export index_put inside a loop and across an if/elif/elif chain."""
        @torch.jit.script
        def check_init(
            input_data: Tensor, hidden_size: int, prev_state: Tensor
        ) -> Tuple[Tensor, Tensor]:
            batch_size = input_data.size(0)
            spatial_size_0 = input_data.size(2)
            spatial_size_1 = input_data.size(3)
            # generate empty prev_state, if None is provided
            state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
            state = torch.zeros(state_size, device=input_data.device)
            state_copy = torch.zeros(state_size, device=input_data.device)
            if prev_state.size(0) == 0:
                for i in range(2):
                    state[:] = (
                        torch.ones(
                            batch_size, hidden_size, spatial_size_0, spatial_size_1
                        )
                        * i
                    )
                    state_copy[:] = (
                        torch.ones(
                            batch_size, hidden_size, spatial_size_0, spatial_size_1
                        )
                        * i
                    )
            elif prev_state.size(0) == 1:
                s = state[:]
                state[:] = prev_state + s
            elif prev_state.size(0) == 2:
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 4
                )
            return state, state_copy
        class Example(torch.nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.hidden_size = hidden_size
            def forward(self, input_data, prev_state):
                prev_state = check_init(input_data, self.hidden_size, prev_state)
                return prev_state[0], prev_state[1]
        model = Example(10)
        random_data = torch.rand((1, 5, 30, 30))
        empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
        # Size-1 state exercises the first elif branch at runtime.
        random_state = torch.rand((1, 1, 10, 30, 30))
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["data", "state"],
            dynamic_axes={"data": [0, 1, 2], "state": [0, 1, 2, 3, 4]},
            test_with_inputs=[(random_data, random_state)],
        )
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["data", "state"],
            dynamic_axes={"state": [0, 1, 2, 3, 4]},
            test_with_inputs=[(random_data, random_state)],
            remained_onnx_input_idx=[1],
        )
        self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_if_3(self):
        """Export index_put inside a nested if within an if/else."""
        @torch.jit.script
        def check_init(
            input_data: Tensor, hidden_size: int, prev_state: Tensor
        ) -> Tensor:
            batch_size = input_data.size(0)
            spatial_size_0 = input_data.size(2)
            spatial_size_1 = input_data.size(3)
            # generate empty prev_state, if None is provided
            state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
            state = torch.zeros(state_size, device=input_data.device)
            if prev_state.size(0) < 2:
                state = state * 3
                if prev_state.size(0) == 0:
                    state[:] = (
                        torch.ones(
                            batch_size, hidden_size, spatial_size_0, spatial_size_1
                        )
                        * 3
                    )
                else:
                    state = state + 2
            return state
        class Example(torch.nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.hidden_size = hidden_size
            def forward(self, input_data, prev_state):
                prev_state = check_init(input_data, self.hidden_size, prev_state)
                return prev_state
        model = Example(4)
        random_data = torch.rand((1, 5, 4, 4))
        empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["random_data", "empty_tensor"],
            dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
        )
        self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_if_4(self):
        """Export index_put interleaved with tensor rebinding inside an if branch."""
        @torch.jit.script
        def check_init(
            input_data: Tensor, hidden_size: int, prev_state: Tensor
        ) -> Tensor:
            batch_size = input_data.size(0)
            spatial_size_0 = input_data.size(2)
            spatial_size_1 = input_data.size(3)
            # generate empty prev_state, if None is provided
            state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
            state = torch.zeros(state_size, device=input_data.device)
            if prev_state.size(0) == 0:
                state = state + 3
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 3
                )
                state = state + 3
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 4
                )
            else:
                state = state + 2
            return state
        class Example(torch.nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.hidden_size = hidden_size
            def forward(self, input_data, prev_state):
                prev_state = check_init(input_data, self.hidden_size, prev_state)
                return prev_state
        model = Example(4)
        random_data = torch.rand((1, 5, 4, 4))
        empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["random_data", "empty_tensor"],
            dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
        )
        self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_index_put_if_5(self):
        """Export index_put where an alias (state_ref) to the original tensor is also returned."""
        @torch.jit.script
        def check_init(
            input_data: Tensor, hidden_size: int, prev_state: Tensor
        ) -> Tuple[torch.Tensor, torch.Tensor]:
            batch_size = input_data.size(0)
            spatial_size_0 = input_data.size(2)
            spatial_size_1 = input_data.size(3)
            # generate empty prev_state, if None is provided
            state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
            state = torch.zeros(state_size, device=input_data.device)
            # Alias taken before the in-place write and the later rebinding.
            state_ref = state
            if prev_state.size(0) == 0:
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 3
                )
                state = state + 3
                state[:] = (
                    torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
                    * 4
                )
            else:
                state = state + 2
            return state, state_ref
        class Example(torch.nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.hidden_size = hidden_size
            def forward(self, input_data, prev_state):
                prev_state, state_ref = check_init(
                    input_data, self.hidden_size, prev_state
                )
                return prev_state, state_ref
        model = Example(4)
        random_data = torch.rand((1, 5, 4, 4))
        empty_tensor = torch.tensor([], dtype=torch.float).view(0, 0, 0, 0, 0)
        self.run_test(
            model,
            (random_data, empty_tensor),
            input_names=["random_data", "empty_tensor"],
            dynamic_axes={"random_data": [0, 1, 2, 3], "empty_tensor": [0, 1, 2, 3, 4]},
        )
        self.run_test(model, (random_data, empty_tensor), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_append_in_block(self):
        """Export list.append inside a scripted loop body."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_append_in_nested_block(self):
        """Export list.append inside doubly nested scripted loops."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                for i in range(x.size(0)):
                    for j in range(x.size(1)):
                        res.append(torch.matmul(x[i][j], y))
                return res
        model = torch.jit.script(ListModel())
        x = torch.randn(4, 4, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_pop_in_block(self):
        """Export list.pop inside scripted loop bodies, then append the popped element."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                elem = torch.matmul(x[0], y)
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                for i in range(x.size(0)):
                    elem = res.pop()
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                    elem = res.pop()
                # list.append returns None; the populated list is traced via res.
                return res.append(elem)
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_list_del_in_block(self):
        """Export `del list[idx]` inside scripted loop bodies."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                elem = torch.matmul(x[0], y)
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                for i in range(x.size(0)):
                    del res[0]
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                    del res[0]
                # list.append returns None; the populated list is traced via res.
                return res.append(elem)
        model = torch.jit.script(ListModel())
        x = torch.randn(16, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_list_unpack(self):
        """Export tuple-style unpacking of a scripted list built in a loop."""
        class ListModel(torch.nn.Module):
            def forward(self, x, y):
                res = []
                elem = torch.matmul(x[0], y)
                for i in range(x.size(0)):
                    res.append(torch.matmul(x[i], y))
                # Unpacking requires x.size(0) == 3 at runtime.
                a, b, c = res
                return a, b
        model = torch.jit.script(ListModel())
        x = torch.randn(3, 3, 4)
        y = torch.randn(4, 5)
        self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_inplace_ops(self):
@torch.jit.script
def check_init(input_data: Tensor, hidden_size: int) -> Tensor:
batch_size = input_data.size(0)
spatial_size_0 = input_data.size(2)
spatial_size_1 = input_data.size(3)
# generate empty prev_state, if None is provided
state_size = (2, batch_size, hidden_size, spatial_size_0, spatial_size_1)
state = torch.zeros(state_size, device=input_data.device)
if input_data.size(0) == 1:
state[1] += (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 2
)
state[1] /= (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* 3
)
for i in range(input_data.size(0)):
state[1] += torch.ones(
batch_size, hidden_size, spatial_size_0, spatial_size_1
)
state[1] /= (
torch.ones(batch_size, hidden_size, spatial_size_0, spatial_size_1)
* i
)
return state
class Example(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
def forward(self, input_data):
state = check_init(input_data, self.hidden_size)
return state
model = Example(10)
random_data = torch.rand((1, 5, 30, 30))
self.run_test(
model,
(random_data),
input_names=["random_data"],
dynamic_axes={"random_data": [0, 1, 2, 3]},
)
self.run_test(model, (random_data), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_input_mask_model(self):
        """Export boolean-mask assignment (y[mask] = 0) with one and two mask indices."""
        class InputMaskModel(torch.nn.Module):
            def __init__(self, output_size):
                super(InputMaskModel, self).__init__()
                self.bias = torch.nn.Parameter(
                    torch.empty(output_size, dtype=torch.float)
                )
                with torch.no_grad():
                    self.bias.zero_()
            def forward(self, model_input, y):
                # Rows whose input falls outside (0, 25] are zeroed.
                input_mask = (model_input <= 0) | (model_input > 25)
                y[input_mask, :] = 0.0
                output = y + self.bias
                return output
        output_size = 4
        m = InputMaskModel(output_size)
        x = torch.tensor([0, 4, 24, 25], dtype=torch.int64)
        y = torch.tensor(
            [
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
            ],
            dtype=torch.float,
        )
        self.run_test(m, (x, y))
        class InputMaskModel(torch.nn.Module):
            def __init__(self, output_size):
                super(InputMaskModel, self).__init__()
            def forward(self, model_input_1, model_input_2, y):
                input_mask_1 = (model_input_1 <= 0) | (model_input_1 > 25)
                input_mask_2 = (model_input_2 < 1) | (model_input_2 >= 12)
                # Two boolean index tensors select elements, not rows.
                y[input_mask_1, input_mask_2] = 0.0
                return y
        output_size = 4
        m = InputMaskModel(output_size)
        x1 = torch.tensor([0, 4, 24, 25], dtype=torch.int64)
        x2 = torch.tensor([0, 3, 12, 15], dtype=torch.int64)
        y = torch.tensor(
            [
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
                [0.1, 0.2, 0.3, 0.4],
            ],
            dtype=torch.float,
        )
        self.run_test(m, (x1, x2, y))
    @skipScriptTest()
    def test_unsafe_chunk(self):
        """Export torch.unsafe_chunk splitting dim=1 into 3 pieces."""
        class ChunkModel(torch.nn.Module):
            def forward(self, x):
                return torch.unsafe_chunk(x, 3, dim=1)
        model = ChunkModel()
        model.eval()
        x = torch.randn(1, 18)
        self.run_test(model, x, input_names=["x"])
    def test_symbolic_shape_inference(self):
        """Symbolic shape inference through Shape/Reshape/Transpose/Gather and view."""
        # ConstantOfShape is tested in test_embedding_bag
        # Tile is tested in test_repeat
        # test Shape, Reshape, Transpose, Gather
        class ShapeModel(torch.nn.Module):
            def forward(self, x, y):
                shape = x.size()[:3] + (-1,)  # shape [4], ("batch", 3, 4, -1)
                y = y.reshape(shape)  # batch, 3, 4, 10/batch
                return y.transpose(1, 2)
        model = ShapeModel()
        model.eval()
        x = torch.ones(2, 3, 4, 5)
        y = torch.ones(3, 4, 5, 2)
        self.run_test(
            model,
            (x, y),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1, 2, 3], "y": [0, 1, 2, 3]},
        )
        # x only contributes its shape, so it drops out of the ONNX inputs.
        self.run_test(model, (x, y), remained_onnx_input_idx=[1])
        class ViewModel(torch.nn.Module):
            def forward(self, x):
                return x.view(-1)
        model = ViewModel()
        model.eval()
        x = torch.tensor(2.0)
        self.run_test(model, (x,))
        # test prim::ListConstruct for Reshape input 1
        class ViewModel_2(torch.nn.Module):
            def forward(self, x):
                N, C, H, W = x.shape[0], x.shape[2], x.shape[3], x.shape[4]
                x1 = x.view(N, -1, C, H, W)
                x2 = x1.permute(0, 3, 4, 1, 2)
                return x2.reshape(N, -1, C)
        model = ViewModel_2()
        model.eval()
        x = torch.ones(2, 3, 4, 5, 6)
        self.run_test(model, x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_symbolic_shape_inference_arange(self):
        # test Range
        class ArangeModel(torch.nn.Module):
            def forward(self, signal):
                """Derive an arange length from the input's (symbolic) sizes."""
                frame_step = 2
                outer_dimensions = signal.size()[:-2]
                frames, frame_length = signal.size()[-2:]
                subframe_length = signal.size()[0]
                subframe_step = frame_step // subframe_length
                subframes_per_frame = frame_length // subframe_length
                output_size = frame_step * (frames - 1) + frame_length
                output_subframes = output_size // subframe_length
                frame = torch.arange(0, output_subframes)
                return frame
        model = ArangeModel()
        model.eval()
        M, C, K, N = 1, 2, 3, 4
        x = torch.randint(5, (M, C, K, N))
        y = torch.randint(5, (M, C + 1, K + 1, N + 1))
        self.run_test(model, x, input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]})
        self.run_test(model, x, remained_onnx_input_idx=[])
        self.run_test(
            model,
            x,
            input_names=["x"],
            dynamic_axes={"x": [0, 1, 2, 3]},
            test_with_inputs=[(x,), (y,)],
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_symbolic_shape_inference_box(self):
        """Symbolic shape inference through NonZero: torch.where on a boolean
        keep-mask produces a data-dependent output shape."""
        # test NonZero
        class BoxModel(torch.nn.Module):
            def forward(self, boxes):
                min_size = 1e-2
                ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
                keep = (ws >= min_size) & (hs >= min_size)
                keep = torch.where(keep)[0]
                return keep
        model = BoxModel()
        model.eval()
        x = torch.ones(2, 4)
        y = torch.ones(3, 5)
        self.run_test(model, x)
        self.run_test(
            model,
            x,
            input_names=["x"],
            dynamic_axes={"x": [0, 1]},
            test_with_inputs=[(x,), (y,)],
        )
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_symbolic_shape_inference_box_if(self):
        """Symbolic shape inference through an If node: a data-dependent
        branch follows a NonZero-driven index_select."""
        # test If
        class BoxIfModel(torch.nn.Module):
            def forward(self, boxes, scores):
                score_thresh = 0.0
                inds = torch.where(scores > score_thresh)[0]
                boxes_1 = boxes[inds]
                if boxes_1.numel() > 3:
                    return boxes_1
                else:
                    return boxes_1 * 2
        model = BoxIfModel()
        model.eval()
        boxes = torch.ones(2, 4)
        scores = torch.ones(1, 4)
        self.run_test(model, (boxes, scores))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_symbolic_shape_inference_arange_2(self):
        """arange with a tensor-derived start plus explicit int64 and double
        dtypes; only shapes are used, so the input is prunable."""
        # test Range
        class ArangeModel(torch.nn.Module):
            def forward(self, start):
                return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.int64)
        x = torch.randn(2, 3, 4)
        self.run_test(
            ArangeModel(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(ArangeModel(), (x,), remained_onnx_input_idx=[])
        class ArangeModel2(torch.nn.Module):
            def forward(self, start):
                return torch.arange(start.size(0), 8.5, 1.5, dtype=torch.double)
        x = torch.randn(2, 3, 4)
        self.run_test(
            ArangeModel2(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(ArangeModel2(), (x,), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_symbolic_shape_inference_nonzero(self):
        """nonzero applied to ones_like/zeros_like: the data is constant, so
        the original input should be foldable out of the exported graph."""
        class OneLikeModel(torch.nn.Module):
            def forward(self, x):
                ones = torch.ones_like(
                    x,
                    dtype=torch.float,
                    layout=torch.strided,
                    device=torch.device("cpu"),
                )
                return torch.nonzero(ones)
        x = torch.randn(2)
        self.run_test(OneLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0]})
        self.run_test(OneLikeModel(), x, remained_onnx_input_idx=[])
        x = torch.randn(2, 3, 4)
        self.run_test(
            OneLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(OneLikeModel(), x, remained_onnx_input_idx=[])
        class ZeroLikeModel(torch.nn.Module):
            def forward(self, x):
                zeros = torch.zeros_like(
                    x,
                    dtype=torch.float,
                    layout=torch.strided,
                    device=torch.device("cpu"),
                )
                return torch.nonzero(zeros)
        x = torch.randn(2)
        self.run_test(ZeroLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0]})
        self.run_test(ZeroLikeModel(), x, remained_onnx_input_idx=[])
        x = torch.randn(2, 3, 4)
        self.run_test(
            ZeroLikeModel(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]}
        )
        self.run_test(ZeroLikeModel(), x, remained_onnx_input_idx=[])
@skipIfUnsupportedMinOpsetVersion(9)
def test_symbolic_shape_inference_expand_1(self):
class ExpandModel(torch.nn.Module):
def forward(self, x):
return x.expand(4, 6, 2)
x = torch.randn(6, 1, requires_grad=True)
self.run_test(ExpandModel(), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    @skipScriptTest()  # Test code not scriptable
    def test_symbolic_shape_inference_expand_2(self):
        """Causal mask built from arange over a dynamic sequence length; the
        result depends only on shapes, so the input is prunable."""
        class M(torch.nn.Module):
            def forward(self, x):
                input_shape = x.size()
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length)
                causal_mask = (
                    seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
                    <= seq_ids[None, :, None]
                )
                return causal_mask.transpose(0, 1)
        x = torch.randn(3, 16)
        self.run_test(M(), (x,), input_names=["x"], dynamic_axes={"x": [0, 1]})
        self.run_test(M(), (x,), remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipScriptTest()  # Test code not scriptable
    def test_symbolic_shape_inference_slice(self):
        """Negative slice bound derived from a dynamic size; x contributes
        only its shape, so only position_bias should remain as input."""
        class M(torch.nn.Module):
            def forward(self, x, position_bias):
                input_shape = x.size()
                batch_size, seq_length = input_shape
                position_bias = position_bias[:, :, -seq_length:, :]
                return position_bias.transpose(0, 1)
        x = torch.randn(3, 16)
        position_bias = torch.randn(1, 3, 20, 8)
        self.run_test(
            M(),
            (x, position_bias),
            input_names=["x", "position_bias"],
            dynamic_axes={"x": [0, 1], "position_bias": [0, 1, 2, 3]},
        )
        self.run_test(M(), (x, position_bias), remained_onnx_input_idx=[1])
def test_symbolic_shape_inference_slice_2(self):
class M(torch.nn.Module):
def forward(self, position_bias):
position_bias = position_bias[:, :, -2:, :]
return position_bias.transpose(0, 1)
position_bias = torch.randn(1, 3, 20, 8)
self.run_test(M(), (position_bias,))
    @skipIfUnsupportedMinOpsetVersion(9)
    @skipScriptTest()
    def test_symbolic_shape_inference_time(self):
        """Shape inference over recurrent layers (LSTM/GRU/RNN) with dynamic
        time and batch axes on the sequence input."""
        input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
        h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
        c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
        model_lstm = torch.nn.LSTM(
            RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False
        )
        self.run_test(
            model_lstm,
            (input, (h0, c0)),
            input_names=["x", "y"],
            dynamic_axes={"x": [0, 1]},
        )
        model_gru = torch.nn.GRU(
            RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False
        )
        self.run_test(
            model_gru, (input, h0), input_names=["x", "y"], dynamic_axes={"x": [0, 1]}
        )
        model_rnn = torch.nn.RNN(
            RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False, bias=False
        )
        self.run_test(
            model_rnn, (input, h0), input_names=["x", "y"], dynamic_axes={"x": [0, 1]}
        )
    def test_symbolic_shape_inference_dynamic_axes(self):
        """Named (string-keyed) dynamic axes flow through a view(-1, last_dim)
        followed by transpose."""
        class M(torch.nn.Module):
            def forward(self, input_ids):
                input_shape = input_ids.size()
                input_ids = input_ids.view(-1, input_shape[-1])
                return input_ids.transpose(0, 1)
        x = torch.randn(3, 16)
        self.run_test(
            M(),
            (x,),
            input_names=["input_ids"],
            dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
        )
@skipIfUnsupportedMinOpsetVersion(9)
def test_hann_window_periodic(self):
class HannWindowModule_Periodic(torch.nn.Module):
def __init__(self):
super(HannWindowModule_Periodic, self).__init__()
self.window_length = 0
def forward(self, x, window_length: int):
self.window_length = window_length
return torch.add(
x,
torch.hann_window(
self.window_length, periodic=True, dtype=torch.float
),
)
win_length = 100
x = torch.randn(win_length)
module = HannWindowModule_Periodic()
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(9)
def test_hann_window_not_periodic(self):
class HannWindowModule_NotPeriodic(torch.nn.Module):
def __init__(self):
super(HannWindowModule_NotPeriodic, self).__init__()
self.window_length = 0
def forward(self, x, window_length: int):
self.window_length = window_length
return torch.add(
x,
torch.hann_window(
self.window_length, periodic=False, dtype=torch.float
),
)
win_length = 100
x = torch.randn(win_length)
module = HannWindowModule_NotPeriodic()
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(9)
@skipScriptTest()
def test_hann_window_default_values(self):
class HannWindowModule(torch.nn.Module):
def __init__(self):
super(HannWindowModule, self).__init__()
self.window_length = 0
def forward(self, x, window_length: int):
import torch.nn.functional as F
self.window_length = window_length
return torch.add(x, F.relu(torch.hann_window(self.window_length)))
win_length = 100
x = torch.randn(win_length, dtype=torch.float)
module = HannWindowModule()
output = module(x, win_length)
self.run_test(module, (x, win_length))
@skipIfUnsupportedMinOpsetVersion(12)
@skipScriptTest()
def test_tensordot_dim_count(self):
class M(torch.nn.Module):
def forward(self, x, y):
output = torch.tensordot(x, y, 2)
return output
x = torch.randint(6, (7, 5, 3, 4))
y = torch.randint(6, (3, 4, 9, 2))
self.run_test(M(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(12)
    def test_tensordot_dim_list(self):
        """Export torch.tensordot with explicit (including negative) dim
        lists for both operands."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                output = torch.tensordot(x, y, ([1, -2, -1], [1, 0, 3]))
                return output
        x = torch.randint(6, (7, 4, 3, 5, 2))
        y = torch.randint(6, (5, 4, 4, 2, 6))
        self.run_test(M(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(12)
    @skipScriptTest()
    def test_tensordot_dynamic_dim(self):
        """torch.tensordot re-run with a second input set of different sizes
        under fully dynamic axes."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                output = torch.tensordot(x, y, 2)
                return output
        x = torch.randint(6, (7, 5, 3, 4))
        y = torch.randint(6, (3, 4, 9, 2))
        new_x = torch.randint(6, (8, 6, 2, 5))
        new_y = torch.randint(6, (2, 5, 3, 4))
        self.run_test(
            M(),
            (x, y),
            test_with_inputs=[(new_x, new_y)],
            input_names=["input_x", "input_y"],
            dynamic_axes={"input_x": [0, 1, 2, 3], "input_y": [0, 1, 2, 3]},
        )
@skipIfUnsupportedMinOpsetVersion(9)
def test_to_device(self):
class M_ToDevice(torch.nn.Module):
def forward(self, x, y):
return x.to(y.device), y
class M_ToDeviceDtype(torch.nn.Module):
def forward(self, x, y):
return x.to(y.device, dtype=torch.long), y
x = torch.randn(6)
y = torch.randn(6)
self.run_test(M_ToDevice(), (x, y))
self.run_test(M_ToDeviceDtype(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(9)
    @skipScriptTest()
    def test_fill(self):
        """Export in-place fill_: once with an int argument, once with a
        float scalar on a long tensor (value is truncated to the dtype)."""
        class FillModule(torch.nn.Module):
            def forward(self, x, filled_value: int):
                return x.fill_(filled_value)
        x = torch.randn((4, 5, 6))
        filled_value = 7
        self.run_test(FillModule(), (x, filled_value))
        class FillScalarModule(torch.nn.Module):
            def forward(self, x):
                res = x + 2
                res.fill_(2.5)
                return res, x
        x = torch.ones(2, 3, 4, dtype=torch.long)
        self.run_test(FillScalarModule(), x)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_add_normal(self):
        """Export in-place index_add_ along each of dims 0, 1 and 2 with
        broadcastable update tensors."""
        class M(torch.nn.Module):
            def __init__(self, dim, index, updates):
                super(M, self).__init__()
                self.dim = dim
                self.index = index
                self.updates = updates
            def forward(self, x):
                x.index_add_(self.dim, self.index, self.updates)
                return x
        x = torch.ones(5, 4, 3)
        updates = torch.tensor([[1], [4], [7], [3], [2]], dtype=torch.float)
        index = torch.tensor([0, 2, 3, 1, 4])
        self.run_test(M(0, index, updates), (x,))
        updates = torch.tensor(
            [[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
        )
        index = torch.tensor([0, 2, 3, 1])
        self.run_test(M(1, index, updates), (x,))
        updates = torch.tensor(
            [[[1, 2, 3], [4, 5, 6], [7, 8, 9], [2, 3, 4]]], dtype=torch.float
        )
        index = torch.tensor([0, 2, 1])
        self.run_test(M(2, index, updates), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_add_dim_size_differ(self):
        """index_add_ where the updates tensor is shorter than the indexed
        dimension of x."""
        class M(torch.nn.Module):
            def __init__(self, dim, index, updates):
                super(M, self).__init__()
                self.dim = dim
                self.index = index
                self.updates = updates
            def forward(self, x):
                x.index_add_(self.dim, self.index, self.updates)
                return x
        x = torch.ones(5, 4, 3)
        updates = torch.tensor([[[1, 5, 7], [2, 4, 5], [5, 5, 6]]], dtype=torch.float)
        index = torch.tensor([0, 2, 1])
        self.run_test(M(1, index, updates), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_add_in_loop(self):
        """index_add_ repeated inside a Python loop whose trip count is
        randomized once at test-construction time (unrolled during trace)."""
        class M(torch.nn.Module):
            def __init__(self, dim, index, updates, loop_count):
                super(M, self).__init__()
                self.dim = dim
                self.index = index
                self.updates = updates
                self.loop_count = loop_count
            def forward(self, x):
                for i in range(self.loop_count):
                    x.index_add_(self.dim, self.index, self.updates)
                return x
        x = torch.ones(5, 4, 3)
        updates = torch.tensor(
            [[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
        )
        index = torch.tensor([0, 2, 3, 1])
        loop_count = torch.randint(20, (1,))[0].item()
        self.run_test(M(1, index, updates, loop_count), (x,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_add_if(self):
        """index_add_ under a scripted data-dependent branch: the condition
        selects between two index tensors (exported via torch.jit.script)."""
        class M(torch.nn.Module):
            def __init__(self, dim, updates, index_true, index_false):
                super(M, self).__init__()
                self.dim = dim
                self.updates = updates
                self.index_true = index_true
                self.index_false = index_false
            def forward(self, x, cond):
                if cond:
                    x.index_add_(self.dim, self.index_true, self.updates)
                else:
                    x.index_add_(self.dim, self.index_false, self.updates)
                return x
        x = torch.ones(5, 4, 3)
        updates = torch.tensor(
            [[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
        )
        index_true = torch.tensor([0, 2, 3, 1])
        index_false = torch.tensor([1, 0, 2, 3])
        cond = torch.tensor(1, dtype=torch.bool)
        self.run_test(
            torch.jit.script(M(1, updates, index_true, index_false)), (x, cond)
        )
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_index_add_dynamic_axes(self):
        """index_add_ re-run with a larger input under dynamic axes 0 and 1."""
        class M(torch.nn.Module):
            def __init__(self, dim, index, updates):
                super(M, self).__init__()
                self.dim = dim
                self.index = index
                self.updates = updates
            def forward(self, x):
                x.index_add_(self.dim, self.index, self.updates)
                return x
        x = torch.ones(5, 4, 3)
        y = torch.ones(7, 8, 3)
        updates = torch.tensor(
            [[[1, 5, 7], [2, 4, 5], [5, 5, 6], [2, 3, 4]]], dtype=torch.float
        )
        index = torch.tensor([0, 2, 3, 1])
        self.run_test(
            M(1, index, updates),
            (x,),
            test_with_inputs=[y],
            input_names=["input_1"],
            dynamic_axes={"input_1": [0, 1]},
        )
    def test_roll(self):
        """Export torch.roll with scalar/list shifts and positive/negative
        dims."""
        class M(torch.nn.Module):
            def __init__(self, shifts, dims):
                super(M, self).__init__()
                self.shifts = shifts
                self.dims = dims
            def forward(self, x):
                return torch.roll(x, self.shifts, self.dims)
        x = torch.randn(2, 3, 4)
        self.run_test(M([1, 1], [1, 0]), (x,))
        self.run_test(M([0, 1, 2], [1, 0, 2]), (x,))
        self.run_test(M(2, 1), (x,))
        self.run_test(M([-1, 3], [-2, -1]), (x,))
def test_sum(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.sum(x)
x = torch.ones(12, 3)
self.run_test(M(), (x,), input_names=["x"], dynamic_axes={"x": [0]})
def test_sum_empty_tensor(self):
class M(torch.nn.Module):
def forward(self, x):
return x[0:0].sum(), x.sum()
x = torch.ones(12)
self.run_test(M(), (x,))
x = torch.ones(2, 0, 3)
self.run_test(M(), (x,))
x = torch.ones(0)
self.run_test(M(), (x,))
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_broad_cast_tensors(self):
        """Export torch.broadcast_tensors across three shape combinations,
        including mixed ranks."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                m = torch.broadcast_tensors(x, y)
                return m
        x = torch.randint(5, (1,))
        y = torch.randint(5, (5,))
        self.run_test(M(), (x, y))
        x = torch.randint(5, (4, 2, 1, 4))
        y = torch.randint(5, (2, 3, 1))
        self.run_test(M(), (x, y))
        x = torch.randn(2, 1, 4)
        y = torch.randn(5, 2, 3, 1)
        self.run_test(M(), (x, y))
    @skipScriptTest()
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dist_normal(self):
        """Export Normal(...).sample(): only the sample's leading size (and
        the pass-through inputs) are compared, since draws are random."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                return torch.distributions.Normal(x, y).sample().size(0), x, y
        self.run_test(M(), (torch.tensor([0.0]), torch.tensor([[1.0], [2.0]])))
        self.run_test(M(), (torch.tensor([0.0]), torch.tensor([1.0])))
        self.run_test(
            M(),
            (
                torch.tensor([[[0.0], [10.0]], [[2.0], [8.0]], [[2.0], [8.0]]]),
                torch.tensor([[1.0], [3.0]]),
            ),
        )
    @skipScriptTest()
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dist_normal_correctness(self):
        """Statistically validate exported Normal sampling: mean and std of
        20k ORT draws must land within 10% of the requested parameters."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                return torch.distributions.Normal(x, y).sample([20000])
        expected_mean = 5.0
        expected_std = 10.0
        model_export = M()
        dummy_input = (torch.tensor([expected_mean]), torch.tensor([expected_std]))
        ort_sess = convert_to_onnx(
            model_export,
            input=dummy_input,
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.EVAL,
        )
        ort_out = run_ort(ort_sess, inputs=dummy_input)
        actual_std = np.std(ort_out)
        actual_mean = np.mean(ort_out)
        assert (
            abs(abs(actual_mean) - expected_mean) <= expected_mean * 0.1
        ), "the gap of mean between ort outputs and expected one is unacceptable."
        assert (
            abs(abs(actual_std) - expected_std) <= expected_std * 0.1
        ), "the gap of variance between ort outputs and expected one is unacceptable."
    @skipScriptTest()
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dist_uniform(self):
        """Export Uniform(...).sample(): only the sample's leading size (and
        the pass-through inputs) are compared, since draws are random."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                return torch.distributions.Uniform(x, y).sample().size(0), x, y
        self.run_test(M(), (torch.tensor([0.0]), torch.tensor([10.0])))
        self.run_test(M(), (torch.tensor([[0.0], [6.0]]), torch.tensor([[1.0], [7.0]])))
        self.run_test(
            M(), (torch.tensor([1.0]), torch.tensor([[10.0], [7.0], [9.0], [20.0]]))
        )
    @skipScriptTest()
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dist_uniform_correctness(self):
        """Statistically validate exported Uniform sampling: 10k ORT draws
        must stay within [min, max] and have a mean within 5% of midpoint."""
        class M(torch.nn.Module):
            def forward(self, x, y):
                return torch.distributions.Uniform(x, y).sample([10000])
        expected_min = 5.0
        expected_max = 10.0
        expected_mean = (expected_min + expected_max) / 2
        model_export = M()
        dummy_input = (torch.tensor([expected_min]), torch.tensor([expected_max]))
        ort_sess = convert_to_onnx(
            model_export,
            input=dummy_input,
            opset_version=self.opset_version,
            training=torch.onnx.TrainingMode.EVAL,
        )
        ort_out = run_ort(ort_sess, inputs=dummy_input)
        actual_min = np.min(ort_out)
        actual_max = np.max(ort_out)
        actual_mean = np.mean(ort_out)
        assert (
            actual_min >= expected_min
        ), "the minimum value of ort outputs is out of scope."
        assert (
            actual_max <= expected_max
        ), "the maximum value of ort outputs is out of scope."
        assert (
            abs(actual_mean - expected_mean) <= expected_mean * 0.05
        ), "the mean value of ort outputs is out of scope."
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_int(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor([2 for i in range(x.size()[0])], dtype=torch.int)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_float(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor(
[1.1 for i in range(x.size()[0])], dtype=torch.float
)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
@skipIfUnsupportedMinOpsetVersion(13)
def test_sequence_to_bool(self):
class M(torch.nn.Module):
def forward(self, x):
result = torch.tensor(
[False for i in range(x.size()[0])], dtype=torch.bool
)
return x, result
x = torch.randn(10, 5)
self.run_test(M(), (x,))
def test_onnx_checker_invalid_graph(self):
class CustomAddModule(torch.nn.Module):
def forward(self, x, y):
return torch.add(x, y)
def symbolic_custom_invalid_add(g, input, other, alpha=None):
return g.op("Add", input, other, invalid_attr_i=1)
register_custom_op_symbolic("::add", symbolic_custom_invalid_add, 1)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
test_model = CustomAddModule()
f = io.BytesIO()
try:
with self.assertRaises(CheckerError) as cm:
torch.onnx.export(test_model, (x, y), f)
finally:
unregister_custom_op_symbolic("::add", 1)
self.assertTrue(f.getvalue(), "ONNX graph was not exported.")
loaded_model = onnx.load_from_string(f.getvalue())
    def test_tuple_output_from_if_with_raised_exception(self):
        """A scripted branch that raises on one path and returns a tuple on
        the other must still export (only the non-raising path is traced)."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, t: Tensor) -> Tuple[Tensor, Tensor]:
                if float(t) < 0:
                    raise Exception("Negative input")
                else:
                    return torch.zeros(5), torch.zeros(5)
        x = torch.zeros(1)
        self.run_test(torch.jit.script(M()), (x,))
    def test_shape_value_map(self):
        """Shape value propagation through view/softmax/reshape: even with a
        dynamic batch axis, the exported output dim 1 must stay fixed (128)."""
        class RSoftMax(torch.nn.Module):
            def __init__(self, radix, cardinality):
                super().__init__()
                self.radix = radix
                self.cardinality = cardinality
            def forward(self, x):
                batch = x.size(0)
                x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
                x = F.softmax(x, dim=1)
                x = x.reshape(batch, -1)
                return x
        radix = 2
        cardinality = 1
        x = torch.randn(10, 1, 128, 1)
        f = io.BytesIO()
        torch.onnx.export(
            RSoftMax(radix, cardinality),
            (x,),
            f,
            input_names=["x"],
            dynamic_axes={"x": [0]},
        )
        loaded_model = onnx.load_from_string(f.getvalue())
        # dim 1 of the output must have been resolved to a static 128.
        self.assertEqual(
            loaded_model.graph.output[0].type.tensor_type.shape.dim[1].dim_value, 128
        )
# NOTE: For quantization tests, choose scale and zero point carefully
# such that inputs and outputs do not always overflow/underflow.
# Otherwise test results could be inaccurate.
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_linear(self):
model = torch.nn.quantized.Linear(4, 8)
# Set fixed weight to avoid flaky test.
weight = torch.quantize_per_tensor(
torch.arange(32, dtype=torch.float).view(8, 4), 0.5, 0, torch.qint8
)
# Set non-zero bias.
bias = torch.arange(8, dtype=torch.float)
model.set_weight_bias(weight, bias)
# Set fixed input to avoid flaky test.
input = torch.randn(4, 4)
input = torch.arange(16, dtype=torch.float).view(4, 4) - 8
input_tensor = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
self.run_test(model, input_tensor)
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_quantized_conv2d(self):
        """Export torch.nn.quantized.Conv2d with explicitly set quantized
        weight and float bias."""
        model = torch.nn.quantized.Conv2d(16, 33, 3, stride=2)
        # Manually initialize model weight and bias to random numbers.
        # By default all zeros.
        q_weight = torch.quantize_per_tensor(
            torch.randn(33, 16, 3, 3), 0.5, 0, torch.qint8
        )
        bias = torch.arange(33).to(torch.float) - 16
        model.set_weight_bias(q_weight, bias)
        input = torch.randn(3, 16, 32, 32)
        q_input = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
        self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_adaptive_avg_pool2d(self):
model = torch.nn.AdaptiveAvgPool2d((5, 7))
input = torch.randn(4, 3, 10, 14)
q_input = torch.quantize_per_tensor(input, 0.2, 128, torch.quint8)
self.run_test(model, q_input)
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_quantized_conv2d_relu(self):
        """Export the fused quantized ConvReLU2d with explicitly set
        quantized weight and float bias."""
        model = torch.nn.intrinsic.quantized.ConvReLU2d(16, 33, 3, stride=2)
        # Manually initialize model weight and bias to random numbers.
        # By default all zeros.
        q_weight = torch.quantize_per_tensor(
            torch.randn(33, 16, 3, 3), 0.5, 0, torch.qint8
        )
        bias = torch.arange(33).to(torch.float) - 16
        model.set_weight_bias(q_weight, bias)
        input = torch.randn(3, 16, 32, 32)
        q_input = torch.quantize_per_tensor(input, 0.5, 128, torch.quint8)
        self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_hardswish(self):
model = torch.nn.quantized.Hardswish(1.0, 0)
input = torch.randn(2, 6)
q_input = torch.quantize_per_tensor(input, 0.26, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_hardsigmoid(self):
model = torch.nn.Hardsigmoid()
input = torch.randn(2, 6)
q_input = torch.quantize_per_tensor(input, 0.26, 128, torch.quint8)
self.run_test(model, q_input)
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantized_flatten(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input)
x = torch.quantize_per_tensor(torch.randn(1, 2, 3, 4), 1, 0, torch.quint8)
self.run_test(FlattenModel(), x)
    @skipIfUnsupportedMinOpsetVersion(10)
    @skipScriptTest()  # torch.jit.frontend.FrontendError: Cannot instantiate class 'QFunctional' in a script function:
    def test_quantized_arithmetic_qfunctional(self):
        """Export quantized add/mul expressed via nn.quantized.QFunctional."""
        x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
        y = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
        class ArithmeticModel(torch.nn.Module):
            def forward(self, x, y):
                o = torch.nn.quantized.QFunctional().add(x, y)
                o = torch.nn.quantized.QFunctional().mul(o, x)
                return o
        self.run_test(ArithmeticModel(), (x, y))
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_quantized_arithmetic(self):
        """Export quantized add/mul expressed via the torch.ops.quantized
        ops, with explicit output scale and zero point."""
        x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
        y = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 128, torch.quint8)
        class ArithmeticModel2(torch.nn.Module):
            def forward(self, x, y):
                o = torch.ops.quantized.add(x, y, 0.4, 100)
                o = torch.ops.quantized.mul(o, x, 0.4, 100)
                return o
        self.run_test(ArithmeticModel2(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_quantize_per_tensor(self):
class Module(torch.nn.Module):
def forward(self, x):
return (
torch.quantize_per_tensor(x, 0.2, 0, torch.qint8),
torch.quantize_per_tensor(x, 0.2, 128, torch.quint8),
)
x = torch.randn(4, 6)
self.run_test(Module(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_dequantize(self):
class Module(torch.nn.Module):
def forward(self, x):
return torch.dequantize(x)
x = torch.quantize_per_tensor(torch.randn(3, 4), 0.2, 0, torch.qint8)
self.run_test(Module(), x)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_linear_per_channel(self):
        """QAT pipeline (prepare_qat -> convert) around a Linear layer with
        the fbgemm qconfig, exported with deterministic weight/bias/input."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.linear = torch.nn.Linear(4, 3)
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.linear(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.prepare_qat(model)
        # Set fixed weight and bias to avoid flaky test.
        model.linear.weight = torch.nn.Parameter(
            _construct_tensor_for_quantization_test((3, 4))
        )
        model.linear.bias = torch.nn.Parameter(torch.arange(3, dtype=torch.float))
        model = torch.quantization.convert(model)
        # Set fixed input to avoid flaky test.
        input = _construct_tensor_for_quantization_test((4, 4), offset=-8)
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_relu(self):
        """QAT pipeline (prepare_qat -> convert) around a standalone ReLU
        with the fbgemm qconfig."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.relu = torch.nn.ReLU()
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.relu(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.prepare_qat(model)
        model = torch.quantization.convert(model)
        input = torch.randn(8, 4)
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_conv2d(self):
        """QAT pipeline (prepare_qat -> convert) around a Conv2d with the
        fbgemm qconfig, exported with deterministic weight/bias/input."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.conv(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.prepare_qat(model)
        # Set fixed weight and bias to avoid flaky test.
        model.conv.weight = torch.nn.Parameter(
            _construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
        )
        model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
        model = torch.quantization.convert(model)
        # Set fixed input to avoid flaky test.
        input = _construct_tensor_for_quantization_test(
            (3, 4, 8, 8), offset=-384, max_val=12
        )
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_conv2d_relu(self):
        """QAT pipeline around Conv2d followed by a separate (unfused) ReLU,
        with deterministic weight/bias/input."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
                self.relu = torch.nn.ReLU()
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.conv(x)
                x = self.relu(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.prepare_qat(model)
        # Set fixed weight and bias to avoid flaky test.
        model.conv.weight = torch.nn.Parameter(
            _construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
        )
        model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
        model = torch.quantization.convert(model)
        # Set fixed input to avoid flaky test.
        input = _construct_tensor_for_quantization_test(
            (3, 4, 8, 8), offset=-384, max_val=12
        )
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_conv2d_relu_fused(self):
        """QAT pipeline where Conv2d+ReLU are fused via fuse_modules before
        prepare_qat, with deterministic weight/bias/input."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.conv = torch.nn.Conv2d(2, 4, 3, stride=2)
                self.relu = torch.nn.ReLU()
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.conv(x)
                x = self.relu(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.fuse_modules(model.eval(), [["conv", "relu"]])
        model = torch.quantization.prepare_qat(model.train())
        # Set fixed weight and bias to avoid flaky test.
        model.conv.weight = torch.nn.Parameter(
            _construct_tensor_for_quantization_test((2, 4, 3, 3), max_val=2)
        )
        model.conv.bias = torch.nn.Parameter(torch.tensor([0.0, 1.0]))
        model = torch.quantization.convert(model)
        # Set fixed input to avoid flaky test.
        input = _construct_tensor_for_quantization_test(
            (3, 4, 8, 8), offset=-384, max_val=12
        )
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(10)
    def test_qat_maxpool2d(self):
        """QAT pipeline around MaxPool2d (no weights), exported with a
        deterministic input."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.quant = torch.quantization.QuantStub()
                self.pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
                self.dequant = torch.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.pool(x)
                x = self.dequant(x)
                return x
        model = M()
        model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        model = torch.quantization.prepare_qat(model.train())
        model = torch.quantization.convert(model)
        # Set fixed input to avoid flaky test.
        input = _construct_tensor_for_quantization_test((4, 4, 3, 2))
        self.run_test(model, input)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_convolution_allow_tf32(self):
        """Export the low-level torch._convolution both with and without the
        trailing allow_tf32 positional argument.

        NOTE(review): the two calls differ only in arity — the second form
        omits the final positional flag; the argument lists are positional
        and order-critical, so do not reformat them.
        """
        class Module(torch.nn.Module):
            def __init__(self, allow_tf32):
                super().__init__()
                self.allow_tf32 = allow_tf32
                weight = torch.rand(32, 3, 3, 3)
                self.weight = torch.nn.Parameter(weight)
            def forward(self, x):
                if self.allow_tf32:
                    return torch._convolution(
                        x,
                        self.weight,
                        None,
                        [2, 2],
                        [0, 0],
                        [1, 1],
                        False,
                        [0, 0],
                        1,
                        False,
                        False,
                        True,
                        True,
                    )
                else:
                    return torch._convolution(
                        x,
                        self.weight,
                        None,
                        [2, 2],
                        [0, 0],
                        [1, 1],
                        False,
                        [0, 0],
                        1,
                        False,
                        False,
                        True,
                    )
        x = torch.randn(1, 3, 224, 224)
        self.run_test(Module(False), x, rtol=1e-3, atol=1e-6)
        self.run_test(Module(True), x, rtol=1e-3, atol=1e-6)
    @skipIfUnsupportedMinOpsetVersion(16)
    def test_grid_sample(self):
        """Export F.grid_sample across the full product of mode, padding
        mode and align_corners; bicubic+border needs looser tolerances."""
        n, c, h_in, w_in, h_out, w_out = 1, 1, 3, 2, 2, 4
        class GridSampleModule(torch.nn.Module):
            def __init__(self, mode, padding_mode, align_corners) -> None:
                super().__init__()
                self.mode, self.padding_mode, self.align_corners = (
                    mode,
                    padding_mode,
                    align_corners,
                )
            def forward(self, input, grid):
                return torch.nn.functional.grid_sample(
                    input, grid, self.mode, self.padding_mode, self.align_corners
                )
        for mode, padding_mode, align_corners in itertools.product(
            ("bilinear", "nearest", "bicubic"),
            ("zeros", "border", "reflection"),
            (True, False),
        ):
            atol_rtol = {}
            if (mode, padding_mode) == ("bicubic", "border"):
                if align_corners:
                    atol_rtol.update({"atol": 0.3, "rtol": 0.4})
                else:
                    atol_rtol.update({"atol": 0.02, "rtol": 0.02})
            input, grid = torch.randn(n, c, h_in, w_in), torch.randn(n, h_out, w_out, 2)
            self.run_test(
                GridSampleModule(mode, padding_mode, align_corners),
                (input, grid),
                **atol_rtol,
            )
def make_test(
    name,
    base,
    layer,
    bidirectional,
    initial_state,
    variable_length,
    dropout,
    script_test_min_opset_version,
    **extra_kwargs,
):
    """Generate one RNN export test method and attach it to _TestONNXRuntime.

    Each option is a (value, label) pair; the labels are joined into the
    generated test's name. `script_test_min_opset_version` gates whether the
    script-mode path of the test is enabled at runtime.
    """
    test_name = str(
        "_".join(
            [
                "test",
                name,
                layer[1],
                bidirectional[1],
                initial_state[1],
                variable_length[1],
                dropout[1],
            ]
        )
    )
    # Cannot export with older opsets because of "ConstantFill" op
    # ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
    # There are still some issues prevent us from enabling script test for these scenarios:
    # test_gru_*:
    #   Operator aten::as_tensor is not supported by exporter yet.
    #     - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055382
    #   Operator aten::_pack_padded_sequence is not supported by exporter yet.
    #     - https://msdata.visualstudio.com/Vienna/_workitems/edit/1055384
    @skipScriptTest()
    @skipIfUnsupportedMinOpsetVersion(9)
    def f(self):
        self.is_script_test_enabled = (
            self.opset_version >= script_test_min_opset_version
        )
        self._dispatch_rnn_test(
            base,
            layers=layer[0],
            bidirectional=bidirectional[0],
            initial_state=initial_state[0],
            packed_sequence=variable_length[0],
            dropout=dropout[0],
            **extra_kwargs,
        )
    f.__name__ = test_name
    setattr(_TestONNXRuntime, f.__name__, f)
def setup_rnn_tests():
    """Generate the full cartesian product of RNN export tests.

    2 (layers) x 2 (direction) x 2 (initial state) x 3 (sequence-length mode)
    x 2 (dropout) combinations, each for 4 RNN bases = 192 tests.
    Each *_opts entry pairs a parameter value with its test-name label.
    """
    layers_opts = [(1, "unilayer"), (3, "trilayer")]
    bidirectional_opts = [(False, "forward"), (True, "bidirectional")]
    initial_state_opts = [(True, "with_initial_state"), (False, "no_initial_state")]
    variable_length_opts = [
        (0, "without_sequence_lengths"),
        (1, "with_variable_length_sequences"),
        (2, "with_batch_first_sequence_lengths"),
    ]
    dropout_opts = [(0.2, "with_dropout"), (0.0, "without_dropout")]
    test_count = 0
    for (
        layer,
        bidirectional,
        initial_state,
        variable_length,
        dropout,
    ) in itertools.product(
        layers_opts,
        bidirectional_opts,
        initial_state_opts,
        variable_length_opts,
        dropout_opts,
    ):
        for base, name, extra_kwargs in (
            ("elman", "elman_relu", {"nonlinearity": "relu"}),
            ("elman", "elman_tanh", {"nonlinearity": "tanh"}),
            ("lstm", "lstm", {}),
            ("gru", "gru", {}),
        ):
            # Need Add between list of tensors
            script_test_min_opset_version = 11
            if (  # compiling in script mode fails with errors like:
                # torch.jit.frontend.UnsupportedNodeError: annotated assignments
                # without assigned value aren't supported
                # https://msdata.visualstudio.com/Vienna/_workitems/edit/1160723
                base == "elman"
                or
                # compiling in script mode fails with errors like:
                # RuntimeError: Arguments for call are not valid.
                # https://msdata.visualstudio.com/Vienna/_workitems/edit/1160723
                base == "lstm"
            ):
                # +inf threshold effectively disables scripting for these bases.
                script_test_min_opset_version = float("inf")
            make_test(
                name,
                base,
                layer,
                bidirectional,
                initial_state,
                variable_length,
                dropout,
                script_test_min_opset_version,
                **extra_kwargs,
            )
            test_count += 1
    # sanity check that a representative example does exist
    _TestONNXRuntime.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout
    # make sure no one accidentally disables all the tests without
    # noticing
    if test_count != 192:
        raise ValueError("Expected 192 tests but found {}".format(test_count))
setup_rnn_tests()
def MakeTestCase(opset_version: int, keep_initializers_as_inputs: bool = True) -> type:
    """Create a concrete ``unittest.TestCase`` subclass for one opset.

    The new class copies every attribute (including the generated RNN tests)
    from ``_TestONNXRuntime`` and pins ``opset_version`` and
    ``keep_initializers_as_inputs`` as class attributes. Classes built with
    ``keep_initializers_as_inputs=False`` get an ``_IRv4`` name suffix.
    """
    suffix = "" if keep_initializers_as_inputs else "_IRv4"
    class_name = f"TestONNXRuntime_opset{opset_version}{suffix}"
    attrs = dict(_TestONNXRuntime.__dict__)
    attrs["opset_version"] = opset_version
    attrs["keep_initializers_as_inputs"] = keep_initializers_as_inputs
    return type(str(class_name), (unittest.TestCase,), attrs)
# Instantiate one concrete TestCase per supported opset. The _IRv4 variants
# export initializers as constants rather than graph inputs
# (keep_initializers_as_inputs=False); opset 13+ only ships the IRv4 form.
TestONNXRuntime_opset7 = MakeTestCase(7)
TestONNXRuntime_opset8 = MakeTestCase(8)
TestONNXRuntime_opset9 = MakeTestCase(9)
TestONNXRuntime_opset9_IRv4 = MakeTestCase(9, keep_initializers_as_inputs=False)
TestONNXRuntime_opset10 = MakeTestCase(10)
TestONNXRuntime_opset10_IRv4 = MakeTestCase(10, keep_initializers_as_inputs=False)
TestONNXRuntime_opset11 = MakeTestCase(11)
TestONNXRuntime_opset11_IRv4 = MakeTestCase(11, keep_initializers_as_inputs=False)
TestONNXRuntime_opset12 = MakeTestCase(12)
TestONNXRuntime_opset12_IRv4 = MakeTestCase(12, keep_initializers_as_inputs=False)
TestONNXRuntime_opset13 = MakeTestCase(13, keep_initializers_as_inputs=False)
TestONNXRuntime_opset14 = MakeTestCase(14, keep_initializers_as_inputs=False)
TestONNXRuntime_opset15 = MakeTestCase(15, keep_initializers_as_inputs=False)
TestONNXRuntime_opset16 = MakeTestCase(16, keep_initializers_as_inputs=False)
if __name__ == "__main__":
    unittest.main()
| 35.249981
| 120
| 0.540454
|
d391cc4647e3e439d312c3407ed3fdc964c5c6fd
| 3,481
|
py
|
Python
|
cli/plot1D_evolution.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | 1
|
2022-02-03T09:31:24.000Z
|
2022-02-03T09:31:24.000Z
|
cli/plot1D_evolution.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | 6
|
2022-02-03T09:24:24.000Z
|
2022-02-07T09:25:16.000Z
|
cli/plot1D_evolution.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
MIT License
Copyright 2021 Hannes Holey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from hans.plottools import DatasetSelector
def get_parser():
    """Build the command-line parser for the 1D evolution plot tool."""
    parser = ArgumentParser()
    # (flag, keyword arguments) pairs, registered in the original order.
    option_specs = [
        ('-p', dict(dest="path", default="data", help="path (default: data)")),
        ('-v', dict(dest="key", default=None, choices=[None, "rho", "p", "jx", "jy"], help="variable (default: None)")),
        ('-d', dict(dest="dir", default="x", choices=["x", "y"], help="cutting direction (default: x)")),
        ('-f', dict(dest="freq", type=int, default=1, help="plot frequency (default: 1)")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser
if __name__ == "__main__":
ylabels = {"rho": r"Density $\rho$",
"p": r"Pressure $p$",
"jx": r"Momentum density $j_x$",
"jy": r"Momentum denisty $j_y$"}
cmap = plt.cm.coolwarm
parser = get_parser()
args = parser.parse_args()
files = DatasetSelector(args.path, mode="single")
if args.key is None:
data = files.get_centerlines(freq=args.freq, dir=args.dir)
fig, ax = plt.subplots(2, 2, sharex=True, figsize=(6.4, 4.8))
for fn, fdata in data.items():
ax[1, 0].set_xlabel(rf"Distance ${args.dir}$")
ax[1, 1].set_xlabel(rf"Distance ${args.dir}$")
maxT = max(fdata["rho"])
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=maxT))
for (key, tdict), axis in zip(fdata.items(), ax.flat):
for time, (xdata, ydata) in tdict.items():
axis.plot(xdata, ydata, color=cmap(time/maxT))
axis.set_ylabel(ylabels[key])
fig.colorbar(sm, ax=ax.ravel().tolist(), label='time $t$', extend='max')
else:
data = files.get_centerlines(key=args.key, freq=args.freq, dir=args.dir)
fig, ax = plt.subplots(1, figsize=(6.4, 4.8), tight_layout=True)
for fn, fdata in data.items():
maxT = max(fdata[args.key])
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=maxT))
for time, (xdata, ydata) in fdata[args.key].items():
ax.plot(xdata, ydata, color=cmap(time/maxT))
ax.set_ylabel(ylabels[args.key])
ax.set_xlabel(rf"Distance ${args.dir}$")
fig.colorbar(sm, ax=ax, label='time $t$', extend='max')
plt.show()
| 38.252747
| 128
| 0.652399
|
eed46449f4612bdd29e734bb8244717c03115bf6
| 2,386
|
py
|
Python
|
autograd_optim_curve.py
|
shizuo-kaji/demo_autograd
|
ad3de6ec5c30780eef985f360c24005331642533
|
[
"MIT"
] | 1
|
2020-03-08T23:53:30.000Z
|
2020-03-08T23:53:30.000Z
|
autograd_optim_curve.py
|
shizuo-kaji/demo_autograd
|
ad3de6ec5c30780eef985f360c24005331642533
|
[
"MIT"
] | null | null | null |
autograd_optim_curve.py
|
shizuo-kaji/demo_autograd
|
ad3de6ec5c30780eef985f360c24005331642533
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
## autograd+scipy demo for optimising curves
#%%
import autograd.numpy as np
from autograd import grad, jacobian, hessian
from scipy.optimize import minimize,NonlinearConstraint,LinearConstraint
import matplotlib.pyplot as plt
#%% squared sum of the segment length
def total_length(X):
    """Sum of squared segment lengths of the polyline encoded by flat vector X."""
    pts = np.reshape(X, (-1, 2))
    segments = pts[1:] - pts[:-1]
    return np.sum(segments * segments)
# cos of adjacent tangents
def curvature(X):
    """Discrete curvature proxy: 1 - cos(angle) between adjacent tangents.

    X is the flat (n, 2) coordinate vector; the result has n-2 entries,
    one per interior vertex (0 for collinear, up to 2 for a full reversal).
    """
    pts = np.reshape(X, (-1, 2))
    tangents = pts[1:] - pts[:-1]
    sq_len = np.sum(tangents * tangents, axis=1)
    dots = (tangents[1:, 0] * tangents[:-1, 0]
            + tangents[1:, 1] * tangents[:-1, 1])
    return 1 - dots / np.sqrt(sq_len[1:] * sq_len[:-1])
def squared_variation(u):
    """Sum of squared successive differences of the 1-D array u."""
    diffs = u[1:] - u[:-1]
    return np.sum(diffs * diffs)
# fix coords of end points, and y[N//3] > 1 and y[2N//3] < -1
def constraints(X):
    # X is the flat (N+1, 2) coordinate vector of the curve's control points.
    x=np.reshape(X,(-1,2))
    # Values constrained by the NonlinearConstraint bounds defined below:
    # both end points pinned, plus the y-coords of nodes N//3 and 2*N//3.
    # NOTE: relies on the module-level segment count N.
    return np.array([x[0,0],x[0,1],x[N//3,1],x[2*N//3,1],x[N,0],x[N,1]])
def linear_combination_of_hessians(fun, argnum=0, *args, **kwargs):
    # Returns h(*funargs, v): the v-weighted sum over the Hessians of each
    # output component of `fun` (tensordot over axis 0) — the signature that
    # scipy's NonlinearConstraint expects for its `hess` callback.
    functionhessian = hessian(fun, argnum, *args, **kwargs)
    #not using wrap_nary_f because we need to do the docstring on our own
    def linear_combination_of_hessians(*funargs, **funkwargs):
        # funargs[-1] is the vector of Lagrange multipliers v supplied by scipy.
        return np.tensordot(functionhessian(*funargs[:-1], **funkwargs), funargs[-1], axes=(0, 0))
    return linear_combination_of_hessians
#%% setup
N=9 # number of segments
weight = 0.1 # try setting = 0.0
# target is the weighted sum of total length and curvature
target = lambda x: total_length(x) + weight*np.sum(curvature(x)**2)
# initial point: x-coords evenly spaced on [0, 1], interior y-coords random
x0 = np.zeros((N+1,2))
x0[:,0] = np.linspace(0,1,N+1)
x0[1:-1,1] = np.random.uniform(-0.5,0.5,N-1)
X0 = x0.ravel()
# jacobian and hessian by autograd
jaco = jacobian(target)
hess = hessian(target)
constraints_hess = linear_combination_of_hessians(constraints)
constraints_jac = jacobian(constraints)
# non-linear inequality constraints; bounds order matches constraints():
# [x0.x, x0.y, y_{N//3}, y_{2N//3}, xN.x, xN.y] — end points pinned to
# (0,0) and (1,0), y_{N//3} >= 1 and y_{2N//3} <= -1.
hard_constraint = NonlinearConstraint(constraints, [0,0,1,-np.inf,1,0],[0,0,np.inf,-1,1,0], jac=constraints_jac, hess=constraints_hess)
#%% optimise!
res = minimize(target, X0, method = 'trust-constr',
options={'xtol': 1e-10, 'gtol': 1e-8, 'disp': True, 'verbose': 1}, jac = jaco, hess=hess, constraints=[hard_constraint])
#%% plot result: initial curve in blue, optimised curve in orange
plt.plot(x0[:,0],x0[:,1])
print("initial (blue):", target(x0),total_length(x0),np.sum(curvature(x0)**2))
x=np.reshape(res.x,(-1,2))
plt.plot(x[:,0],x[:,1])
print("optimised (orange):", target(x),total_length(x),np.sum(curvature(x)**2))
# %%
| 32.243243
| 135
| 0.668483
|
f79ac1842e56dc34e7c1e23eef7124c614ccbc9a
| 17,586
|
py
|
Python
|
examples/pytorch/graphsage/experimental/train_dist_unsupervised.py
|
yzh119/dgl
|
6a7c1eb2323383739585259c70c8b9065ca95d1e
|
[
"Apache-2.0"
] | 2
|
2020-07-24T19:26:51.000Z
|
2021-08-21T21:04:11.000Z
|
examples/pytorch/graphsage/experimental/train_dist_unsupervised.py
|
yzh119/dgl
|
6a7c1eb2323383739585259c70c8b9065ca95d1e
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/graphsage/experimental/train_dist_unsupervised.py
|
yzh119/dgl
|
6a7c1eb2323383739585259c70c8b9065ca95d1e
|
[
"Apache-2.0"
] | null | null | null |
import os
os.environ['DGLBACKEND']='pytorch'
from multiprocessing import Process
import argparse, time, math
import numpy as np
from functools import wraps
import tqdm
import sklearn.linear_model as lm
import sklearn.metrics as skm
import dgl
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from dgl.data.utils import load_graphs
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
#from pyinstrument import Profiler
from train_sampling import SAGE
class NegativeSampler(object):
    """Draws negative-example node IDs uniformly (with replacement) from a
    fixed candidate tensor. ``g`` is accepted for interface symmetry with the
    other samplers but is not used."""

    def __init__(self, g, neg_nseeds):
        self.neg_nseeds = neg_nseeds

    def __call__(self, num_samples):
        # select local neg nodes as seeds
        pool = self.neg_nseeds
        indices = th.randint(pool.shape[0], (num_samples,))
        return pool[indices]
class NeighborSampler(object):
    """Collate function for link-prediction training.

    Given a batch of seed edge IDs, produces (pos_graph, neg_graph, blocks):
    a positive graph over the batch edges, a negative graph pairing each head
    with ``num_negs`` sampled negative tails, and the multi-layer message
    passing blocks for all involved nodes.
    """
    def __init__(self, g, fanouts, neg_nseeds, sample_neighbors, num_negs, remove_edge):
        self.g = g
        self.fanouts = fanouts                 # one fanout per GNN layer
        self.sample_neighbors = sample_neighbors
        self.neg_sampler = NegativeSampler(g, neg_nseeds)
        self.num_negs = num_negs               # negatives per positive edge
        self.remove_edge = remove_edge         # drop train edges from frontiers
    def sample_blocks(self, seed_edges):
        n_edges = len(seed_edges)
        seed_edges = th.LongTensor(np.asarray(seed_edges))
        heads, tails = self.g.find_edges(seed_edges)
        # Each head is repeated num_negs times and paired with a sampled tail.
        neg_tails = self.neg_sampler(self.num_negs * n_edges)
        neg_heads = heads.view(-1, 1).expand(n_edges, self.num_negs).flatten()
        # Maintain the correspondence between heads, tails and negative tails as two
        # graphs.
        # pos_graph contains the correspondence between each head and its positive tail.
        # neg_graph contains the correspondence between each head and its negative tails.
        # Both pos_graph and neg_graph are first constructed with the same node space as
        # the original graph. Then they are compacted together with dgl.compact_graphs.
        pos_graph = dgl.graph((heads, tails), num_nodes=self.g.number_of_nodes())
        neg_graph = dgl.graph((neg_heads, neg_tails), num_nodes=self.g.number_of_nodes())
        pos_graph, neg_graph = dgl.compact_graphs([pos_graph, neg_graph])
        seeds = pos_graph.ndata[dgl.NID]
        blocks = []
        for fanout in self.fanouts:
            # For each seed node, sample ``fanout`` neighbors.
            frontier = self.sample_neighbors(self.g, seeds, fanout, replace=True)
            if self.remove_edge:
                # Remove all edges between heads and tails, as well as heads and neg_tails.
                _, _, edge_ids = frontier.edge_ids(
                    th.cat([heads, tails, neg_heads, neg_tails]),
                    th.cat([tails, heads, neg_tails, neg_heads]),
                    return_uv=True)
                frontier = dgl.remove_edges(frontier, edge_ids)
            # Then we compact the frontier into a bipartite graph for message passing.
            block = dgl.to_block(frontier, seeds)
            # Obtain the seed nodes for next layer.
            seeds = block.srcdata[dgl.NID]
            blocks.insert(0, block)
        # Pre-generate CSR format that it can be used in training directly
        return pos_graph, neg_graph, blocks
class PosNeighborSampler(object):
    """Plain node-wise neighbor sampler (no negatives) used for inference.

    Collates a batch of seed node IDs into a list of message-passing blocks,
    innermost layer first.
    """
    def __init__(self, g, fanouts, sample_neighbors):
        self.g = g
        self.fanouts = fanouts                 # one fanout per layer; -1 = all
        self.sample_neighbors = sample_neighbors
    def sample_blocks(self, seeds):
        seeds = th.LongTensor(np.asarray(seeds))
        blocks = []
        for fanout in self.fanouts:
            # For each seed node, sample ``fanout`` neighbors.
            frontier = self.sample_neighbors(self.g, seeds, fanout, replace=True)
            # Then we compact the frontier into a bipartite graph for message passing.
            block = dgl.to_block(frontier, seeds)
            # Obtain the seed nodes for next layer.
            seeds = block.srcdata[dgl.NID]
            blocks.insert(0, block)
        return blocks
class DistSAGE(SAGE):
    # Distributed GraphSAGE: reuses SAGE's layers and forward pass, but
    # overrides inference() to operate on a dgl.distributed.DistGraph,
    # storing intermediate activations in DistTensors.
    def __init__(self, in_feats, n_hidden, n_classes, n_layers,
                 activation, dropout):
        super(DistSAGE, self).__init__(in_feats, n_hidden, n_classes, n_layers,
                                       activation, dropout)
    def inference(self, g, x, batch_size, device):
        """
        Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
        g : the entire graph.
        x : the input of entire node set.
        The inference code is written in a fashion that it could handle any number of nodes and
        layers.
        """
        # During inference with sampling, multi-layer blocks are very inefficient because
        # lots of computations in the first few layers are repeated.
        # Therefore, we compute the representation of all nodes layer by layer. The nodes
        # on each layer are of course splitted in batches.
        # TODO: can we standardize this?
        nodes = dgl.distributed.node_split(np.arange(g.number_of_nodes()),
                                           g.get_partition_book(), force_even=True)
        y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_hidden), th.float32, 'h',
                                       persistent=True)
        for l, layer in enumerate(self.layers):
            if l == len(self.layers) - 1:
                # Last layer produces n_classes-wide output instead of n_hidden.
                y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_classes),
                                               th.float32, 'h_last', persistent=True)
            # fanout -1 == take the full neighborhood (no sampling).
            sampler = PosNeighborSampler(g, [-1], dgl.distributed.sample_neighbors)
            print('|V|={}, eval batch size: {}'.format(g.number_of_nodes(), batch_size))
            # Create PyTorch DataLoader for constructing blocks
            # NOTE(review): reads the module-level global ``args`` for
            # num_workers — confirm this is intentional.
            dataloader = DataLoader(
                dataset=nodes,
                batch_size=batch_size,
                collate_fn=sampler.sample_blocks,
                shuffle=False,
                drop_last=False,
                num_workers=args.num_workers)
            for blocks in tqdm.tqdm(dataloader):
                block = blocks[0]
                input_nodes = block.srcdata[dgl.NID]
                output_nodes = block.dstdata[dgl.NID]
                h = x[input_nodes].to(device)
                h_dst = h[:block.number_of_dst_nodes()]
                h = layer(block, (h, h_dst))
                if l != len(self.layers) - 1:
                    h = self.activation(h)
                    h = self.dropout(h)
                y[output_nodes] = h.cpu()
            # The output of this layer becomes the input of the next one.
            x = y
            g.barrier()
        return y
def load_subtensor(g, input_nodes, device):
    """Gather the feature rows of ``input_nodes`` from the graph's node data
    and move them onto ``device``; returns the resulting batch tensor."""
    features = g.ndata['features']
    return features[input_nodes].to(device)
class CrossEntropyLoss(nn.Module):
    # Link-prediction loss: scores each edge by the dot product of its
    # endpoint embeddings, labels positive-graph edges 1 and negative-graph
    # edges 0, and applies binary cross-entropy on the logits.
    def forward(self, block_outputs, pos_graph, neg_graph):
        with pos_graph.local_scope():
            # local_scope() keeps the temporary 'h'/'score' fields from
            # leaking into the graph after the computation.
            pos_graph.ndata['h'] = block_outputs
            pos_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
            pos_score = pos_graph.edata['score']
        with neg_graph.local_scope():
            neg_graph.ndata['h'] = block_outputs
            neg_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
            neg_score = neg_graph.edata['score']
        score = th.cat([pos_score, neg_score])
        label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)]).long()
        loss = F.binary_cross_entropy_with_logits(score, label.float())
        return loss
def generate_emb(model, g, inputs, batch_size, device):
    """Compute per-node embeddings with full-neighbor inference.

    g : the entire graph.
    inputs : the features of all the nodes.
    batch_size : number of nodes to compute at the same time.
    device : the device to evaluate on.
    """
    model.eval()  # disable dropout for deterministic embeddings
    with th.no_grad():  # no autograd bookkeeping needed at inference time
        return model.inference(g, inputs, batch_size, device)
def compute_acc(emb, labels, train_nids, val_nids, test_nids):
    """
    Compute the accuracy of prediction given the labels.
    We will fist train a LogisticRegression model using the trained embeddings,
    the training set, validation set and test set is provided as the arguments.
    The final result is predicted by the lr model.
    emb: The pretrained embeddings
    labels: The ground truth
    train_nids: The training set node ids
    val_nids: The validation set node ids
    test_nids: The test set node ids
    """
    # Pull the labelled slice of the (possibly distributed) embedding tensor
    # into a local numpy array.
    emb = emb[np.arange(labels.shape[0])].cpu().numpy()
    train_nids = train_nids.cpu().numpy()
    val_nids = val_nids.cpu().numpy()
    test_nids = test_nids.cpu().numpy()
    labels = labels.cpu().numpy()
    # Standardize features before fitting the linear probe.
    emb = (emb - emb.mean(0, keepdims=True)) / emb.std(0, keepdims=True)
    lr = lm.LogisticRegression(multi_class='multinomial', max_iter=10000)
    lr.fit(emb[train_nids], labels[train_nids])
    pred = lr.predict(emb)
    eval_acc = skm.accuracy_score(labels[val_nids], pred[val_nids])
    test_acc = skm.accuracy_score(labels[test_nids], pred[test_nids])
    return eval_acc, test_acc
def run(args, device, data):
    """Per-trainer unsupervised training loop: trains DistSAGE with a
    link-prediction objective, then evaluates the embeddings with a
    logistic-regression probe and saves them to 'emb.pt'."""
    # Unpack data
    train_eids, train_nids, in_feats, g, global_train_nid, global_valid_nid, global_test_nid, labels = data
    # Create sampler
    sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')], train_nids,
                              dgl.distributed.sample_neighbors, args.num_negs, args.remove_edge)
    # Create PyTorch DataLoader for constructing blocks
    dataloader = DataLoader(
        dataset=train_eids.numpy(),
        batch_size=args.batch_size,
        collate_fn=sampler.sample_blocks,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers)
    # Define model and optimizer (output width == num_hidden: embeddings,
    # not class logits, since training is unsupervised).
    model = DistSAGE(in_feats, args.num_hidden, args.num_hidden, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    if not args.standalone:
        model = th.nn.parallel.DistributedDataParallel(model)
    loss_fcn = CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Training loop
    #profiler = Profiler()
    #profiler.start()
    epoch = 0
    for epoch in range(args.num_epochs):
        # Per-epoch timing accumulators.
        sample_time = 0
        copy_time = 0
        forward_time = 0
        backward_time = 0
        update_time = 0
        num_seeds = 0
        num_inputs = 0  # NOTE(review): never incremented below — printed as 0
        step_time = []
        iter_t = []
        sample_t = []
        feat_copy_t = []
        forward_t = []
        backward_t = []
        update_t = []
        iter_tput = []
        start = time.time()
        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        for step, (pos_graph, neg_graph, blocks) in enumerate(dataloader):
            tic_step = time.time()
            sample_t.append(tic_step - start)
            # The nodes for input lies at the LHS side of the first block.
            # The nodes for output lies at the RHS side of the last block.
            input_nodes = blocks[0].srcdata[dgl.NID]
            # Load the input features as well as output labels
            batch_inputs = load_subtensor(g, input_nodes, device)
            copy_time = time.time()
            feat_copy_t.append(copy_time - tic_step)
            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, pos_graph, neg_graph)
            forward_end = time.time()
            optimizer.zero_grad()
            loss.backward()
            compute_end = time.time()
            forward_t.append(forward_end - copy_time)
            backward_t.append(compute_end - forward_end)
            # Aggregate gradients in multiple nodes.
            optimizer.step()
            update_t.append(time.time() - compute_end)
            pos_edges = pos_graph.number_of_edges()
            neg_edges = neg_graph.number_of_edges()
            step_t = time.time() - start
            step_time.append(step_t)
            iter_tput.append(pos_edges / step_t)
            num_seeds += pos_edges
            if step % args.log_every == 0:
                # Skip the first few steps when averaging throughput (warm-up).
                print('[{}] Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f} | time {:.3f} s' \
                      '| sample {:.3f} | copy {:.3f} | forward {:.3f} | backward {:.3f} | update {:.3f}'.format(
                    g.rank(), epoch, step, loss.item(), np.mean(iter_tput[3:]), np.sum(step_time[-args.log_every:]),
                    np.sum(sample_t[-args.log_every:]), np.sum(feat_copy_t[-args.log_every:]), np.sum(forward_t[-args.log_every:]),
                    np.sum(backward_t[-args.log_every:]), np.sum(update_t[-args.log_every:])))
            start = time.time()
        print('[{}]Epoch Time(s): {:.4f}, sample: {:.4f}, data copy: {:.4f}, forward: {:.4f}, backward: {:.4f}, update: {:.4f}, #seeds: {}, #inputs: {}'.format(
            g.rank(), np.sum(step_time), np.sum(sample_t), np.sum(feat_copy_t), np.sum(forward_t), np.sum(backward_t), np.sum(update_t), num_seeds, num_inputs))
        epoch += 1  # NOTE(review): redundant — `epoch` is rebound by the for loop
    # evaluate the embedding using LogisticRegression
    if args.standalone:
        pred = generate_emb(model,g, g.ndata['features'], args.batch_size_eval, device)
    else:
        # DistributedDataParallel wraps the model; unwrap for inference.
        pred = generate_emb(model.module, g, g.ndata['features'], args.batch_size_eval, device)
    if g.rank() == 0:
        eval_acc, test_acc = compute_acc(pred, labels, global_train_nid, global_valid_nid, global_test_nid)
        print('eval acc {:.4f}; test acc {:.4f}'.format(eval_acc, test_acc))
    # sync for eval and test
    if not args.standalone:
        th.distributed.barrier()
    if not args.standalone:
        g._client.barrier()
        # save features into file
        if g.rank() == 0:
            th.save(pred, 'emb.pt')
    else:
        feat = g.ndata['features']
        th.save(pred, 'emb.pt')
def main(args):
    """Connect to the distributed graph, split the workload for this trainer,
    and launch the training loop."""
    if not args.standalone:
        th.distributed.init_process_group(backend='gloo')
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name, conf_file=args.conf_path)
    print('rank:', g.rank())
    print('number of edges', g.number_of_edges())
    # Even split of all edges/nodes across trainers.
    train_eids = dgl.distributed.edge_split(th.ones((g.number_of_edges(),), dtype=th.bool), g.get_partition_book(), force_even=True)
    train_nids = dgl.distributed.node_split(th.ones((g.number_of_nodes(),), dtype=th.bool), g.get_partition_book())
    # Global (whole-graph) node id sets used only for the final evaluation.
    global_train_nid = th.LongTensor(np.nonzero(g.ndata['train_mask'][np.arange(g.number_of_nodes())]))
    global_valid_nid = th.LongTensor(np.nonzero(g.ndata['val_mask'][np.arange(g.number_of_nodes())]))
    global_test_nid = th.LongTensor(np.nonzero(g.ndata['test_mask'][np.arange(g.number_of_nodes())]))
    labels = g.ndata['labels'][np.arange(g.number_of_nodes())]
    device = th.device('cpu')
    # Pack data
    in_feats = g.ndata['features'].shape[1]
    global_train_nid = global_train_nid.squeeze()
    global_valid_nid = global_valid_nid.squeeze()
    global_test_nid = global_test_nid.squeeze()
    print("number of train {}".format(global_train_nid.shape[0]))
    print("number of valid {}".format(global_valid_nid.shape[0]))
    print("number of test {}".format(global_test_nid.shape[0]))
    data = train_eids, train_nids, in_feats, g, global_train_nid, global_valid_nid, global_test_nid, labels
    run(args, device, data)
    print("parent ends")
if __name__ == '__main__':
    # Command-line entry point: parse distributed-training options and run.
    parser = argparse.ArgumentParser(description='GCN')
    register_data_args(parser)
    parser.add_argument('--graph-name', type=str, help='graph name')
    parser.add_argument('--id', type=int, help='the partition id')
    parser.add_argument('--ip_config', type=str, help='The file for IP configuration')
    parser.add_argument('--conf_path', type=str, help='The path to the partition config file')
    parser.add_argument('--num-client', type=int, help='The number of clients')
    parser.add_argument('--n-classes', type=int, help='the number of classes')
    parser.add_argument('--gpu', type=int, default=0,
                        help="GPU device ID. Use -1 for CPU training")
    parser.add_argument('--num-epochs', type=int, default=20)
    parser.add_argument('--num-hidden', type=int, default=16)
    parser.add_argument('--num-layers', type=int, default=2)
    parser.add_argument('--fan-out', type=str, default='10,25')
    parser.add_argument('--batch-size', type=int, default=1000)
    parser.add_argument('--batch-size-eval', type=int, default=100000)
    parser.add_argument('--log-every', type=int, default=20)
    parser.add_argument('--eval-every', type=int, default=5)
    parser.add_argument('--lr', type=float, default=0.003)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--num-workers', type=int, default=0,
                        help="Number of sampling processes. Use 0 for no extra process.")
    parser.add_argument('--local_rank', type=int, help='get rank of the process')
    parser.add_argument('--standalone', action='store_true', help='run in the standalone mode')
    parser.add_argument('--num-negs', type=int, default=1)
    parser.add_argument('--neg-share', default=False, action='store_true',
                        help="sharing neg nodes for positive nodes")
    parser.add_argument('--remove-edge', default=False, action='store_true',
                        help="whether to remove edges during sampling")
    args = parser.parse_args()
    print(args)
    main(args)
| 42.892683
| 160
| 0.6396
|
a452e82334ec8afe9c77d97df93577f902af7935
| 63,692
|
py
|
Python
|
gpiozero/output_devices.py
|
MarcoGorelli/gpiozero
|
39b2eaccfb8283324f0188df7f2164ed9bd230ec
|
[
"BSD-3-Clause"
] | null | null | null |
gpiozero/output_devices.py
|
MarcoGorelli/gpiozero
|
39b2eaccfb8283324f0188df7f2164ed9bd230ec
|
[
"BSD-3-Clause"
] | null | null | null |
gpiozero/output_devices.py
|
MarcoGorelli/gpiozero
|
39b2eaccfb8283324f0188df7f2164ed9bd230ec
|
[
"BSD-3-Clause"
] | null | null | null |
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Andrew Scheller <github@loowis.durge.org>
# Copyright (c) 2015-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2015-2019 Ben Nuttall <ben@bennuttall.com>
# Copyright (c) 2019 tuftii <3215045+tuftii@users.noreply.github.com>
# Copyright (c) 2019 tuftii <pi@raspberrypi>
# Copyright (c) 2016 Ian Harcombe <ian.harcombe@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
str = type('')
from threading import Lock
from itertools import repeat, cycle, chain
from colorzero import Color
from collections import OrderedDict
try:
from math import log2
except ImportError:
from .compat import log2
import warnings
from .exc import OutputDeviceBadValue, GPIOPinMissing, PWMSoftwareFallback
from .devices import GPIODevice, Device, CompositeDevice
from .mixins import SourceMixin
from .threads import GPIOThread
from .tones import Tone
try:
from .pins.pigpio import PiGPIOFactory
except ImportError:
PiGPIOFactory = None
class OutputDevice(SourceMixin, GPIODevice):
    """
    Represents a generic GPIO output device.

    This class extends :class:`GPIODevice` to add facilities common to GPIO
    output devices: an :meth:`on` method to switch the device on, a
    corresponding :meth:`off` method, and a :meth:`toggle` method.

    :type pin: int or str
    :param pin:
        The GPIO pin that the device is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.

    :param bool active_high:
        If :data:`True` (the default), the :meth:`on` method will set the GPIO
        to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
        LOW (the :meth:`off` method always does the opposite).

    :type initial_value: bool or None
    :param initial_value:
        If :data:`False` (the default), the device will be off initially. If
        :data:`None`, the device will be left in whatever state the pin is
        found in when configured for output (warning: this can be on). If
        :data:`True`, the device will be switched on initially.

    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(
            self, pin=None, active_high=True, initial_value=False,
            pin_factory=None):
        super(OutputDevice, self).__init__(pin, pin_factory=pin_factory)
        self._lock = Lock()  # serializes toggle()'s read-modify-write
        self.active_high = active_high
        if initial_value is None:
            # Leave the pin in whatever state it was found in.
            self.pin.function = 'output'
        else:
            self.pin.output_with_state(self._value_to_state(initial_value))

    def _value_to_state(self, value):
        # Map a truthy/falsy value onto the pin state, honouring active_high.
        return bool(self._active_state if value else self._inactive_state)

    def _write(self, value):
        try:
            self.pin.state = self._value_to_state(value)
        except AttributeError:
            # self.pin is None after close(); raise the clearer "closed" error.
            self._check_open()
            raise

    def on(self):
        """
        Turns the device on.
        """
        self._write(True)

    def off(self):
        """
        Turns the device off.
        """
        self._write(False)

    def toggle(self):
        """
        Reverse the state of the device. If it's on, turn it off; if it's off,
        turn it on.
        """
        with self._lock:
            if self.is_active:
                self.off()
            else:
                self.on()

    @property
    def value(self):
        """
        Returns 1 if the device is currently active and 0 otherwise. Setting
        this property changes the state of the device.
        """
        return super(OutputDevice, self).value

    @value.setter
    def value(self, value):
        self._write(value)

    @property
    def active_high(self):
        """
        When :data:`True`, the :attr:`value` property is :data:`True` when the
        device's :attr:`~GPIODevice.pin` is high. When :data:`False` the
        :attr:`value` property is :data:`True` when the device's pin is low
        (i.e. the value is inverted).

        This property can be set after construction; be warned that changing it
        will invert :attr:`value` (i.e. changing this property doesn't change
        the device's pin state - it just changes how that state is
        interpreted).
        """
        return self._active_state

    @active_high.setter
    def active_high(self, value):
        self._active_state = True if value else False
        self._inactive_state = False if value else True

    def __repr__(self):
        try:
            return '<gpiozero.%s object on pin %r, active_high=%s, is_active=%s>' % (
                self.__class__.__name__, self.pin, self.active_high, self.is_active)
        except Exception:
            # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt. Fall back to the generic repr when attribute
            # access fails (e.g. the device has been closed).
            return super(OutputDevice, self).__repr__()
class DigitalOutputDevice(OutputDevice):
    """
    Represents a generic output device with typical on/off behaviour.

    This class extends :class:`OutputDevice` with a :meth:`blink` method which
    uses an optional background thread to handle toggling the device state
    without further interaction.

    :type pin: int or str
    :param pin:
        The GPIO pin that the device is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.

    :param bool active_high:
        If :data:`True` (the default), the :meth:`on` method will set the GPIO
        to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
        LOW (the :meth:`off` method always does the opposite).

    :type initial_value: bool or None
    :param initial_value:
        If :data:`False` (the default), the device will be off initially. If
        :data:`None`, the device will be left in whatever state the pin is
        found in when configured for output (warning: this can be on). If
        :data:`True`, the device will be switched on initially.

    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(
            self, pin=None, active_high=True, initial_value=False,
            pin_factory=None):
        # _blink_thread/_controller must exist before super().__init__ in case
        # initialization fails and close() (which calls _stop_blink) runs.
        self._blink_thread = None
        self._controller = None
        super(DigitalOutputDevice, self).__init__(
            pin, active_high, initial_value, pin_factory=pin_factory
        )

    @property
    def value(self):
        return super(DigitalOutputDevice, self).value

    @value.setter
    def value(self, value):
        # Any explicit state change cancels an in-progress blink first.
        self._stop_blink()
        self._write(value)

    def close(self):
        self._stop_blink()
        super(DigitalOutputDevice, self).close()

    def on(self):
        self._stop_blink()
        self._write(True)

    def off(self):
        self._stop_blink()
        self._write(False)

    def blink(self, on_time=1, off_time=1, n=None, background=True):
        """
        Make the device turn on and off repeatedly.

        :param float on_time:
            Number of seconds on. Defaults to 1 second.

        :param float off_time:
            Number of seconds off. Defaults to 1 second.

        :type n: int or None
        :param n:
            Number of times to blink; :data:`None` (the default) means forever.

        :param bool background:
            If :data:`True` (the default), start a background thread to
            continue blinking and return immediately. If :data:`False`, only
            return when the blink is finished (warning: the default value of
            *n* will result in this method never returning).
        """
        self._stop_blink()
        self._blink_thread = GPIOThread(
            target=self._blink_device, args=(on_time, off_time, n)
        )
        self._blink_thread.start()
        if not background:
            self._blink_thread.join()
            self._blink_thread = None

    def _stop_blink(self):
        # Detach from any composite-device controller driving this output.
        if getattr(self, '_controller', None):
            self._controller._stop_blink(self)
        self._controller = None
        # Stop and discard our own background blink thread, if running.
        if getattr(self, '_blink_thread', None):
            self._blink_thread.stop()
            self._blink_thread = None

    def _blink_device(self, on_time, off_time, n):
        # Background thread body: toggle the pin, using the thread's stopping
        # event as an interruptible sleep so stop() takes effect immediately.
        iterable = repeat(0) if n is None else repeat(0, n)
        for _ in iterable:
            self._write(True)
            if self._blink_thread.stopping.wait(on_time):
                break
            self._write(False)
            if self._blink_thread.stopping.wait(off_time):
                break
class LED(DigitalOutputDevice):
    """
    A single light emitting diode (LED), built on
    :class:`DigitalOutputDevice`.
    Typical wiring: connect the LED's cathode (short leg, flat side of the
    case) to a ground pin, and the anode (longer leg) to a GPIO pin via a
    current limiting resistor (the resistor may sit on either side of the
    LED).
    Lighting the LED is as simple as::
        from gpiozero import LED
        led = LED(17)
        led.on()
    :type pin: int or str
    :param pin:
        The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.
    :param bool active_high:
        If :data:`True` (the default), the LED operates normally with the
        wiring described above. If :data:`False`, wire the cathode to the
        GPIO pin instead, and the anode to a 3V3 pin (via a limiting
        resistor).
    :type initial_value: bool or None
    :param initial_value:
        If :data:`False` (the default), the LED starts off. If :data:`None`,
        the LED is left in whatever state the pin is found in when configured
        for output (warning: this can be on). If :data:`True`, the LED starts
        switched on.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    pass
# Readability alias: led.is_lit reads better than led.is_active
LED.is_lit = LED.is_active
class Buzzer(DigitalOutputDevice):
    """
    A digital (active) buzzer component, built on
    :class:`DigitalOutputDevice`.
    .. note::
        This interface only supports simple on/off commands; it cannot play
        a variety of tones (for that, see :class:`TonalBuzzer`).
    Typical wiring: connect the buzzer's cathode (negative pin) to a ground
    pin, and the other side to any GPIO pin.
    Sounding the buzzer is as simple as::
        from gpiozero import Buzzer
        bz = Buzzer(3)
        bz.on()
    :type pin: int or str
    :param pin:
        The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.
    :param bool active_high:
        If :data:`True` (the default), the buzzer operates normally with the
        wiring described above. If :data:`False`, wire the cathode to the
        GPIO pin, and the anode to a 3V3 pin.
    :type initial_value: bool or None
    :param initial_value:
        If :data:`False` (the default), the buzzer starts silent. If
        :data:`None`, the buzzer is left in whatever state the pin is found
        in when configured for output (warning: this can be on). If
        :data:`True`, the buzzer starts switched on.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    pass
# Readability alias: bz.beep(...) reads better than bz.blink(...)
Buzzer.beep = Buzzer.blink
class PWMOutputDevice(OutputDevice):
    """
    Generic output device configured for pulse-width modulation (PWM).
    :type pin: int or str
    :param pin:
        The GPIO pin that the device is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.
    :param bool active_high:
        If :data:`True` (the default), the :meth:`on` method will set the GPIO
        to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
        LOW (the :meth:`off` method always does the opposite).
    :param float initial_value:
        If 0 (the default), the device's duty cycle will be 0 initially.
        Other values between 0 and 1 can be specified as an initial duty cycle.
        Note that :data:`None` cannot be specified (unlike the parent class) as
        there is no way to tell PWM not to alter the state of the pin.
    :param int frequency:
        The frequency (in Hz) of pulses emitted to drive the device. Defaults
        to 100Hz.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(
            self, pin=None, active_high=True, initial_value=0, frequency=100,
            pin_factory=None):
        self._blink_thread = None
        self._controller = None
        if not 0 <= initial_value <= 1:
            raise OutputDeviceBadValue("initial_value must be between 0 and 1")
        # initial_value=None is passed up deliberately: the pin must not be
        # written until its PWM frequency has been configured below.
        super(PWMOutputDevice, self).__init__(
            pin, active_high, initial_value=None, pin_factory=pin_factory
        )
        try:
            # XXX need a way of setting these together
            self.pin.frequency = frequency
            self.value = initial_value
        except:
            # Best-effort cleanup on a failed init; the exception is re-raised.
            self.close()
            raise
    def close(self):
        try:
            self._stop_blink()
        except AttributeError:
            pass
        try:
            # Setting frequency to None disables PWM on the pin.
            self.pin.frequency = None
        except AttributeError:
            # If the pin's already None, ignore the exception
            pass
        super(PWMOutputDevice, self).close()
    def _state_to_value(self, state):
        # Duty cycle is inverted when active_high is False.
        return float(state if self.active_high else 1 - state)
    def _value_to_state(self, value):
        return float(value if self.active_high else 1 - value)
    def _write(self, value):
        if not 0 <= value <= 1:
            raise OutputDeviceBadValue("PWM value must be between 0 and 1")
        super(PWMOutputDevice, self)._write(value)
    @property
    def value(self):
        """
        The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values
        in between may be specified for varying levels of power in the device.
        """
        return super(PWMOutputDevice, self).value
    @value.setter
    def value(self, value):
        # Any explicit write cancels a blink/pulse in progress.
        self._stop_blink()
        self._write(value)
    def on(self):
        self._stop_blink()
        self._write(1)
    def off(self):
        self._stop_blink()
        self._write(0)
    def toggle(self):
        """
        Toggle the state of the device. If the device is currently off
        (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is
        1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will
        toggle it to 0.9, and so on.
        """
        self._stop_blink()
        self.value = 1 - self.value
    @property
    def is_active(self):
        """
        Returns :data:`True` if the device is currently active (:attr:`value`
        is non-zero) and :data:`False` otherwise.
        """
        return self.value != 0
    @property
    def frequency(self):
        """
        The frequency of the pulses used with the PWM device, in Hz. The
        default is 100Hz.
        """
        return self.pin.frequency
    @frequency.setter
    def frequency(self, value):
        self.pin.frequency = value
    def blink(
            self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
            n=None, background=True):
        """
        Make the device turn on and off repeatedly.
        :param float on_time:
            Number of seconds on. Defaults to 1 second.
        :param float off_time:
            Number of seconds off. Defaults to 1 second.
        :param float fade_in_time:
            Number of seconds to spend fading in. Defaults to 0.
        :param float fade_out_time:
            Number of seconds to spend fading out. Defaults to 0.
        :type n: int or None
        :param n:
            Number of times to blink; :data:`None` (the default) means forever.
        :param bool background:
            If :data:`True` (the default), start a background thread to
            continue blinking and return immediately. If :data:`False`, only
            return when the blink is finished (warning: the default value of
            *n* will result in this method never returning).
        """
        self._stop_blink()
        self._blink_thread = GPIOThread(
            target=self._blink_device,
            args=(on_time, off_time, fade_in_time, fade_out_time, n)
        )
        self._blink_thread.start()
        if not background:
            self._blink_thread.join()
            self._blink_thread = None
    def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True):
        """
        Make the device fade in and out repeatedly.
        :param float fade_in_time:
            Number of seconds to spend fading in. Defaults to 1.
        :param float fade_out_time:
            Number of seconds to spend fading out. Defaults to 1.
        :type n: int or None
        :param n:
            Number of times to pulse; :data:`None` (the default) means forever.
        :param bool background:
            If :data:`True` (the default), start a background thread to
            continue pulsing and return immediately. If :data:`False`, only
            return when the pulse is finished (warning: the default value of
            *n* will result in this method never returning).
        """
        # A pulse is simply a blink with no steady on/off phases.
        on_time = off_time = 0
        self.blink(
            on_time, off_time, fade_in_time, fade_out_time, n, background
        )
    def _stop_blink(self):
        # A composite controller (if any) is told to stop first, then the
        # local blink thread is stopped and discarded.
        if self._controller:
            self._controller._stop_blink(self)
            self._controller = None
        if self._blink_thread:
            self._blink_thread.stop()
            self._blink_thread = None
    def _blink_device(
            self, on_time, off_time, fade_in_time, fade_out_time, n, fps=25):
        # Build one on/off cycle as (value, delay) pairs, with the fades
        # rendered as fps linear steps per second, then repeat it n times
        # (or forever). The stopping event doubles as an interruptible sleep.
        sequence = []
        if fade_in_time > 0:
            sequence += [
                (i * (1 / fps) / fade_in_time, 1 / fps)
                for i in range(int(fps * fade_in_time))
            ]
        sequence.append((1, on_time))
        if fade_out_time > 0:
            sequence += [
                (1 - (i * (1 / fps) / fade_out_time), 1 / fps)
                for i in range(int(fps * fade_out_time))
            ]
        sequence.append((0, off_time))
        sequence = (
            cycle(sequence) if n is None else
            chain.from_iterable(repeat(sequence, n))
        )
        for value, delay in sequence:
            self._write(value)
            if self._blink_thread.stopping.wait(delay):
                break
class TonalBuzzer(SourceMixin, CompositeDevice):
    """
    Extends :class:`CompositeDevice` and represents a tonal buzzer.
    :type pin: int or str
    :param pin:
        The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.
    :param float initial_value:
        If :data:`None` (the default), the buzzer will be off initially. Values
        between -1 and 1 can be specified as an initial value for the buzzer.
    :type mid_tone: int or str
    :param mid_tone:
        The tone which is represented the device's middle value (0). The
        default is "A4" (MIDI note 69).
    :param int octaves:
        The number of octaves to allow away from the base note. The default is
        1, meaning a value of -1 goes one octave below the base note, and one
        above, i.e. from A3 to A5 with the default base note of A4.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    .. note::
        Note that this class does not currently work with
        :class:`~gpiozero.pins.pigpio.PiGPIOFactory`.
    """
    def __init__(self, pin=None, initial_value=None, mid_tone=Tone("A4"),
                 octaves=1, pin_factory=None):
        # _mid_tone is pre-set to None so __repr__/close are safe if
        # validation below fails part-way through.
        self._mid_tone = None
        super(TonalBuzzer, self).__init__(
            pwm_device=PWMOutputDevice(
                pin=pin, pin_factory=pin_factory
            ), pin_factory=pin_factory)
        try:
            self._mid_tone = Tone(mid_tone)
            if not (0 < octaves <= 9):
                raise ValueError('octaves must be between 1 and 9')
            self._octaves = octaves
            # Accessing .note validates that the min/max tones derived from
            # mid_tone +/- octaves stay within the representable range.
            try:
                self.min_tone.note
            except ValueError:
                raise ValueError(
                    '%r is too low for %d octaves' %
                    (self._mid_tone, self._octaves))
            try:
                self.max_tone.note
            except ValueError:
                raise ValueError(
                    '%r is too high for %d octaves' %
                    (self._mid_tone, self._octaves))
            self.value = initial_value
        except:
            # Best-effort cleanup on a failed init; the exception is re-raised.
            self.close()
            raise
    def __repr__(self):
        try:
            if self.value is None:
                return '<gpiozero.TonalBuzzer object on pin %r, silent>' % (
                    self.pwm_device.pin,)
            else:
                return '<gpiozero.TonalBuzzer object on pin %r, playing %s>' % (
                    self.pwm_device.pin, self.tone.note)
        except:
            # Best-effort: fall back to the parent repr if any attribute is
            # unavailable (e.g. the device is closed).
            return super(TonalBuzzer, self).__repr__()
    def play(self, tone):
        """
        Play the given *tone*. This can either be an instance of
        :class:`~gpiozero.tones.Tone` or can be anything that could be used to
        construct an instance of :class:`~gpiozero.tones.Tone`.
        For example::
            >>> from gpiozero import TonalBuzzer
            >>> from gpiozero.tones import Tone
            >>> b = TonalBuzzer(17)
            >>> b.play(Tone("A4"))
            >>> b.play(Tone(220.0)) # Hz
            >>> b.play(Tone(60)) # middle C in MIDI notation
            >>> b.play("A4")
            >>> b.play(220.0)
            >>> b.play(60)
        """
        if tone is None:
            self.value = None
        else:
            if not isinstance(tone, Tone):
                tone = Tone(tone)
            freq = tone.frequency
            if self.min_tone.frequency <= tone <= self.max_tone.frequency:
                # A tone is produced by driving the PWM pin at the tone's
                # frequency with a 50% duty cycle.
                self.pwm_device.pin.frequency = freq
                self.pwm_device.value = 0.5
            else:
                raise ValueError("tone is out of the device's range")
    def stop(self):
        """
        Turn the buzzer off. This is equivalent to setting :attr:`value` to
        :data:`None`.
        """
        self.value = None
    @property
    def tone(self):
        """
        Returns the :class:`~gpiozero.tones.Tone` that the buzzer is currently
        playing, or :data:`None` if the buzzer is silent. This property can
        also be set to play the specified tone.
        """
        # A frequency of None means PWM is disabled, i.e. the buzzer is silent.
        if self.pwm_device.pin.frequency is None:
            return None
        else:
            return Tone.from_frequency(self.pwm_device.pin.frequency)
    @tone.setter
    def tone(self, value):
        self.play(value)
    @property
    def value(self):
        """
        Represents the state of the buzzer as a value between -1 (representing
        the minimum tone) and 1 (representing the maximum tone). This can also
        be the special value :data:`None` indicating that the buzzer is
        currently silent.
        """
        if self.pwm_device.pin.frequency is None:
            return None
        else:
            try:
                # Invert the exponential mapping used in the setter: the
                # value is the (base-2) log of the frequency ratio, scaled
                # by the octave range.
                return log2(
                    self.pwm_device.pin.frequency / self.mid_tone.frequency
                ) / self.octaves
            except ZeroDivisionError:
                return 0.0
    @value.setter
    def value(self, value):
        if value is None:
            self.pwm_device.pin.frequency = None
        elif -1 <= value <= 1:
            # Each unit of value spans `octaves` octaves, i.e. a factor of
            # 2**octaves in frequency relative to mid_tone.
            freq = self.mid_tone.frequency * 2 ** (self.octaves * value)
            self.pwm_device.pin.frequency = freq
            self.pwm_device.value = 0.5
        else:
            raise OutputDeviceBadValue(
                'TonalBuzzer value must be between -1 and 1, or None')
    @property
    def is_active(self):
        """
        Returns :data:`True` if the buzzer is currently playing, otherwise
        :data:`False`.
        """
        return self.value is not None
    @property
    def octaves(self):
        """
        The number of octaves available (above and below mid_tone).
        """
        return self._octaves
    @property
    def min_tone(self):
        """
        The lowest tone that the buzzer can play, i.e. the tone played
        when :attr:`value` is -1.
        """
        # 12 semitones per octave.
        return self._mid_tone.down(12 * self.octaves)
    @property
    def mid_tone(self):
        """
        The middle tone available, i.e. the tone played when :attr:`value` is
        0.
        """
        return self._mid_tone
    @property
    def max_tone(self):
        """
        The highest tone that the buzzer can play, i.e. the tone played when
        :attr:`value` is 1.
        """
        return self._mid_tone.up(12 * self.octaves)
class PWMLED(PWMOutputDevice):
    """
    A light emitting diode (LED) with variable brightness, built on
    :class:`PWMOutputDevice`.
    A typical circuit connects a GPIO pin to the anode (long leg) of the LED,
    and the cathode (short leg) to ground, optionally with a resistor to
    prevent the LED from burning out.
    :type pin: int or str
    :param pin:
        The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.
    :param bool active_high:
        If :data:`True` (the default), the :meth:`on` method will set the GPIO
        to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
        LOW (the :meth:`off` method always does the opposite).
    :param float initial_value:
        If ``0`` (the default), the LED starts off. Other values between 0
        and 1 can be given as an initial brightness. Note that :data:`None`
        cannot be specified (unlike the parent class) as there is no way to
        tell PWM not to alter the state of the pin.
    :param int frequency:
        The frequency (in Hz) of pulses emitted to drive the LED. Defaults
        to 100Hz.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    pass
# Readability alias: led.is_lit reads better than led.is_active
PWMLED.is_lit = PWMLED.is_active
class RGBLED(SourceMixin, Device):
    """
    Extends :class:`Device` and represents a full color LED component (composed
    of red, green, and blue LEDs).
    Connect the common cathode (longest leg) to a ground pin; connect each of
    the other legs (representing the red, green, and blue anodes) to any GPIO
    pins. You should use three limiting resistors (one per anode).
    The following code will make the LED yellow::
        from gpiozero import RGBLED
        led = RGBLED(2, 3, 4)
        led.color = (1, 1, 0)
    The `colorzero`_ library is also supported::
        from gpiozero import RGBLED
        from colorzero import Color
        led = RGBLED(2, 3, 4)
        led.color = Color('yellow')
    :type red: int or str
    :param red:
        The GPIO pin that controls the red component of the RGB LED. See
        :ref:`pin-numbering` for valid pin numbers. If this is :data:`None` a
        :exc:`GPIODeviceError` will be raised.
    :type green: int or str
    :param green:
        The GPIO pin that controls the green component of the RGB LED.
    :type blue: int or str
    :param blue:
        The GPIO pin that controls the blue component of the RGB LED.
    :param bool active_high:
        Set to :data:`True` (the default) for common cathode RGB LEDs. If you
        are using a common anode RGB LED, set this to :data:`False`.
    :type initial_value: ~colorzero.Color or tuple
    :param initial_value:
        The initial color for the RGB LED. Defaults to black ``(0, 0, 0)``.
    :param bool pwm:
        If :data:`True` (the default), construct :class:`PWMLED` instances for
        each component of the RGBLED. If :data:`False`, construct regular
        :class:`LED` instances, which prevents smooth color graduations.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    .. _colorzero: https://colorzero.readthedocs.io/
    """
    def __init__(
            self, red=None, green=None, blue=None, active_high=True,
            initial_value=(0, 0, 0), pwm=True, pin_factory=None):
        # _leds is pre-set to an empty tuple so close() is safe if
        # construction fails part-way through.
        self._leds = ()
        self._blink_thread = None
        if not all(p is not None for p in [red, green, blue]):
            raise GPIOPinMissing('red, green, and blue pins must be provided')
        LEDClass = PWMLED if pwm else LED
        super(RGBLED, self).__init__(pin_factory=pin_factory)
        # One child LED per color channel, in (red, green, blue) order.
        self._leds = tuple(
            LEDClass(pin, active_high, pin_factory=pin_factory)
            for pin in (red, green, blue)
        )
        self.value = initial_value
    def close(self):
        if getattr(self, '_leds', None):
            self._stop_blink()
            for led in self._leds:
                led.close()
        self._leds = ()
        super(RGBLED, self).close()
    @property
    def closed(self):
        return len(self._leds) == 0
    @property
    def value(self):
        """
        Represents the color of the LED as an RGB 3-tuple of ``(red, green,
        blue)`` where each value is between 0 and 1 if *pwm* was :data:`True`
        when the class was constructed (and only 0 or 1 if not).
        For example, red would be ``(1, 0, 0)`` and yellow would be ``(1, 1,
        0)``, while orange would be ``(1, 0.5, 0)``.
        """
        return tuple(led.value for led in self._leds)
    @value.setter
    def value(self, value):
        # Validate every component before touching any LED, so a bad value
        # leaves the color unchanged.
        for component in value:
            if not 0 <= component <= 1:
                raise OutputDeviceBadValue(
                    'each RGB color component must be between 0 and 1')
            if isinstance(self._leds[0], LED):
                # Non-PWM LEDs can only be fully on or fully off.
                if component not in (0, 1):
                    raise OutputDeviceBadValue(
                        'each RGB color component must be 0 or 1 with non-PWM '
                        'RGBLEDs')
        self._stop_blink()
        for led, v in zip(self._leds, value):
            led.value = v
    @property
    def is_active(self):
        """
        Returns :data:`True` if the LED is currently active (not black) and
        :data:`False` otherwise.
        """
        return self.value != (0, 0, 0)
    is_lit = is_active
    @property
    def color(self):
        """
        Represents the color of the LED as a :class:`~colorzero.Color` object.
        """
        return Color(*self.value)
    @color.setter
    def color(self, value):
        self.value = value
    @property
    def red(self):
        """
        Represents the red element of the LED as a :class:`~colorzero.Red`
        object.
        """
        return self.color.red
    @red.setter
    def red(self, value):
        self._stop_blink()
        r, g, b = self.value
        self.value = value, g, b
    @property
    def green(self):
        """
        Represents the green element of the LED as a :class:`~colorzero.Green`
        object.
        """
        return self.color.green
    @green.setter
    def green(self, value):
        self._stop_blink()
        r, g, b = self.value
        self.value = r, value, b
    @property
    def blue(self):
        """
        Represents the blue element of the LED as a :class:`~colorzero.Blue`
        object.
        """
        return self.color.blue
    @blue.setter
    def blue(self, value):
        self._stop_blink()
        r, g, b = self.value
        self.value = r, g, value
    def on(self):
        """
        Turn the LED on. This equivalent to setting the LED color to white
        ``(1, 1, 1)``.
        """
        self.value = (1, 1, 1)
    def off(self):
        """
        Turn the LED off. This is equivalent to setting the LED color to black
        ``(0, 0, 0)``.
        """
        self.value = (0, 0, 0)
    def toggle(self):
        """
        Toggle the state of the device. If the device is currently off
        (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on
        (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,
        this method inverts the color.
        """
        r, g, b = self.value
        self.value = (1 - r, 1 - g, 1 - b)
    def blink(
            self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
            on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
        """
        Make the device turn on and off repeatedly.
        :param float on_time:
            Number of seconds on. Defaults to 1 second.
        :param float off_time:
            Number of seconds off. Defaults to 1 second.
        :param float fade_in_time:
            Number of seconds to spend fading in. Defaults to 0. Must be 0 if
            *pwm* was :data:`False` when the class was constructed
            (:exc:`ValueError` will be raised if not).
        :param float fade_out_time:
            Number of seconds to spend fading out. Defaults to 0. Must be 0 if
            *pwm* was :data:`False` when the class was constructed
            (:exc:`ValueError` will be raised if not).
        :type on_color: ~colorzero.Color or tuple
        :param on_color:
            The color to use when the LED is "on". Defaults to white.
        :type off_color: ~colorzero.Color or tuple
        :param off_color:
            The color to use when the LED is "off". Defaults to black.
        :type n: int or None
        :param n:
            Number of times to blink; :data:`None` (the default) means forever.
        :param bool background:
            If :data:`True` (the default), start a background thread to
            continue blinking and return immediately. If :data:`False`, only
            return when the blink is finished (warning: the default value of
            *n* will result in this method never returning).
        """
        if isinstance(self._leds[0], LED):
            # Fades require PWM; digital LEDs can only switch on/off.
            if fade_in_time:
                raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs')
            if fade_out_time:
                raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs')
        self._stop_blink()
        self._blink_thread = GPIOThread(
            target=self._blink_device,
            args=(
                on_time, off_time, fade_in_time, fade_out_time,
                on_color, off_color, n
            )
        )
        self._blink_thread.start()
        if not background:
            self._blink_thread.join()
            self._blink_thread = None
    def pulse(
            self, fade_in_time=1, fade_out_time=1,
            on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
        """
        Make the device fade in and out repeatedly.
        :param float fade_in_time:
            Number of seconds to spend fading in. Defaults to 1.
        :param float fade_out_time:
            Number of seconds to spend fading out. Defaults to 1.
        :type on_color: ~colorzero.Color or tuple
        :param on_color:
            The color to use when the LED is "on". Defaults to white.
        :type off_color: ~colorzero.Color or tuple
        :param off_color:
            The color to use when the LED is "off". Defaults to black.
        :type n: int or None
        :param n:
            Number of times to pulse; :data:`None` (the default) means forever.
        :param bool background:
            If :data:`True` (the default), start a background thread to
            continue pulsing and return immediately. If :data:`False`, only
            return when the pulse is finished (warning: the default value of
            *n* will result in this method never returning).
        """
        # A pulse is simply a blink with no steady on/off phases.
        on_time = off_time = 0
        self.blink(
            on_time, off_time, fade_in_time, fade_out_time,
            on_color, off_color, n, background
        )
    def _stop_blink(self, led=None):
        # If this is called with a single led, we stop all blinking anyway
        if self._blink_thread:
            self._blink_thread.stop()
            self._blink_thread = None
    def _blink_device(
            self, on_time, off_time, fade_in_time, fade_out_time, on_color,
            off_color, n, fps=25):
        # Define a simple lambda to perform linear interpolation between
        # off_color and on_color
        lerp = lambda t, fade_in: tuple(
            (1 - t) * off + t * on
            if fade_in else
            (1 - t) * on + t * off
            for off, on in zip(off_color, on_color)
        )
        # Build one blink cycle as (color, delay) pairs, rendering fades as
        # fps interpolation steps per second, then repeat it n times (or
        # forever).
        sequence = []
        if fade_in_time > 0:
            sequence += [
                (lerp(i * (1 / fps) / fade_in_time, True), 1 / fps)
                for i in range(int(fps * fade_in_time))
            ]
        sequence.append((on_color, on_time))
        if fade_out_time > 0:
            sequence += [
                (lerp(i * (1 / fps) / fade_out_time, False), 1 / fps)
                for i in range(int(fps * fade_out_time))
            ]
        sequence.append((off_color, off_time))
        sequence = (
            cycle(sequence) if n is None else
            chain.from_iterable(repeat(sequence, n))
        )
        # Register this composite as each child's controller so a write to an
        # individual LED cancels the coordinated blink via _stop_blink.
        for l in self._leds:
            l._controller = self
        for value, delay in sequence:
            # Write directly (bypassing the value setter) so the children do
            # not each try to stop this very blink thread.
            for l, v in zip(self._leds, value):
                l._write(v)
            if self._blink_thread.stopping.wait(delay):
                break
class Motor(SourceMixin, CompositeDevice):
    """
    Extends :class:`CompositeDevice` and represents a generic motor
    connected to a bi-directional motor driver circuit (i.e. an `H-bridge`_).
    Attach an `H-bridge`_ motor controller to your Pi; connect a power source
    (e.g. a battery pack or the 5V pin) to the controller; connect the outputs
    of the controller board to the two terminals of the motor; connect the
    inputs of the controller board to two GPIO pins.
    .. _H-bridge: https://en.wikipedia.org/wiki/H_bridge
    The following code will make the motor turn "forwards"::
        from gpiozero import Motor
        motor = Motor(17, 18)
        motor.forward()
    :type forward: int or str
    :param forward:
        The GPIO pin that the forward input of the motor driver chip is
        connected to. See :ref:`pin-numbering` for valid pin numbers. If this
        is :data:`None` a :exc:`GPIODeviceError` will be raised.
    :type backward: int or str
    :param backward:
        The GPIO pin that the backward input of the motor driver chip is
        connected to. See :ref:`pin-numbering` for valid pin numbers. If this
        is :data:`None` a :exc:`GPIODeviceError` will be raised.
    :type enable: int or str or None
    :param enable:
        The GPIO pin that enables the motor. Required for *some* motor
        controller boards. See :ref:`pin-numbering` for valid pin numbers.
    :param bool pwm:
        If :data:`True` (the default), construct :class:`PWMOutputDevice`
        instances for the motor controller pins, allowing both direction and
        variable speed control. If :data:`False`, construct
        :class:`DigitalOutputDevice` instances, allowing only direction
        control.
    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(self, forward=None, backward=None, enable=None, pwm=True,
                 pin_factory=None):
        # Test against None explicitly: pin 0 is a valid (but falsy) pin.
        if not all(p is not None for p in [forward, backward]):
            raise GPIOPinMissing(
                'forward and backward pins must be provided'
            )
        PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
        # Forward pin_factory to every sub-device and to the composite itself
        # (as PhaseEnableMotor does) so a non-default factory is honoured.
        devices = OrderedDict((
            ('forward_device', PinClass(forward, pin_factory=pin_factory)),
            ('backward_device', PinClass(backward, pin_factory=pin_factory)),
        ))
        if enable is not None:
            # The enable pin is always digital and defaults to "on" so the
            # driver responds to the direction inputs immediately.
            devices['enable_device'] = DigitalOutputDevice(
                enable, initial_value=True, pin_factory=pin_factory)
        super(Motor, self).__init__(
            _order=devices.keys(), pin_factory=pin_factory, **devices)
    @property
    def value(self):
        """
        Represents the speed of the motor as a floating point value between -1
        (full speed backward) and 1 (full speed forward), with 0 representing
        stopped.
        """
        return self.forward_device.value - self.backward_device.value
    @value.setter
    def value(self, value):
        if not -1 <= value <= 1:
            raise OutputDeviceBadValue("Motor value must be between -1 and 1")
        if value > 0:
            try:
                self.forward(value)
            except ValueError as e:
                # Re-raise as the exception type value-setters are documented
                # to raise (e.g. non-PWM motors reject fractional speeds).
                raise OutputDeviceBadValue(e)
        elif value < 0:
            try:
                self.backward(-value)
            except ValueError as e:
                raise OutputDeviceBadValue(e)
        else:
            self.stop()
    @property
    def is_active(self):
        """
        Returns :data:`True` if the motor is currently running and
        :data:`False` otherwise.
        """
        return self.value != 0
    def forward(self, speed=1):
        """
        Drive the motor forwards.
        :param float speed:
            The speed at which the motor should turn. Can be any value between
            0 (stopped) and the default 1 (maximum speed) if *pwm* was
            :data:`True` when the class was constructed (and only 0 or 1 if
            not).
        """
        if not 0 <= speed <= 1:
            raise ValueError('forward speed must be between 0 and 1')
        if isinstance(self.forward_device, DigitalOutputDevice):
            if speed not in (0, 1):
                raise ValueError(
                    'forward speed must be 0 or 1 with non-PWM Motors')
        # Release the opposite direction first so both inputs are never
        # driven simultaneously.
        self.backward_device.off()
        self.forward_device.value = speed
    def backward(self, speed=1):
        """
        Drive the motor backwards.
        :param float speed:
            The speed at which the motor should turn. Can be any value between
            0 (stopped) and the default 1 (maximum speed) if *pwm* was
            :data:`True` when the class was constructed (and only 0 or 1 if
            not).
        """
        if not 0 <= speed <= 1:
            raise ValueError('backward speed must be between 0 and 1')
        if isinstance(self.backward_device, DigitalOutputDevice):
            if speed not in (0, 1):
                raise ValueError(
                    'backward speed must be 0 or 1 with non-PWM Motors')
        self.forward_device.off()
        self.backward_device.value = speed
    def reverse(self):
        """
        Reverse the current direction of the motor. If the motor is currently
        idle this does nothing. Otherwise, the motor's direction will be
        reversed at the current speed.
        """
        self.value = -self.value
    def stop(self):
        """
        Stop the motor.
        """
        self.forward_device.off()
        self.backward_device.off()
class PhaseEnableMotor(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a generic motor connected
to a Phase/Enable motor driver circuit; the phase of the driver controls
whether the motor turns forwards or backwards, while enable controls the
speed with PWM.
The following code will make the motor turn "forwards"::
from gpiozero import PhaseEnableMotor
motor = PhaseEnableMotor(12, 5)
motor.forward()
:type phase: int or str
:param phase:
The GPIO pin that the phase (direction) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type enable: int or str
:param enable:
The GPIO pin that the enable (speed) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMOutputDevice`
instances for the motor controller pins, allowing both direction and
variable speed control. If :data:`False`, construct
:class:`DigitalOutputDevice` instances, allowing only direction
control.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(self, phase=None, enable=None, pwm=True, pin_factory=None):
if not all([phase, enable]):
raise GPIOPinMissing('phase and enable pins must be provided')
PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
super(PhaseEnableMotor, self).__init__(
phase_device=DigitalOutputDevice(phase, pin_factory=pin_factory),
enable_device=PinClass(enable, pin_factory=pin_factory),
_order=('phase_device', 'enable_device'),
pin_factory=pin_factory
)
@property
def value(self):
"""
Represents the speed of the motor as a floating point value between -1
(full speed backward) and 1 (full speed forward).
"""
return (
-self.enable_device.value
if self.phase_device.is_active else
self.enable_device.value
)
@value.setter
def value(self, value):
if not -1 <= value <= 1:
raise OutputDeviceBadValue("Motor value must be between -1 and 1")
if value > 0:
self.forward(value)
elif value < 0:
self.backward(-value)
else:
self.stop()
@property
def is_active(self):
"""
Returns :data:`True` if the motor is currently running and
:data:`False` otherwise.
"""
return self.value != 0
def forward(self, speed=1):
"""
Drive the motor forwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'forward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.off()
self.enable_device.value = speed
def backward(self, speed=1):
"""
Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'backward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.on()
self.enable_device.value = speed
def reverse(self):
"""
Reverse the current direction of the motor. If the motor is currently
idle this does nothing. Otherwise, the motor's direction will be
reversed at the current speed.
"""
self.value = -self.value
    def stop(self):
        """
        Stop the motor by switching the enable (speed) line off; the phase
        (direction) line is left untouched.
        """
        self.enable_device.off()
class Servo(SourceMixin, CompositeDevice):
    """
    Extends :class:`CompositeDevice` and represents a PWM-controlled servo
    motor connected to a GPIO pin.

    Connect a power source (e.g. a battery pack or the 5V pin) to the power
    cable of the servo (this is typically colored red); connect the ground
    cable of the servo (typically colored black or brown) to the negative of
    your battery pack, or a GND pin; connect the final cable (typically colored
    white or orange) to the GPIO pin you wish to use for controlling the servo.

    The following code will make the servo move between its minimum, maximum,
    and mid-point positions with a pause between each::

        from gpiozero import Servo
        from time import sleep

        servo = Servo(17)
        while True:
            servo.min()
            sleep(1)
            servo.mid()
            sleep(1)
            servo.max()
            sleep(1)

    You can also use the :attr:`value` property to move the servo to a
    particular position, on a scale from -1 (min) to 1 (max) where 0 is the
    mid-point::

        from gpiozero import Servo

        servo = Servo(17)
        servo.value = 0.5

    .. note::

        To reduce servo jitter, use the pigpio pin driver rather than the default
        RPi.GPIO driver (pigpio uses DMA sampling for much more precise edge
        timing). See :ref:`changing-pin-factory` for further information.

    :type pin: int or str
    :param pin:
        The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.

    :param float initial_value:
        If ``0`` (the default), the device's mid-point will be set initially.
        Other values between -1 and +1 can be specified as an initial position.
        :data:`None` means to start the servo un-controlled (see
        :attr:`value`).

    :param float min_pulse_width:
        The pulse width corresponding to the servo's minimum position. This
        defaults to 1ms.

    :param float max_pulse_width:
        The pulse width corresponding to the servo's maximum position. This
        defaults to 2ms.

    :param float frame_width:
        The length of time between servo control pulses measured in seconds.
        This defaults to 20ms which is a common value for servos.

    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(
            self, pin=None, initial_value=0.0,
            min_pulse_width=1/1000, max_pulse_width=2/1000,
            frame_width=20/1000, pin_factory=None):
        if min_pulse_width >= max_pulse_width:
            raise ValueError('min_pulse_width must be less than max_pulse_width')
        if max_pulse_width >= frame_width:
            raise ValueError('max_pulse_width must be less than frame_width')
        self._frame_width = frame_width
        # Express the pulse-width bounds as duty-cycle fractions of one frame;
        # value <-> duty-cycle conversions below work in these fractions.
        self._min_dc = min_pulse_width / frame_width
        self._dc_range = (max_pulse_width - min_pulse_width) / frame_width
        self._min_value = -1
        self._value_range = 2
        super(Servo, self).__init__(
            pwm_device=PWMOutputDevice(
                pin, frequency=int(1 / frame_width), pin_factory=pin_factory
            ),
            pin_factory=pin_factory
        )
        if PiGPIOFactory is None or not isinstance(self.pin_factory, PiGPIOFactory):
            warnings.warn(PWMSoftwareFallback(
                # BUGFIX: the two adjacent literals previously concatenated to
                # "...pin factory.See https..." (missing space).
                'To reduce servo jitter, use the pigpio pin factory. '
                'See https://gpiozero.readthedocs.io/en/stable/api_output.html#servo for more info'
            ))
        try:
            self.value = initial_value
        except:
            # Deliberately broad: tear down the half-constructed device on any
            # failure, then re-raise the original exception.
            self.close()
            raise

    @property
    def frame_width(self):
        """
        The time between control pulses, measured in seconds.
        """
        return self._frame_width

    @property
    def min_pulse_width(self):
        """
        The control pulse width corresponding to the servo's minimum position,
        measured in seconds.
        """
        return self._min_dc * self.frame_width

    @property
    def max_pulse_width(self):
        """
        The control pulse width corresponding to the servo's maximum position,
        measured in seconds.
        """
        return (self._dc_range * self.frame_width) + self.min_pulse_width

    @property
    def pulse_width(self):
        """
        Returns the current pulse width controlling the servo, or :data:`None`
        when the servo is uncontrolled (no frequency set on the pin).
        """
        if self.pwm_device.pin.frequency is None:
            return None
        else:
            return self.pwm_device.pin.state * self.frame_width

    def min(self):
        """
        Set the servo to its minimum position.
        """
        self.value = -1

    def mid(self):
        """
        Set the servo to its mid-point position.
        """
        self.value = 0

    def max(self):
        """
        Set the servo to its maximum position.
        """
        self.value = 1

    def detach(self):
        """
        Temporarily disable control of the servo. This is equivalent to
        setting :attr:`value` to :data:`None`.
        """
        self.value = None

    def _get_value(self):
        if self.pwm_device.pin.frequency is None:
            return None
        else:
            # Map the pin's duty cycle back onto the -1..1 value scale.
            return (
                ((self.pwm_device.pin.state - self._min_dc) / self._dc_range) *
                self._value_range + self._min_value)

    @property
    def value(self):
        """
        Represents the position of the servo as a value between -1 (the minimum
        position) and +1 (the maximum position). This can also be the special
        value :data:`None` indicating that the servo is currently
        "uncontrolled", i.e. that no control signal is being sent. Typically
        this means the servo's position remains unchanged, but that it can be
        moved by hand.
        """
        result = self._get_value()
        if result is None:
            return result
        else:
            # NOTE: This round() only exists to ensure we don't confuse people
            # by returning 2.220446049250313e-16 as the default initial value
            # instead of 0. The reason _get_value and _set_value are split
            # out is for descendents that require the un-rounded values for
            # accuracy
            return round(result, 14)

    @value.setter
    def value(self, value):
        if value is None:
            # Dropping the frequency stops the control signal entirely.
            self.pwm_device.pin.frequency = None
        elif -1 <= value <= 1:
            self.pwm_device.pin.frequency = int(1 / self.frame_width)
            self.pwm_device.pin.state = (
                self._min_dc + self._dc_range *
                ((value - self._min_value) / self._value_range)
            )
        else:
            raise OutputDeviceBadValue(
                "Servo value must be between -1 and 1, or None")

    @property
    def is_active(self):
        return self.value is not None
class AngularServo(Servo):
    """
    Extends :class:`Servo` and represents a rotational PWM-controlled servo
    motor which can be set to particular angles (assuming valid minimum and
    maximum angles are provided to the constructor).

    Connect a power source (e.g. a battery pack or the 5V pin) to the power
    cable of the servo (this is typically colored red); connect the ground
    cable of the servo (typically colored black or brown) to the negative of
    your battery pack, or a GND pin; connect the final cable (typically colored
    white or orange) to the GPIO pin you wish to use for controlling the servo.

    Next, calibrate the angles that the servo can rotate to. In an interactive
    Python session, construct a :class:`Servo` instance. The servo should move
    to its mid-point by default. Set the servo to its minimum value, and
    measure the angle from the mid-point. Set the servo to its maximum value,
    and again measure the angle::

        >>> from gpiozero import Servo
        >>> s = Servo(17)
        >>> s.min() # measure the angle
        >>> s.max() # measure the angle

    You should now be able to construct an :class:`AngularServo` instance
    with the correct bounds::

        >>> from gpiozero import AngularServo
        >>> s = AngularServo(17, min_angle=-42, max_angle=44)
        >>> s.angle = 0.0
        >>> s.angle
        0.0
        >>> s.angle = 15
        >>> s.angle
        15.0

    .. note::

        You can set *min_angle* greater than *max_angle* if you wish to reverse
        the sense of the angles (e.g. ``min_angle=45, max_angle=-45``). This
        can be useful with servos that rotate in the opposite direction to your
        expectations of minimum and maximum.

    :type pin: int or str
    :param pin:
        The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
        for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
        will be raised.

    :param float initial_angle:
        Sets the servo's initial angle to the specified value. The default is
        0. The value specified must be between *min_angle* and *max_angle*
        inclusive. :data:`None` means to start the servo un-controlled (see
        :attr:`value`).

    :param float min_angle:
        Sets the minimum angle that the servo can rotate to. This defaults to
        -90, but should be set to whatever you measure from your servo during
        calibration.

    :param float max_angle:
        Sets the maximum angle that the servo can rotate to. This defaults to
        90, but should be set to whatever you measure from your servo during
        calibration.

    :param float min_pulse_width:
        The pulse width corresponding to the servo's minimum position. This
        defaults to 1ms.

    :param float max_pulse_width:
        The pulse width corresponding to the servo's maximum position. This
        defaults to 2ms.

    :param float frame_width:
        The length of time between servo control pulses measured in seconds.
        This defaults to 20ms which is a common value for servos.

    :type pin_factory: Factory or None
    :param pin_factory:
        See :doc:`api_pins` for more information (this is an advanced feature
        which most users can ignore).
    """
    def __init__(
            self, pin=None, initial_angle=0.0,
            min_angle=-90, max_angle=90,
            min_pulse_width=1/1000, max_pulse_width=2/1000,
            frame_width=20/1000, pin_factory=None):
        self._min_angle = min_angle
        # May be negative when min_angle > max_angle (reversed sense).
        self._angular_range = max_angle - min_angle
        if initial_angle is None:
            initial_value = None
        elif ((min_angle <= initial_angle <= max_angle) or
                (max_angle <= initial_angle <= min_angle)):
            # Normalize the angle onto the parent's -1..+1 value scale.
            initial_value = 2 * ((initial_angle - min_angle) / self._angular_range) - 1
        else:
            raise OutputDeviceBadValue(
                "AngularServo angle must be between %s and %s, or None" %
                (min_angle, max_angle))
        super(AngularServo, self).__init__(
            pin, initial_value, min_pulse_width, max_pulse_width, frame_width,
            pin_factory=pin_factory
        )

    @property
    def min_angle(self):
        """
        The minimum angle that the servo will rotate to when :meth:`min` is
        called.
        """
        return self._min_angle

    @property
    def max_angle(self):
        """
        The maximum angle that the servo will rotate to when :meth:`max` is
        called.
        """
        return self._min_angle + self._angular_range

    @property
    def angle(self):
        """
        The position of the servo as an angle measured in degrees. This will
        only be accurate if :attr:`min_angle` and :attr:`max_angle` have been
        set appropriately in the constructor.

        This can also be the special value :data:`None` indicating that the
        servo is currently "uncontrolled", i.e. that no control signal is being
        sent. Typically this means the servo's position remains unchanged, but
        that it can be moved by hand.
        """
        result = self._get_value()
        if result is None:
            return None
        else:
            # NOTE: Why round(n, 12) here instead of 14? Angle ranges can be
            # much larger than -1..1 so we need a little more rounding to
            # smooth off the rough corners!
            return round(
                self._angular_range *
                ((result - self._min_value) / self._value_range) +
                self._min_angle, 12)

    @angle.setter
    def angle(self, angle):
        if angle is None:
            # Detach: stop sending a control signal.
            self.value = None
        elif ((self.min_angle <= angle <= self.max_angle) or
                (self.max_angle <= angle <= self.min_angle)):
            # Inverse of the normalization in __init__: degrees -> -1..+1.
            self.value = (
                self._value_range *
                ((angle - self._min_angle) / self._angular_range) +
                self._min_value)
        else:
            raise OutputDeviceBadValue(
                "AngularServo angle must be between %s and %s, or None" %
                (self.min_angle, self.max_angle))
| 35.072687
| 99
| 0.605759
|
ac21fc5be5189a6cb91e747faf0560f9cb8a4def
| 303
|
py
|
Python
|
Beginner/Countdown Timer/countdown.py
|
kaar07/Python-Projects
|
5ab8211c69f7fc5dedf19862a4377b4182411a55
|
[
"Apache-2.0"
] | 6
|
2021-04-20T06:15:41.000Z
|
2021-05-07T17:58:19.000Z
|
Beginner/Countdown Timer/countdown.py
|
kaar07/Python-Projects
|
5ab8211c69f7fc5dedf19862a4377b4182411a55
|
[
"Apache-2.0"
] | 11
|
2021-05-15T12:09:09.000Z
|
2022-03-12T00:57:46.000Z
|
Beginner/Countdown Timer/countdown.py
|
kaar07/Python-Projects
|
5ab8211c69f7fc5dedf19862a4377b4182411a55
|
[
"Apache-2.0"
] | 5
|
2021-05-19T07:40:18.000Z
|
2021-09-25T11:11:59.000Z
|
import time
def countdown(_t):
    """Count down from *_t* seconds to zero, printing a MM:SS display.

    :param _t: duration in whole seconds; non-positive values print the
        final message immediately.
    """
    # BUGFIX: the loop previously tested truthiness (`while _t:`), so a
    # negative input decremented forever and never terminated.
    while _t > 0:
        minutes, seconds = divmod(_t, 60)
        timer = '{:02d}:{:02d}'.format(minutes, seconds)
        # \r returns the cursor so each tick overwrites the previous one;
        # flush so the update is visible on line-buffered terminals.
        print(timer, end="\r", flush=True)
        time.sleep(1)
        _t -= 1
    print('Time\'s Up!!')
if __name__ == "__main__":
    # Prompt only when run as a script, so importing this module has no
    # side effects. int() raises ValueError on non-numeric input, as before.
    t = input("Enter the time in seconds: ")
    countdown(int(t))
| 17.823529
| 56
| 0.544554
|
8fd8a4d0343b547a8d02a845ccf624167ff3f0a5
| 409
|
py
|
Python
|
ferien/__init__.py
|
HazardDede/ferien-api
|
d4659790a9ccfa75cb117b629aac9380c6c6a0e5
|
[
"MIT"
] | 4
|
2020-02-18T12:55:27.000Z
|
2021-07-30T14:29:07.000Z
|
ferien/__init__.py
|
HazardDede/ferien-api
|
d4659790a9ccfa75cb117b629aac9380c6c6a0e5
|
[
"MIT"
] | 3
|
2020-06-22T15:11:50.000Z
|
2020-09-10T20:45:07.000Z
|
ferien/__init__.py
|
HazardDede/ferien-api
|
d4659790a9ccfa75cb117b629aac9380c6c6a0e5
|
[
"MIT"
] | 2
|
2020-09-07T12:44:23.000Z
|
2021-04-15T07:59:49.000Z
|
"""ferien-api public members."""
from .sync_ import (
state_codes,
all_vacations,
state_vacations,
current_vacation,
next_vacation
)
from .async_ import (
all_vacations_async,
state_vacations_async
)
# Public API: one entry per helper re-exported from .sync_ and .async_ above,
# kept sorted alphabetically.
__all__ = [
    'state_codes',
    'all_vacations',
    'all_vacations_async',
    'current_vacation',
    'next_vacation',
    'state_vacations',
    'state_vacations_async'
]
| 15.730769
| 32
| 0.674817
|
5031c27a053f2ff2483033162e9a829d517889d5
| 927
|
py
|
Python
|
tests/xml/test_userElement.py
|
plivo/plivo-python
|
81d1ec08bbdaad4eeec907bea2529d44af1e9f16
|
[
"MIT"
] | 42
|
2015-01-16T07:56:16.000Z
|
2021-08-20T04:45:39.000Z
|
tests/xml/test_userElement.py
|
plivo/plivo-python
|
81d1ec08bbdaad4eeec907bea2529d44af1e9f16
|
[
"MIT"
] | 70
|
2015-01-30T04:11:04.000Z
|
2022-03-29T21:04:55.000Z
|
tests/xml/test_userElement.py
|
plivo/plivo-python
|
81d1ec08bbdaad4eeec907bea2529d44af1e9f16
|
[
"MIT"
] | 65
|
2015-04-10T22:17:57.000Z
|
2021-06-06T13:09:31.000Z
|
from unittest import TestCase
from plivo import plivoxml
from tests import PlivoXmlTestCase
class UserElementTest(TestCase, PlivoXmlTestCase):
    def test_set_methods(self):
        """Render a <User> element inside <Dial> and verify the XML output."""
        content = 'This is Test'
        send_digits = 'wwww2410'
        send_on_preanswer = True
        sip_headers = 'head1=val1,head2=val2'
        expected_response = (
            '<Response><Dial><User sendDigits="wwww2410" sendOnPreanswer="true" '
            'sipHeaders="head1=val1,head2=val2">This is Test</User></Dial></Response>'
        )

        # Build the element step by step instead of one chained expression;
        # each setter mutates the element in place.
        user = plivoxml.UserElement(content)
        user.set_send_digits(send_digits)
        user.set_send_on_preanswer(send_on_preanswer)
        user.set_sip_headers(sip_headers)
        dial = plivoxml.DialElement().add(user)
        response = plivoxml.ResponseElement().add(dial).to_string(False)

        self.assertXmlEqual(response, expected_response)
| 34.333333
| 102
| 0.647249
|
35f221dfd2b7ac80521b69697ba55aba6619624d
| 1,175
|
py
|
Python
|
tensor2tensor/bin/translate_joint.py
|
cgebe/tensor2tensor
|
8e2389021643774f81a3af643e55a856896aef5c
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/bin/translate_joint.py
|
cgebe/tensor2tensor
|
8e2389021643774f81a3af643e55a856896aef5c
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/bin/translate_joint.py
|
cgebe/tensor2tensor
|
8e2389021643774f81a3af643e55a856896aef5c
|
[
"Apache-2.0"
] | 1
|
2018-07-26T18:31:58.000Z
|
2018-07-26T18:31:58.000Z
|
import os
# Language-pair translation problems to train jointly; commented-out entries
# are pairs excluded from the current run (uncomment to include them).
TRANSLATE_PROBLEMS = [
    "translate_csde_legal32k",
    #"translate_csen_legal32k",
    #"translate_cses_legal32k",
    #"translate_csfr_legal32k",
    #"translate_csit_legal32k",
    #"translate_cssv_legal32k",
    "translate_deen_legal32k",
    #"translate_dees_legal32k",
    #"translate_defr_legal32k",
    #"translate_deit_legal32k",
    #"translate_desv_legal32k",
    "translate_enes_legal32k",
    #"translate_enfr_legal32k",
    #"translate_enit_legal32k",
    #"translate_ensv_legal32k",
    "translate_esfr_legal32k",
    #"translate_esit_legal32k",
    #"translate_essv_legal32k",
    "translate_frit_legal32k",
    #"translate_frsv_legal32k",
    "translate_itsv_legal32k"
]
def main():
    """Run t2t-trainer on the joined problem list, retrying until it exits 0.

    The problem names are joined with '-' as required by the --problems flag.
    """
    # str.join replaces the previous manual "+= ... ; strip last char" build.
    joint = "-".join(TRANSLATE_PROBLEMS)
    successful = False
    while not successful:
        print(joint)
        cmd = ("python ./t2t-trainer --data_dir=$DATA_DIR "
               "--output_dir=$TRAIN_DIR/translate/joint-diverse --worker_gpu=4 "
               "--train_steps=500000 --model=multi_model "
               "--hparams_set=multimodel_legal --problems=" + joint)
        # os.system returns the shell exit status; 0 means the run completed.
        if os.system(cmd) == 0:
            successful = True


if __name__ == "__main__":
    main()
| 26.704545
| 210
| 0.68766
|
928b5b45805b5ec3a984bcaeffb70b3f504c1d21
| 1,846
|
py
|
Python
|
graph/dijkstra2.py
|
TennielMiao/uab-contest-tools
|
675da96f8ad40533e2402cc2c19b00275fd33e4e
|
[
"MIT"
] | 4
|
2018-01-29T21:35:06.000Z
|
2018-02-17T03:25:44.000Z
|
graph/dijkstra2.py
|
TennielMiao/uab-contest-tools
|
675da96f8ad40533e2402cc2c19b00275fd33e4e
|
[
"MIT"
] | null | null | null |
graph/dijkstra2.py
|
TennielMiao/uab-contest-tools
|
675da96f8ad40533e2402cc2c19b00275fd33e4e
|
[
"MIT"
] | null | null | null |
import math
import heapq
class Node:
    """A graph vertex tracking its best-known distance and predecessor."""

    def __init__(self, name, distance=math.inf, prev=None):
        self.name = name
        self.distance = distance
        self.prev = prev

    def __lt__(self, other):
        # Heap ordering is by tentative distance only; names are ignored.
        return self.distance < other.distance

    def __gt__(self, other):
        return self.distance > other.distance
def visit_node(node, conns):
    """Relax every edge incident to *node*.

    Each connection is a tuple ``(u, v, weight)``; whichever endpoint is
    *node* acts as the source, and the other endpoint's tentative distance
    and predecessor are updated when the path through *node* is shorter.
    """
    for u, v, weight in conns:
        if node is u or node is v:
            src, dst = (u, v) if node is u else (v, u)
            candidate = src.distance + weight
            if candidate < dst.distance:
                dst.distance = candidate
                dst.prev = src
def dijkstra(target, nodes, connections):
    """Run Dijkstra's algorithm until *target* is settled, then print the path.

    ``nodes`` is consumed (each settled node is popped from it);
    ``connections`` is a list of ``(u, v, weight)`` tuples. The source is
    whichever node was constructed with distance 0.
    """
    # The previous version copied `nodes` into an unused `backup` list;
    # that dead code has been removed.
    while target in nodes:
        # Re-heapify every round because visit_node may have lowered the
        # distance of nodes already inside the heap.
        heapq.heapify(nodes)
        node = heapq.heappop(nodes)
        visit_node(node, connections)
    current = target.prev
    print("Go to", target.name)
    while current is not None:
        print("from", current.name)
        current = current.prev
    print("Total distance", target.distance)
# Example graph: undirected weighted edges stored as (node_a, node_b, weight)
# tuples; `s` is the source (constructed with distance 0).
conns = []
s = Node("s", 0)
a = Node("a")
b = Node("b")
c = Node("c")
d = Node("d")
e = Node("e")
f = Node("f")
g = Node("g")
h = Node("h")
i = Node("i")
j = Node("j")
k = Node("k")
l = Node("l")
nodes = [a, b, c, d, e, f, g, h, i, j, k, l, s]
conns.append((s, a, 7))
conns.append((a, b, 3))
conns.append((s, b, 2))
conns.append((b, d, 4))
conns.append((d, f, 5))
conns.append((f, h, 3))
conns.append((b, h, 1))
conns.append((h, g, 2))
conns.append((g, e, 20))
conns.append((s, a, 7))  # NOTE(review): duplicate of the (s, a, 7) edge above -- harmless but redundant
conns.append((s, c, 3))
conns.append((c, l, 2))
conns.append((l, i, 4))
conns.append((l, j, 4))
conns.append((j, i, 6))
conns.append((i, k, 5))
conns.append((j, k, 4))
conns.append((k, e, 5))
# Find and print the shortest path from s to e.
dijkstra(e, nodes, conns)
| 22.240964
| 71
| 0.567172
|
cc7ca89edbc80697a22c307a68cbf0c53e4d0a9e
| 62,760
|
py
|
Python
|
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/datastore/datastore_pbs.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/datastore/datastore_pbs.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/datastore/datastore_pbs.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:09:01.000Z
|
2020-07-25T12:09:01.000Z
|
# !/usr/bin/python2.4 # pylint: disable=g-unknown-interpreter
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for converting between v3 and v1 datastore protocol buffers.
This module is internal and should not be used by client applications.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
# NOTE: This module is heavily based on the Java classes in
# c.g.apphosting.datastore.service. Changes made here are likely to be needed
# there as well.
# pylint: disable=g-bad-name
from googlecloudsdk.third_party.appengine.googlestorage.onestore.v3 import entity_pb
from googlecloudsdk.third_party.appengine.datastore import datastore_v4_pb
from googlecloudsdk.third_party.appengine.datastore import entity_v4_pb
# Feature detection: the v1 (Cloud Datastore) helpers in this module require
# the optional `googledatastore` client at or above this version.
_MIN_CLOUD_DATASTORE_VERSION = (4, 0, 0, 'b1')
_CLOUD_DATASTORE_ENABLED = False
# pylint: disable=g-import-not-at-top,unused-import
try:
  from googledatastore import v1beta3
  import googledatastore
  # We also need to ensure that this is the right version.
  if googledatastore.VERSION >= _MIN_CLOUD_DATASTORE_VERSION:
    _CLOUD_DATASTORE_ENABLED = True
except ImportError:
  pass
except AttributeError:
  # Earlier versions of googledatastore do not have a VERSION.
  pass

MISSING_CLOUD_DATASTORE_MESSAGE = (
    'Could not import googledatastore. This library must be installed with '
    'version >= %s to use the Cloud Datastore API.' %
    '.'.join([str(v) for v in _MIN_CLOUD_DATASTORE_VERSION]))

# Meanings.
MEANING_ATOM_CATEGORY = 1
MEANING_URL = 2
MEANING_ATOM_TITLE = 3
MEANING_ATOM_CONTENT = 4
MEANING_ATOM_SUMMARY = 5
MEANING_ATOM_AUTHOR = 6
MEANING_NON_RFC_3339_TIMESTAMP = 7
MEANING_GD_EMAIL = 8
MEANING_GEORSS_POINT = 9
MEANING_GD_IM = 10
MEANING_GD_PHONENUMBER = 11
MEANING_GD_POSTALADDRESS = 12
MEANING_PERCENT = 13
MEANING_TEXT = 15
MEANING_BYTESTRING = 16
MEANING_BLOBKEY = 17
MEANING_INDEX_ONLY = 18
MEANING_PREDEFINED_ENTITY_USER = 20
MEANING_PREDEFINED_ENTITY_POINT = 21
MEANING_ZLIB = 22
MEANING_POINT_WITHOUT_V3_MEANING = 23
MEANING_EMPTY_LIST = 24

# URI meanings.
URI_MEANING_ZLIB = 'ZLIB'

# Limits. Must be kept in sync with
# java/com/google/apphosting/datastore/service/common/DatastoreHelper.java.
MAX_URL_CHARS = 2083
MAX_INDEXED_STRING_CHARS = 500
MAX_INDEXED_BLOB_BYTES = 500
MAX_PARTITION_ID_LENGTH = 100
MAX_DATASET_ID_SECTION_LENGTH = 100
# Each app id section can be 100 characters and are separated by a single
# character. Note that this can only do preliminary validation, since each
# partition must also be less than 100 characters.
MAX_DATASET_ID_LENGTH = MAX_DATASET_ID_SECTION_LENGTH * 3 + 2
MAX_KEY_PATH_LENGTH = 100

# Property names for predefined point entities.
PROPERTY_NAME_X = 'x'
PROPERTY_NAME_Y = 'y'

# Property names for predefined user entities.
PROPERTY_NAME_EMAIL = 'email'
PROPERTY_NAME_AUTH_DOMAIN = 'auth_domain'
PROPERTY_NAME_USER_ID = 'user_id'
PROPERTY_NAME_INTERNAL_ID = 'internal_id'
PROPERTY_NAME_FEDERATED_IDENTITY = 'federated_identity'
PROPERTY_NAME_FEDERATED_PROVIDER = 'federated_provider'

# Other property names.
PROPERTY_NAME_KEY = '__key__'

DEFAULT_GAIA_ID = 0

# Microsecond timestamp bounds representable in RFC 3339 (years 0001-9999).
# See com.google.apphosting.datastore.service.common.ValidationUtils.
RFC_3339_MIN_MICROSECONDS_INCLUSIVE = -62135596800 * 1000 * 1000
RFC_3339_MAX_MICROSECONDS_INCLUSIVE = 253402300799 * 1000 * 1000 + 999999
def v4_key_to_string(v4_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.

  The key need not be valid, but if any of the key path elements have
  both a name and an ID the name is ignored.

  Args:
    v4_key: an entity_v4_pb.Key

  Returns:
    a string representing the key's path
  """
  def _render(path_element):
    # An ID wins over a name; an element with neither renders as empty.
    if path_element.has_id():
      return '%s: %s' % (path_element.kind(), str(path_element.id()))
    if path_element.has_name():
      return '%s: %s' % (path_element.kind(), path_element.name())
    return '%s: %s' % (path_element.kind(), '')
  rendered = [_render(path_element)
              for path_element in v4_key.path_element_list()]
  return '[%s]' % ', '.join(rendered)
def is_complete_v4_key(v4_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v4_key: an entity_v4_pb.Key

  Returns:
    True if the key specifies an ID or name, False otherwise.
  """
  elements = v4_key.path_element_list()
  assert elements
  # Only the final path element determines completeness.
  last = elements[-1]
  return last.has_id() or last.has_name()
def v1_key_to_string(v1_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.

  The key need not be valid, but if any of the key path elements have
  both a name and an ID the name is ignored.

  Args:
    v1_key: an googledatastore.Key

  Returns:
    a string representing the key's path
  """
  rendered = []
  for path_element in v1_key.path:
    # The proto3 oneof tells us which identifier (if any) is populated.
    id_type = path_element.WhichOneof('id_type')
    if id_type == 'id':
      identifier = str(path_element.id)
    elif id_type == 'name':
      identifier = path_element.name
    else:
      identifier = ''
    rendered.append('%s: %s' % (path_element.kind, identifier))
  return '[%s]' % ', '.join(rendered)
def is_complete_v1_key(v1_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v1_key: an googledatastore.Key

  Returns:
    True if the key specifies an ID or name, False otherwise.
  """
  assert len(v1_key.path) >= 1
  # Completeness is decided by the final path element's oneof: set means an
  # id or name is present.
  final_element = v1_key.path[-1]
  return final_element.WhichOneof('id_type') is not None
def is_complete_v3_key(v3_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v3_key: a datastore_pb.Reference

  Returns:
    True if the key specifies an ID or name, False otherwise.
  """
  path = v3_key.path()
  assert path.element_size() >= 1
  last = path.element_list()[-1]
  # v3 uses sentinel values (id 0, empty name) for "unset", so presence of
  # the field alone is not enough.
  has_usable_id = last.has_id() and last.id() != 0
  has_usable_name = last.has_name() and last.name() != ''
  return has_usable_id or has_usable_name
def get_v1_mutation_key_and_entity(v1_mutation):
  """Returns the v1 key and entity for a v1 mutation proto, if applicable.

  Args:
    v1_mutation: a googledatastore.Mutation

  Returns:
    a tuple (googledatastore.Key for this mutation,
             googledatastore.Entity or None if the mutation is a deletion)
  """
  # Deletions carry only a key; every other operation carries an entity.
  if v1_mutation.HasField('delete'):
    return v1_mutation.delete, None
  v1_entity = getattr(v1_mutation, v1_mutation.WhichOneof('operation'))
  return v1_entity.key, v1_entity
def is_valid_utf8(s):
  # Returns True if `s` is a unicode string or a byte string that decodes as
  # UTF-8. NOTE: Python 2 only -- `unicode` does not exist on Python 3
  # (consistent with the python2.4 shebang at the top of this file).
  if isinstance(s, unicode):
    return True
  try:
    s.decode('utf-8')
    return True
  except UnicodeDecodeError:
    return False
def check_conversion(condition, message):
  """Asserts a conversion condition and raises an error if it's not met.

  Args:
    condition: (boolean) condition to enforce
    message: error message

  Raises:
    InvalidConversionError: if condition is not met
  """
  if condition:
    return
  raise InvalidConversionError(message)
def is_in_rfc_3339_bounds(microseconds):
  # True when the timestamp (microseconds since the epoch) is representable
  # in RFC 3339, i.e. within years 0001-9999 inclusive.
  return (RFC_3339_MIN_MICROSECONDS_INCLUSIVE <= microseconds
          <= RFC_3339_MAX_MICROSECONDS_INCLUSIVE)
# TODO(user): Move into datastore_errors?
class InvalidConversionError(Exception):
  """Raised when a datastore protocol buffer conversion fails."""
class IdResolver(object):
  """A class that can handle project id <--> application id transformations."""

  def __init__(self, app_ids=()):
    """Create a new IdResolver.

    Args:
      app_ids: A list of application ids with application id shard set. i.e.
          s~my_app or e~my_app.
    """
    # Index the known application ids by their project id for reverse lookup.
    mapping = {}
    for app_id in app_ids:
      mapping[self.resolve_project_id(app_id)] = app_id
    self._resolver_map = mapping

  def resolve_project_id(self, app_id):
    """Converts an application id to a project id.

    Args:
      app_id: The application id.

    Returns:
      The project id.
    """
    # Everything after the last '~' is the project id; ids without a '~'
    # pass through unchanged.
    return app_id.rsplit('~')[-1]

  def resolve_app_id(self, project_id):
    """Converts a project id to an application id.

    Args:
      project_id: The project id.

    Returns:
      The application id.

    Raises:
      InvalidConversionError: if the application is unknown for the project id.
    """
    known = project_id in self._resolver_map
    check_conversion(known,
                     'Cannot determine application id for provided project id: '
                     '"%s".' % project_id)
    return self._resolver_map[project_id]
class _IdentityIdResolver(IdResolver):
  """An IdResolver for which application id and project id are identical."""

  def resolve_project_id(self, app_id):
    # Identity mapping: no partition prefix handling in either direction.
    return app_id

  def resolve_app_id(self, project_id):
    return project_id
class _EntityConverter(object):
"""Converter for entities and keys."""
  def __init__(self, id_resolver):
    """Creates a new EntityConverter.

    Args:
      id_resolver: an IdResolver object for converting
          project_id <--> application_id
    """
    # Stored for use by the v1 conversion paths; the v3/v4 methods below do
    # not consult it.
    self._id_resolver = id_resolver
  def v4_to_v3_reference(self, v4_key, v3_ref):
    """Converts a v4 Key to a v3 Reference.

    Args:
      v4_key: an entity_v4_pb.Key
      v3_ref: an entity_pb.Reference to populate
    """
    v3_ref.Clear()
    if v4_key.has_partition_id():
      # The v4 partition id carries both pieces of the v3 addressing:
      # dataset id -> app, namespace -> name_space.
      if v4_key.partition_id().has_dataset_id():
        v3_ref.set_app(v4_key.partition_id().dataset_id())
      if v4_key.partition_id().has_namespace():
        v3_ref.set_name_space(v4_key.partition_id().namespace())
    for v4_element in v4_key.path_element_list():
      # Copy the path element-by-element; an element may carry an id, a
      # name, or neither (an incomplete key).
      v3_element = v3_ref.mutable_path().add_element()
      v3_element.set_type(v4_element.kind())
      if v4_element.has_id():
        v3_element.set_id(v4_element.id())
      if v4_element.has_name():
        v3_element.set_name(v4_element.name())
def v4_to_v3_references(self, v4_keys):
"""Converts a list of v4 Keys to a list of v3 References.
Args:
v4_keys: a list of entity_v4_pb.Key objects
Returns:
a list of entity_pb.Reference objects
"""
v3_refs = []
for v4_key in v4_keys:
v3_ref = entity_pb.Reference()
self.v4_to_v3_reference(v4_key, v3_ref)
v3_refs.append(v3_ref)
return v3_refs
  def v3_to_v4_key(self, v3_ref, v4_key):
    """Converts a v3 Reference to a v4 Key.

    Args:
      v3_ref: an entity_pb.Reference
      v4_key: an entity_v4_pb.Key to populate
    """
    v4_key.Clear()
    if not v3_ref.app():
      # An empty app means an empty v3 key; leave the v4 key cleared.
      return
    v4_key.mutable_partition_id().set_dataset_id(v3_ref.app())
    if v3_ref.name_space():
      v4_key.mutable_partition_id().set_namespace(v3_ref.name_space())
    for v3_element in v3_ref.path().element_list():
      # Mirror of v4_to_v3_reference: type -> kind, id/name copied when set.
      v4_element = v4_key.add_path_element()
      v4_element.set_kind(v3_element.type())
      if v3_element.has_id():
        v4_element.set_id(v3_element.id())
      if v3_element.has_name():
        v4_element.set_name(v3_element.name())
def v3_to_v4_keys(self, v3_refs):
"""Converts a list of v3 References to a list of v4 Keys.
Args:
v3_refs: a list of entity_pb.Reference objects
Returns:
a list of entity_v4_pb.Key objects
"""
v4_keys = []
for v3_ref in v3_refs:
v4_key = entity_v4_pb.Key()
self.v3_to_v4_key(v3_ref, v4_key)
v4_keys.append(v4_key)
return v4_keys
  def v4_to_v3_entity(self, v4_entity, v3_entity, is_projection=False):
    """Converts a v4 Entity to a v3 EntityProto.

    Args:
      v4_entity: an entity_v4_pb.Entity
      v3_entity: an entity_pb.EntityProto to populate
      is_projection: True if the v4_entity is from a projection query.
    """
    v3_entity.Clear()
    for v4_property in v4_entity.property_list():
      property_name = v4_property.name()
      v4_value = v4_property.value()
      if v4_value.list_value_list():
        # A v4 list value expands to one v3 property per element, all sharing
        # the property name; the True flags the multi-valued case.
        for v4_sub_value in v4_value.list_value_list():
          self.__add_v3_property_from_v4(
              property_name, True, is_projection, v4_sub_value, v3_entity)
      else:
        self.__add_v3_property_from_v4(
            property_name, False, is_projection, v4_value, v3_entity)
    if v4_entity.has_key():
      v4_key = v4_entity.key()
      self.v4_to_v3_reference(v4_key, v3_entity.mutable_key())
      v3_ref = v3_entity.key()
      # NOTE(review): presumably derives the v3 entity group from the key's
      # root element -- confirm against v3_reference_to_group.
      self.v3_reference_to_group(v3_ref, v3_entity.mutable_entity_group())
    else:
      # Do NOT set v3_entity.key or .entity_group, even though they
      # are required.
      pass
def v3_to_v4_entity(self, v3_entity, v4_entity):
"""Converts a v3 EntityProto to a v4 Entity.
Args:
v3_entity: an entity_pb.EntityProto
v4_entity: an entity_v4_pb.Proto to populate
"""
v4_entity.Clear()
self.v3_to_v4_key(v3_entity.key(), v4_entity.mutable_key())
if not v3_entity.key().has_app():
# Irreversible: v3_entity.key will change from empty to unset.
v4_entity.clear_key()
# v3_entity.entity_group is redundant.
# Ignore v3_entity.owner.
# Ignore v3_entity.kind.
# Ignore v3_entity.kind_url.
v4_properties = {}
for v3_property in v3_entity.property_list():
self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
True)
for v3_property in v3_entity.raw_property_list():
self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
False)
  def v4_value_to_v3_property_value(self, v4_value, v3_value):
    """Converts a v4 Value to a v3 PropertyValue.

    Args:
      v4_value: an entity_v4_pb.Value
      v3_value: an entity_pb.PropertyValue to populate
    """
    v3_value.Clear()
    if v4_value.has_boolean_value():
      v3_value.set_booleanvalue(v4_value.boolean_value())
    elif v4_value.has_integer_value():
      v3_value.set_int64value(v4_value.integer_value())
    elif v4_value.has_double_value():
      v3_value.set_doublevalue(v4_value.double_value())
    elif v4_value.has_timestamp_microseconds_value():
      # v3 stores timestamps as plain int64 microseconds.
      v3_value.set_int64value(v4_value.timestamp_microseconds_value())
    elif v4_value.has_key_value():
      v3_ref = entity_pb.Reference()
      self.v4_to_v3_reference(v4_value.key_value(), v3_ref)
      self.v3_reference_to_v3_property_value(v3_ref, v3_value)
    elif v4_value.has_blob_key_value():
      v3_value.set_stringvalue(v4_value.blob_key_value())
    elif v4_value.has_string_value():
      v3_value.set_stringvalue(v4_value.string_value())
    elif v4_value.has_blob_value():
      v3_value.set_stringvalue(v4_value.blob_value())
    elif v4_value.has_entity_value():
      # Entity values dispatch on meaning: predefined point and user
      # entities map onto dedicated v3 value types.
      v4_entity_value = v4_value.entity_value()
      v4_meaning = v4_value.meaning()
      if (v4_meaning == MEANING_GEORSS_POINT
          or v4_meaning == MEANING_PREDEFINED_ENTITY_POINT):
        self.__v4_to_v3_point_value(v4_entity_value,
                                    v3_value.mutable_pointvalue())
      elif v4_meaning == MEANING_PREDEFINED_ENTITY_USER:
        self.v4_entity_to_v3_user_value(v4_entity_value,
                                        v3_value.mutable_uservalue())
      else:
        # Arbitrary sub-entities are carried as a serialized EntityProto.
        v3_entity_value = entity_pb.EntityProto()
        self.v4_to_v3_entity(v4_entity_value, v3_entity_value)
        v3_value.set_stringvalue(v3_entity_value.SerializePartialToString())
    elif v4_value.has_geo_point_value():
      point_value = v3_value.mutable_pointvalue()
      point_value.set_x(v4_value.geo_point_value().latitude())
      point_value.set_y(v4_value.geo_point_value().longitude())
    else:
      # Null value; do nothing.
      pass
  def v3_property_to_v4_value(self, v3_property, indexed, v4_value):
    """Converts a v3 Property to a v4 Value.

    Args:
      v3_property: an entity_pb.Property
      indexed: whether the v3 property is indexed
      v4_value: an entity_v4_pb.Value to populate
    """
    v4_value.Clear()
    v3_property_value = v3_property.value()
    v3_meaning = v3_property.meaning()
    v3_uri_meaning = None
    if v3_property.meaning_uri():
      v3_uri_meaning = v3_property.meaning_uri()
    # Normalize the meanings first: discard any meaning that does not match
    # the value that is actually present.
    if not self.__is_v3_property_value_union_valid(v3_property_value):
      # Irreversible: Value with 2+ types will reduce to 1 type and discard
      # meanings.
      v3_meaning = None
      v3_uri_meaning = None
    elif v3_meaning == entity_pb.Property.NO_MEANING:
      v3_meaning = None
    elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                       v3_meaning):
      # Irreversible: Invalid meaning will be discarded.
      v3_meaning = None
    is_zlib_value = False
    if v3_uri_meaning:
      if v3_uri_meaning == URI_MEANING_ZLIB:
        if v3_property_value.has_stringvalue():
          is_zlib_value = True
          if v3_meaning != entity_pb.Property.BLOB:
            # Irreversible: Meaning will be lost.
            v3_meaning = entity_pb.Property.BLOB
        else:
          pass  # Irreversible: Zlib uri meaning will be lost.
      else:
        pass  # Irreversible: Non-zlib uri meaning will be lost.
    # Copy the typed value, if present.
    if v3_property_value.has_booleanvalue():
      v4_value.set_boolean_value(v3_property_value.booleanvalue())
    elif v3_property_value.has_int64value():
      if v3_meaning == entity_pb.Property.GD_WHEN:
        # GD_WHEN int64s are timestamps in microseconds.
        v4_value.set_timestamp_microseconds_value(
            v3_property_value.int64value())
        v3_meaning = None
      else:
        v4_value.set_integer_value(v3_property_value.int64value())
    elif v3_property_value.has_doublevalue():
      v4_value.set_double_value(v3_property_value.doublevalue())
    elif v3_property_value.has_referencevalue():
      v3_ref = entity_pb.Reference()
      self.__v3_reference_value_to_v3_reference(
          v3_property_value.referencevalue(), v3_ref)
      self.v3_to_v4_key(v3_ref, v4_value.mutable_key_value())
    elif v3_property_value.has_stringvalue():
      if v3_meaning == entity_pb.Property.ENTITY_PROTO:
        serialized_entity_v3 = v3_property_value.stringvalue()
        v3_entity = entity_pb.EntityProto()
        # The v3 entity may have been serialized without a key, hence the
        # partial parse.
        v3_entity.ParsePartialFromString(serialized_entity_v3)
        self.v3_to_v4_entity(v3_entity, v4_value.mutable_entity_value())
        v3_meaning = None
      elif (v3_meaning == entity_pb.Property.BLOB
            or v3_meaning == entity_pb.Property.BYTESTRING):
        v4_value.set_blob_value(v3_property_value.stringvalue())
        # Only preserve meaning for unindexed BYTESTRING.
        if indexed or v3_meaning == entity_pb.Property.BLOB:
          v3_meaning = None
      else:
        string_value = v3_property_value.stringvalue()
        if is_valid_utf8(string_value):
          if v3_meaning == entity_pb.Property.BLOBKEY:
            v4_value.set_blob_key_value(string_value)
            v3_meaning = None
          else:
            v4_value.set_string_value(string_value)
        else:
          # The "string" is not a valid utf8 string. Convert it to a blob.
          v4_value.set_blob_value(string_value)
          # Discard the meaning with the conversion, except meaning index.
          if v3_meaning != entity_pb.Property.INDEX_VALUE:
            v3_meaning = None
          # Irreversible: Non-utf8 "string" will change to blob (and lose
          # meaning).
    elif v3_property_value.has_pointvalue():
      if v3_meaning == MEANING_GEORSS_POINT:
        point_value = v3_property_value.pointvalue()
        v4_value.mutable_geo_point_value().set_latitude(point_value.x())
        v4_value.mutable_geo_point_value().set_longitude(point_value.y())
      else:
        # Points without the georss meaning become predefined point entities.
        self.__v3_to_v4_point_entity(v3_property_value.pointvalue(),
                                     v4_value.mutable_entity_value())
        v4_value.set_meaning(MEANING_PREDEFINED_ENTITY_POINT)
      v3_meaning = None
    elif v3_property_value.has_uservalue():
      self.v3_user_value_to_v4_entity(v3_property_value.uservalue(),
                                      v4_value.mutable_entity_value())
      v4_value.set_meaning(MEANING_PREDEFINED_ENTITY_USER)
      v3_meaning = None
    else:
      pass  # v3 value is null. Leave v4 value null.
    # Zlib wins over any surviving v3 meaning.
    if is_zlib_value:
      v4_value.set_meaning(MEANING_ZLIB)
    elif v3_meaning:
      v4_value.set_meaning(v3_meaning)
    # Set v4 indexed only if the current (default) v4 value is unhelpful.
    if indexed != v4_value.indexed():
      v4_value.set_indexed(indexed)
  def v4_to_v3_property(self, property_name, is_multi, is_projection,
                        v4_value, v3_property):
    """Converts info from a v4 Property to a v3 Property.

    v4_value must not have a list_value.

    Args:
      property_name: the name of the property
      is_multi: whether the property contains multiple values
      is_projection: whether the property is projected
      v4_value: an entity_v4_pb.Value
      v3_property: an entity_pb.Property to populate
    """
    assert not v4_value.list_value_list(), 'v4 list_value not convertable to v3'
    v3_property.Clear()
    v3_property.set_name(property_name)
    if v4_value.has_meaning() and v4_value.meaning() == MEANING_EMPTY_LIST:
      # An empty list is encoded in v3 as a single no-value property
      # carrying the EMPTY_LIST meaning.
      v3_property.set_meaning(MEANING_EMPTY_LIST)
      v3_property.set_multiple(False)
      v3_property.mutable_value()
      return
    v3_property.set_multiple(is_multi)
    self.v4_value_to_v3_property_value(v4_value, v3_property.mutable_value())
    v4_meaning = None
    if v4_value.has_meaning():
      v4_meaning = v4_value.meaning()
    # Derive the v3 meaning from the v4 value type; several types carry
    # implicit meanings in v3.
    if v4_value.has_timestamp_microseconds_value():
      v3_property.set_meaning(entity_pb.Property.GD_WHEN)
    elif v4_value.has_blob_key_value():
      v3_property.set_meaning(entity_pb.Property.BLOBKEY)
    elif v4_value.has_blob_value():
      if v4_meaning == MEANING_ZLIB:
        v3_property.set_meaning_uri(URI_MEANING_ZLIB)
      if v4_meaning == entity_pb.Property.BYTESTRING:
        if v4_value.indexed():
          pass
          # Irreversible: Blob with redundant byte string meaning will lose it
          # when converted back to v4.
      else:
        if v4_value.indexed():
          v3_property.set_meaning(entity_pb.Property.BYTESTRING)
        else:
          v3_property.set_meaning(entity_pb.Property.BLOB)
        v4_meaning = None
    elif v4_value.has_entity_value():
      if v4_meaning != MEANING_GEORSS_POINT:
        if (v4_meaning != MEANING_PREDEFINED_ENTITY_POINT
            and v4_meaning != MEANING_PREDEFINED_ENTITY_USER):
          v3_property.set_meaning(entity_pb.Property.ENTITY_PROTO)
        v4_meaning = None
    elif v4_value.has_geo_point_value():
      v3_property.set_meaning(MEANING_GEORSS_POINT)
    else:
      # Null or primitive value; do nothing.
      pass
    if v4_meaning is not None:
      v3_property.set_meaning(v4_meaning)
    # If the value is in a projection, we should override the meaning.
    if is_projection:
      v3_property.set_meaning(entity_pb.Property.INDEX_VALUE)
def __add_v3_property_from_v4(self, property_name, is_multi, is_projection,
v4_value, v3_entity):
"""Adds a v3 Property to an Entity based on information from a v4 Property.
Args:
property_name: the name of the property
is_multi: whether the property contains multiple values
is_projection: whether the property is a projection
v4_value: an entity_v4_pb.Value
v3_entity: an entity_pb.EntityProto
"""
if v4_value.indexed():
self.v4_to_v3_property(property_name, is_multi, is_projection,
v4_value, v3_entity.add_property())
else:
self.v4_to_v3_property(property_name, is_multi, is_projection,
v4_value, v3_entity.add_raw_property())
def __build_name_to_v4_property_map(self, v4_entity):
property_map = {}
for prop in v4_entity.property_list():
property_map[prop.name()] = prop
return property_map
def __add_v4_property_to_entity(self, v4_entity, property_map, v3_property,
indexed):
"""Adds a v4 Property to an entity or modifies an existing one.
property_map is used to track of properties that have already been added.
The same dict should be used for all of an entity's properties.
Args:
v4_entity: an entity_v4_pb.Entity
property_map: a dict of name -> v4_property
v3_property: an entity_pb.Property to convert to v4 and add to the dict
indexed: whether the property is indexed
"""
property_name = v3_property.name()
if property_name in property_map:
v4_property = property_map[property_name]
else:
v4_property = v4_entity.add_property()
v4_property.set_name(property_name)
property_map[property_name] = v4_property
if v3_property.multiple():
self.v3_property_to_v4_value(v3_property, indexed,
v4_property.mutable_value().add_list_value())
else:
self.v3_property_to_v4_value(v3_property, indexed,
v4_property.mutable_value())
def __get_v4_integer_value(self, v4_property):
"""Returns an integer value from a v4 Property.
Args:
v4_property: an entity_v4_pb.Property
Returns:
an integer
Raises:
InvalidConversionError: if the property doesn't contain an integer value
"""
check_conversion(v4_property.value().has_integer_value(),
'Property does not contain an integer value.')
return v4_property.value().integer_value()
def __get_v4_double_value(self, v4_property):
"""Returns a double value from a v4 Property.
Args:
v4_property: an entity_v4_pb.Property
Returns:
a double
Raises:
InvalidConversionError: if the property doesn't contain a double value
"""
check_conversion(v4_property.value().has_double_value(),
'Property does not contain a double value.')
return v4_property.value().double_value()
def __get_v4_string_value(self, v4_property):
"""Returns an string value from a v4 Property.
Args:
v4_property: an entity_v4_pb.Property
Returns:
a string
Throws:
InvalidConversionError: if the property doesn't contain a string value
"""
check_conversion(v4_property.value().has_string_value(),
'Property does not contain a string value.')
return v4_property.value().string_value()
def __v4_integer_property(self, name, value, indexed):
"""Creates a single-integer-valued v4 Property.
Args:
name: the property name
value: the integer value of the property
indexed: whether the value should be indexed
Returns:
an entity_v4_pb.Property
"""
v4_property = entity_v4_pb.Property()
v4_property.set_name(name)
v4_value = v4_property.mutable_value()
v4_value.set_indexed(indexed)
v4_value.set_integer_value(value)
return v4_property
def __v4_double_property(self, name, value, indexed):
"""Creates a single-double-valued v4 Property.
Args:
name: the property name
value: the double value of the property
indexed: whether the value should be indexed
Returns:
an entity_v4_pb.Property
"""
v4_property = entity_v4_pb.Property()
v4_property.set_name(name)
v4_value = v4_property.mutable_value()
v4_value.set_indexed(indexed)
v4_value.set_double_value(value)
return v4_property
def __v4_string_property(self, name, value, indexed):
"""Creates a single-string-valued v4 Property.
Args:
name: the property name
value: the string value of the property
indexed: whether the value should be indexed
Returns:
an entity_v4_pb.Property
"""
v4_property = entity_v4_pb.Property()
v4_property.set_name(name)
v4_value = v4_property.mutable_value()
v4_value.set_indexed(indexed)
v4_value.set_string_value(value)
return v4_property
def __v4_to_v3_point_value(self, v4_point_entity, v3_point_value):
"""Converts a v4 point Entity to a v3 PointValue.
Args:
v4_point_entity: an entity_v4_pb.Entity representing a point
v3_point_value: an entity_pb.Property_PointValue to populate
"""
v3_point_value.Clear()
name_to_v4_property = self.__build_name_to_v4_property_map(v4_point_entity)
v3_point_value.set_x(
self.__get_v4_double_value(name_to_v4_property['x']))
v3_point_value.set_y(
self.__get_v4_double_value(name_to_v4_property['y']))
def __v3_to_v4_point_entity(self, v3_point_value, v4_entity):
"""Converts a v3 UserValue to a v4 user Entity.
Args:
v3_point_value: an entity_pb.Property_PointValue
v4_entity: an entity_v4_pb.Entity to populate
"""
v4_entity.Clear()
v4_entity.property_list().append(
self.__v4_double_property(PROPERTY_NAME_X, v3_point_value.x(), False))
v4_entity.property_list().append(
self.__v4_double_property(PROPERTY_NAME_Y, v3_point_value.y(), False))
def v4_entity_to_v3_user_value(self, v4_user_entity, v3_user_value):
"""Converts a v4 user Entity to a v3 UserValue.
Args:
v4_user_entity: an entity_v4_pb.Entity representing a user
v3_user_value: an entity_pb.Property_UserValue to populate
"""
v3_user_value.Clear()
name_to_v4_property = self.__build_name_to_v4_property_map(v4_user_entity)
# Email and auth domain are required to be present in the v4 entity.
v3_user_value.set_email(self.__get_v4_string_value(
name_to_v4_property[PROPERTY_NAME_EMAIL]))
v3_user_value.set_auth_domain(self.__get_v4_string_value(
name_to_v4_property[PROPERTY_NAME_AUTH_DOMAIN]))
if PROPERTY_NAME_USER_ID in name_to_v4_property:
v3_user_value.set_obfuscated_gaiaid(
self.__get_v4_string_value(
name_to_v4_property[PROPERTY_NAME_USER_ID]))
if PROPERTY_NAME_INTERNAL_ID in name_to_v4_property:
v3_user_value.set_gaiaid(self.__get_v4_integer_value(
name_to_v4_property[PROPERTY_NAME_INTERNAL_ID]))
else:
# Field gaiaid is required. Set it to a default value.
v3_user_value.set_gaiaid(0)
if PROPERTY_NAME_FEDERATED_IDENTITY in name_to_v4_property:
v3_user_value.set_federated_identity(
self.__get_v4_string_value(name_to_v4_property[
PROPERTY_NAME_FEDERATED_IDENTITY]))
if PROPERTY_NAME_FEDERATED_PROVIDER in name_to_v4_property:
v3_user_value.set_federated_provider(
self.__get_v4_string_value(name_to_v4_property[
PROPERTY_NAME_FEDERATED_PROVIDER]))
def v3_user_value_to_v4_entity(self, v3_user_value, v4_entity):
"""Converts a v3 UserValue to a v4 user Entity.
Args:
v3_user_value: an entity_pb.Property_UserValue
v4_entity: an entity_v4_pb.Entity to populate
"""
v4_entity.Clear()
v4_entity.property_list().append(
self.__v4_string_property(PROPERTY_NAME_EMAIL, v3_user_value.email(),
False))
v4_entity.property_list().append(self.__v4_string_property(
PROPERTY_NAME_AUTH_DOMAIN,
v3_user_value.auth_domain(), False))
# Ignore nickname.
if v3_user_value.gaiaid() != 0:
v4_entity.property_list().append(self.__v4_integer_property(
PROPERTY_NAME_INTERNAL_ID,
v3_user_value.gaiaid(),
False))
if v3_user_value.has_obfuscated_gaiaid():
v4_entity.property_list().append(self.__v4_string_property(
PROPERTY_NAME_USER_ID,
v3_user_value.obfuscated_gaiaid(),
False))
if v3_user_value.has_federated_identity():
v4_entity.property_list().append(self.__v4_string_property(
PROPERTY_NAME_FEDERATED_IDENTITY,
v3_user_value.federated_identity(),
False))
if v3_user_value.has_federated_provider():
v4_entity.property_list().append(self.__v4_string_property(
PROPERTY_NAME_FEDERATED_PROVIDER,
v3_user_value.federated_provider(),
False))
def v1_to_v3_reference(self, v1_key, v3_ref):
"""Converts a v1 Key to a v3 Reference.
Args:
v1_key: an googledatastore.Key
v3_ref: an entity_pb.Reference to populate
"""
v3_ref.Clear()
if v1_key.HasField('partition_id'):
project_id = v1_key.partition_id.project_id
if project_id:
app_id = self._id_resolver.resolve_app_id(project_id)
v3_ref.set_app(app_id)
if v1_key.partition_id.namespace_id:
v3_ref.set_name_space(v1_key.partition_id.namespace_id)
for v1_element in v1_key.path:
v3_element = v3_ref.mutable_path().add_element()
v3_element.set_type(v1_element.kind.encode('utf-8'))
id_type = v1_element.WhichOneof('id_type')
if id_type == 'id':
v3_element.set_id(v1_element.id)
elif id_type == 'name':
v3_element.set_name(v1_element.name.encode('utf-8'))
def v1_to_v3_references(self, v1_keys):
"""Converts a list of v1 Keys to a list of v3 References.
Args:
v1_keys: a list of googledatastore.Key objects
Returns:
a list of entity_pb.Reference objects
"""
v3_refs = []
for v1_key in v1_keys:
v3_ref = entity_pb.Reference()
self.v1_to_v3_reference(v1_key, v3_ref)
v3_refs.append(v3_ref)
return v3_refs
def v3_to_v1_key(self, v3_ref, v1_key):
"""Converts a v3 Reference to a v1 Key.
Args:
v3_ref: an entity_pb.Reference
v1_key: an googledatastore.Key to populate
"""
v1_key.Clear()
if not v3_ref.app():
return
project_id = self._id_resolver.resolve_project_id(v3_ref.app())
v1_key.partition_id.project_id = project_id
if v3_ref.name_space():
v1_key.partition_id.namespace_id = v3_ref.name_space()
for v3_element in v3_ref.path().element_list():
v1_element = v1_key.path.add()
v1_element.kind = v3_element.type()
if v3_element.has_id():
v1_element.id = v3_element.id()
if v3_element.has_name():
v1_element.name = v3_element.name()
def v3_to_v1_keys(self, v3_refs):
"""Converts a list of v3 References to a list of v1 Keys.
Args:
v3_refs: a list of entity_pb.Reference objects
Returns:
a list of googledatastore.Key objects
"""
v1_keys = []
for v3_ref in v3_refs:
v1_key = googledatastore.Key()
self.v3_to_v1_key(v3_ref, v1_key)
v1_keys.append(v1_key)
return v1_keys
def project_to_app_id(self, project_id):
"""Converts a string project id to a string app id."""
return self._id_resolver.resolve_app_id(project_id)
def app_to_project_id(self, app_id):
"""Converts a string app id to a string project id."""
return self._id_resolver.resolve_project_id(app_id)
def __new_v3_property(self, v3_entity, is_indexed):
if is_indexed:
return v3_entity.add_property()
else:
return v3_entity.add_raw_property()
  def v1_to_v3_entity(self, v1_entity, v3_entity, is_projection=False):
    """Converts a v1 Entity to a v3 EntityProto.

    Args:
      v1_entity: an googledatastore.Entity
      v3_entity: an entity_pb.EntityProto to populate
      is_projection: True if the v1_entity is from a projection query.
    """
    v3_entity.Clear()
    for property_name, v1_value in v1_entity.properties.iteritems():
      if v1_value.HasField('array_value'):
        if len(v1_value.array_value.values) == 0:
          # An empty list is encoded in v3 as a single no-value property
          # carrying the EMPTY_LIST meaning.
          empty_list = self.__new_v3_property(v3_entity,
                                              not v1_value.exclude_from_indexes)
          empty_list.set_name(property_name.encode('utf-8'))
          empty_list.set_multiple(False)
          empty_list.set_meaning(MEANING_EMPTY_LIST)
          empty_list.mutable_value()
        else:
          # Each array element fans out into its own v3 property with
          # multiple=True; indexing is decided per element.
          for v1_sub_value in v1_value.array_value.values:
            list_element = self.__new_v3_property(
                v3_entity, not v1_sub_value.exclude_from_indexes)
            self.v1_to_v3_property(
                property_name, True, is_projection, v1_sub_value, list_element)
      else:
        value_property = self.__new_v3_property(
            v3_entity, not v1_value.exclude_from_indexes)
        self.v1_to_v3_property(
            property_name, False, is_projection, v1_value, value_property)
    if v1_entity.HasField('key'):
      v1_key = v1_entity.key
      self.v1_to_v3_reference(v1_key, v3_entity.mutable_key())
      # The v3 entity group is derived from the converted key.
      v3_ref = v3_entity.key()
      self.v3_reference_to_group(v3_ref, v3_entity.mutable_entity_group())
    else:
      # Do NOT set v3_entity.key or .entity_group, even though they
      # are required.
      pass
def v3_to_v1_entity(self, v3_entity, v1_entity):
"""Converts a v3 EntityProto to a v1 Entity.
Args:
v3_entity: an entity_pb.EntityProto
v1_entity: an googledatastore.Proto to populate
"""
v1_entity.Clear()
self.v3_to_v1_key(v3_entity.key(), v1_entity.key)
if not v3_entity.key().has_app():
# Irreversible: v3_entity.key will change from empty to unset.
v1_entity.ClearField('key')
# v3_entity.entity_group is redundant.
# Ignore v3_entity.owner.
# Ignore v3_entity.kind.
# Ignore v3_entity.kind_url.
for v3_property in v3_entity.property_list():
self.__add_v1_property_to_entity(v1_entity, v3_property, True)
for v3_property in v3_entity.raw_property_list():
self.__add_v1_property_to_entity(v1_entity, v3_property, False)
  def v1_value_to_v3_property_value(self, v1_value, v3_value):
    """Converts a v1 Value to a v3 PropertyValue.

    Args:
      v1_value: an googledatastore.Value
      v3_value: an entity_pb.PropertyValue to populate
    """
    v3_value.Clear()
    field = v1_value.WhichOneof('value_type')
    if field == 'boolean_value':
      v3_value.set_booleanvalue(v1_value.boolean_value)
    elif field == 'integer_value':
      v3_value.set_int64value(v1_value.integer_value)
    elif field == 'double_value':
      v3_value.set_doublevalue(v1_value.double_value)
    elif field == 'timestamp_value':
      # v3 stores timestamps as int64 microseconds since the epoch.
      v3_value.set_int64value(
          googledatastore.helper.micros_from_timestamp(
              v1_value.timestamp_value))
    elif field == 'key_value':
      v3_ref = entity_pb.Reference()
      self.v1_to_v3_reference(v1_value.key_value, v3_ref)
      self.v3_reference_to_v3_property_value(v3_ref, v3_value)
    elif field == 'string_value':
      v3_value.set_stringvalue(v1_value.string_value.encode('utf-8'))
    elif field == 'blob_value':
      v3_value.set_stringvalue(v1_value.blob_value)
    elif field == 'entity_value':
      # Entity values dispatch on meaning: predefined user entities map
      # onto the dedicated v3 UserValue type.
      v1_entity_value = v1_value.entity_value
      v1_meaning = v1_value.meaning
      if v1_meaning == MEANING_PREDEFINED_ENTITY_USER:
        self.v1_entity_to_v3_user_value(v1_entity_value,
                                        v3_value.mutable_uservalue())
      else:
        # Arbitrary sub-entities are carried as a serialized EntityProto.
        v3_entity_value = entity_pb.EntityProto()
        self.v1_to_v3_entity(v1_entity_value, v3_entity_value)
        v3_value.set_stringvalue(v3_entity_value.SerializePartialToString())
    elif field == 'geo_point_value':
      point_value = v3_value.mutable_pointvalue()
      point_value.set_x(v1_value.geo_point_value.latitude)
      point_value.set_y(v1_value.geo_point_value.longitude)
    elif field == 'null_value':
      pass
    else:
      # No value set
      pass
  def v3_property_to_v1_value(self, v3_property, indexed, v1_value):
    """Converts a v3 Property to a v1 Value.

    Args:
      v3_property: an entity_pb.Property
      indexed: whether the v3 property is indexed
      v1_value: an googledatastore.Value to populate
    """
    v1_value.Clear()
    v3_property_value = v3_property.value()
    v3_meaning = v3_property.meaning()
    v3_uri_meaning = None
    if v3_property.meaning_uri():
      v3_uri_meaning = v3_property.meaning_uri()
    # Normalize the meanings first: discard any meaning that does not match
    # the value that is actually present.
    if not self.__is_v3_property_value_union_valid(v3_property_value):
      # Irreversible: Value with 2+ types will reduce to 1 type and discard
      # meanings.
      v3_meaning = None
      v3_uri_meaning = None
    elif v3_meaning == entity_pb.Property.NO_MEANING:
      v3_meaning = None
    elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                       v3_meaning):
      # Irreversible: Invalid meaning will be discarded.
      v3_meaning = None
    is_zlib_value = False
    if v3_uri_meaning:
      if v3_uri_meaning == URI_MEANING_ZLIB:
        if v3_property_value.has_stringvalue():
          is_zlib_value = True
          if v3_meaning != entity_pb.Property.BLOB:
            # Irreversible: Meaning will be lost.
            v3_meaning = entity_pb.Property.BLOB
        else:
          pass  # Irreversible: Zlib uri meaning will be lost.
      else:
        pass  # Irreversible: Non-zlib uri meaning will be lost.
    # Copy the typed value, if present.
    if v3_property.meaning() == entity_pb.Property.EMPTY_LIST:
      # A v3 EMPTY_LIST marker becomes an empty v1 array value.
      v1_value.array_value.values.extend([])
      v3_meaning = None
    elif v3_property_value.has_booleanvalue():
      v1_value.boolean_value = v3_property_value.booleanvalue()
    elif v3_property_value.has_int64value():
      if (v3_meaning == entity_pb.Property.GD_WHEN
          and is_in_rfc_3339_bounds(v3_property_value.int64value())):
        # Representable timestamps become v1 timestamp values; out-of-bounds
        # ones stay integers (see MEANING_NON_RFC_3339_TIMESTAMP).
        googledatastore.helper.micros_to_timestamp(
            v3_property_value.int64value(), v1_value.timestamp_value)
        v3_meaning = None
      else:
        v1_value.integer_value = v3_property_value.int64value()
    elif v3_property_value.has_doublevalue():
      v1_value.double_value = v3_property_value.doublevalue()
    elif v3_property_value.has_referencevalue():
      v3_ref = entity_pb.Reference()
      self.__v3_reference_value_to_v3_reference(
          v3_property_value.referencevalue(), v3_ref)
      self.v3_to_v1_key(v3_ref, v1_value.key_value)
    elif v3_property_value.has_stringvalue():
      if v3_meaning == entity_pb.Property.ENTITY_PROTO:
        serialized_entity_v3 = v3_property_value.stringvalue()
        v3_entity = entity_pb.EntityProto()
        # The v3 entity may have been serialized without a key, hence the
        # partial parse.
        v3_entity.ParsePartialFromString(serialized_entity_v3)
        self.v3_to_v1_entity(v3_entity, v1_value.entity_value)
        v3_meaning = None
      elif (v3_meaning == entity_pb.Property.BLOB
            or v3_meaning == entity_pb.Property.BYTESTRING):
        v1_value.blob_value = v3_property_value.stringvalue()
        # Only preserve meaning for unindexed BYTESTRING.
        if indexed or v3_meaning == entity_pb.Property.BLOB:
          v3_meaning = None
      else:
        string_value = v3_property_value.stringvalue()
        if is_valid_utf8(string_value):
          v1_value.string_value = string_value
        else:
          # The "string" is not a valid utf8 string. Convert it to a blob.
          v1_value.blob_value = string_value
          # Discard the meaning with the conversion, except meaning index.
          if v3_meaning != entity_pb.Property.INDEX_VALUE:
            v3_meaning = None
          # Irreversible: Non-utf8 "string" will change to blob (and lose
          # meaning).
    elif v3_property_value.has_pointvalue():
      if v3_meaning != MEANING_GEORSS_POINT:
        # Mark points that lacked the v3 georss meaning so they round-trip.
        v1_value.meaning = MEANING_POINT_WITHOUT_V3_MEANING
      point_value = v3_property_value.pointvalue()
      v1_value.geo_point_value.latitude = point_value.x()
      v1_value.geo_point_value.longitude = point_value.y()
      v3_meaning = None
    elif v3_property_value.has_uservalue():
      self.v3_user_value_to_v1_entity(v3_property_value.uservalue(),
                                      v1_value.entity_value)
      v1_value.meaning = MEANING_PREDEFINED_ENTITY_USER
      v3_meaning = None
    else:
      # v3 value is null.
      v1_value.null_value = googledatastore.NULL_VALUE
    # Zlib wins over any surviving v3 meaning.
    if is_zlib_value:
      v1_value.meaning = MEANING_ZLIB
    elif v3_meaning:
      v1_value.meaning = v3_meaning
    # Set v1 indexed only if the current (default) v1 value is unhelpful.
    if indexed == v1_value.exclude_from_indexes:
      v1_value.exclude_from_indexes = not indexed
  def v1_to_v3_property(self, property_name, is_multi, is_projection,
                        v1_value, v3_property):
    """Converts info from a v1 Property to a v3 Property.

    v1_value must not have an array_value.

    Args:
      property_name: the name of the property, unicode
      is_multi: whether the property contains multiple values
      is_projection: whether the property is projected
      v1_value: an googledatastore.Value
      v3_property: an entity_pb.Property to populate
    """
    v1_value_type = v1_value.WhichOneof('value_type')
    if v1_value_type == 'array_value':
      assert False, 'v1 array_value not convertable to v3'
    v3_property.Clear()
    v3_property.set_name(property_name.encode('utf-8'))
    v3_property.set_multiple(is_multi)
    self.v1_value_to_v3_property_value(v1_value, v3_property.mutable_value())
    v1_meaning = None
    if v1_value.meaning:
      v1_meaning = v1_value.meaning
    # Derive the v3 meaning from the v1 value type; several types carry
    # implicit meanings in v3.
    if v1_value_type == 'timestamp_value':
      v3_property.set_meaning(entity_pb.Property.GD_WHEN)
    elif v1_value_type == 'blob_value':
      if v1_meaning == MEANING_ZLIB:
        v3_property.set_meaning_uri(URI_MEANING_ZLIB)
      if v1_meaning == entity_pb.Property.BYTESTRING:
        if not v1_value.exclude_from_indexes:
          pass
          # Irreversible: Blob with redundant byte string meaning will lose it
          # when converted back to v1.
      else:
        if not v1_value.exclude_from_indexes:
          v3_property.set_meaning(entity_pb.Property.BYTESTRING)
        else:
          v3_property.set_meaning(entity_pb.Property.BLOB)
        v1_meaning = None
    elif v1_value_type == 'entity_value':
      if v1_meaning != MEANING_PREDEFINED_ENTITY_USER:
        v3_property.set_meaning(entity_pb.Property.ENTITY_PROTO)
      v1_meaning = None
    elif v1_value_type == 'geo_point_value':
      if v1_meaning != MEANING_POINT_WITHOUT_V3_MEANING:
        v3_property.set_meaning(MEANING_GEORSS_POINT)
      v1_meaning = None
    elif v1_value_type == 'integer_value':
      if v1_meaning == MEANING_NON_RFC_3339_TIMESTAMP:
        # Out-of-bounds v1 timestamps round-trip as integers tagged with
        # this private meaning; restore the v3 GD_WHEN meaning here.
        v3_property.set_meaning(entity_pb.Property.GD_WHEN)
        v1_meaning = None
    else:
      # Null or primitive value; do nothing.
      pass
    if v1_meaning is not None:
      v3_property.set_meaning(v1_meaning)
    # If the value is in a projection, we should override the meaning.
    if is_projection:
      v3_property.set_meaning(entity_pb.Property.INDEX_VALUE)
def __add_v1_property_to_entity(self, v1_entity, v3_property, indexed):
"""Adds a v1 Property to an entity or modifies an existing one.
Args:
v1_entity: an googledatastore.Entity
v3_property: an entity_pb.Property to convert to v1 and add to the dict
indexed: whether the property is indexed
"""
property_name = v3_property.name()
v1_value = v1_entity.properties[property_name]
if v3_property.multiple():
self.v3_property_to_v1_value(v3_property, indexed,
v1_value.array_value.values.add())
else:
self.v3_property_to_v1_value(v3_property, indexed, v1_value)
def __get_v1_integer_value(self, v1_value):
"""Returns an integer value from a v1 Value.
Args:
v1_value: a googledatastore.Value
Returns:
an integer
Raises:
InvalidConversionError: if the value doesn't contain an integer value
"""
check_conversion(v1_value.HasField('integer_value'),
'Value does not contain an integer value.')
return v1_value.integer_value
def __get_v1_double_value(self, v1_value):
"""Returns a double value from a v1 Value.
Args:
v1_value: an googledatastore.Value
Returns:
a double
Raises:
InvalidConversionError: if the value doesn't contain a double value
"""
check_conversion(v1_value.HasField('double_value'),
'Value does not contain a double value.')
return v1_value.double_value
def __get_v1_string_value(self, v1_value):
"""Returns an string value from a v1 Value.
Args:
v1_value: an googledatastore.Value
Returns:
a string
Throws:
InvalidConversionError: if the value doesn't contain a string value
"""
check_conversion(v1_value.HasField('string_value'),
'Value does not contain a string value.')
return v1_value.string_value.encode('utf-8')
def __v1_integer_property(self, entity, name, value, indexed):
"""Populates a single-integer-valued v1 Property.
Args:
entity: the entity to populate
name: the name of the property to populate
value: the integer value of the property
indexed: whether the value should be indexed
"""
v1_value = entity.properties[name]
v1_value.exclude_from_indexes = not indexed
v1_value.integer_value = value
def __v1_double_property(self, entity, name, value, indexed):
"""Populates a single-double-valued v1 Property.
Args:
entity: the entity to populate
name: the name of the property to populate
value: the double value of the property
indexed: whether the value should be indexed
"""
v1_value = entity.properties[name]
v1_value.exclude_from_indexes = not indexed
v1_value.double_value = value
def __v1_string_property(self, entity, name, value, indexed):
"""Populates a single-string-valued v1 Property.
Args:
entity: the entity to populate
name: the name of the property to populate
value: the string value of the property
indexed: whether the value should be indexed
"""
v1_value = entity.properties[name]
v1_value.exclude_from_indexes = not indexed
v1_value.string_value = value
def v1_entity_to_v3_user_value(self, v1_user_entity, v3_user_value):
    """Converts a v1 user Entity to a v3 UserValue.

    Args:
        v1_user_entity: a googledatastore.Entity representing a user
        v3_user_value: an entity_pb.Property_UserValue to populate
    """
    v3_user_value.Clear()
    props = v1_user_entity.properties

    # Email and auth domain must always be present on a v1 user entity.
    v3_user_value.set_email(
        self.__get_v1_string_value(props[PROPERTY_NAME_EMAIL]))
    v3_user_value.set_auth_domain(
        self.__get_v1_string_value(props[PROPERTY_NAME_AUTH_DOMAIN]))
    if PROPERTY_NAME_USER_ID in props:
        v3_user_value.set_obfuscated_gaiaid(
            self.__get_v1_string_value(props[PROPERTY_NAME_USER_ID]))
    if PROPERTY_NAME_INTERNAL_ID in props:
        v3_user_value.set_gaiaid(
            self.__get_v1_integer_value(props[PROPERTY_NAME_INTERNAL_ID]))
    else:
        # gaiaid is a required v3 field; default it when the v1 entity
        # carries no internal id.
        v3_user_value.set_gaiaid(0)
    if PROPERTY_NAME_FEDERATED_IDENTITY in props:
        v3_user_value.set_federated_identity(
            self.__get_v1_string_value(props[PROPERTY_NAME_FEDERATED_IDENTITY]))
    if PROPERTY_NAME_FEDERATED_PROVIDER in props:
        v3_user_value.set_federated_provider(
            self.__get_v1_string_value(props[PROPERTY_NAME_FEDERATED_PROVIDER]))
def v3_user_value_to_v1_entity(self, v3_user_value, v1_entity):
    """Converts a v3 UserValue to a v1 user Entity.

    Args:
        v3_user_value: an entity_pb.Property_UserValue
        v1_entity: a googledatastore.Entity to populate
    """
    v1_entity.Clear()
    self.__v1_string_property(v1_entity, PROPERTY_NAME_EMAIL,
                              v3_user_value.email(), False)
    self.__v1_string_property(v1_entity, PROPERTY_NAME_AUTH_DOMAIN,
                              v3_user_value.auth_domain(), False)
    # The v3 nickname field is intentionally dropped.
    if v3_user_value.gaiaid() != 0:
        self.__v1_integer_property(v1_entity, PROPERTY_NAME_INTERNAL_ID,
                                   v3_user_value.gaiaid(), False)
    if v3_user_value.has_obfuscated_gaiaid():
        self.__v1_string_property(v1_entity, PROPERTY_NAME_USER_ID,
                                  v3_user_value.obfuscated_gaiaid(), False)
    if v3_user_value.has_federated_identity():
        self.__v1_string_property(v1_entity, PROPERTY_NAME_FEDERATED_IDENTITY,
                                  v3_user_value.federated_identity(), False)
    if v3_user_value.has_federated_provider():
        self.__v1_string_property(v1_entity, PROPERTY_NAME_FEDERATED_PROVIDER,
                                  v3_user_value.federated_provider(), False)
def __is_v3_property_value_union_valid(self, v3_property_value):
    """Returns True if at most one of the PropertyValue's sub-values is set."""
    present = sum((
        v3_property_value.has_booleanvalue(),
        v3_property_value.has_int64value(),
        v3_property_value.has_doublevalue(),
        v3_property_value.has_referencevalue(),
        v3_property_value.has_stringvalue(),
        v3_property_value.has_pointvalue(),
        v3_property_value.has_uservalue(),
    ))
    return present <= 1
def __is_v3_property_value_meaning_valid(self, v3_property_value, v3_meaning):
    """Returns True if the v3 PropertyValue's type value matches its meaning.

    Unknown meanings are considered invalid.
    """
    # Meanings that impose no constraint on the stored value.
    always_valid = (entity_pb.Property.NO_MEANING,
                    entity_pb.Property.INDEX_VALUE,
                    entity_pb.Property.EMPTY_LIST)
    # Meanings that require a string sub-value.
    string_meanings = (entity_pb.Property.BLOB,
                       entity_pb.Property.TEXT,
                       entity_pb.Property.BYTESTRING,
                       entity_pb.Property.ATOM_CATEGORY,
                       entity_pb.Property.ATOM_LINK,
                       entity_pb.Property.ATOM_TITLE,
                       entity_pb.Property.ATOM_CONTENT,
                       entity_pb.Property.ATOM_SUMMARY,
                       entity_pb.Property.ATOM_AUTHOR,
                       entity_pb.Property.GD_EMAIL,
                       entity_pb.Property.GD_IM,
                       entity_pb.Property.GD_PHONENUMBER,
                       entity_pb.Property.GD_POSTALADDRESS,
                       entity_pb.Property.BLOBKEY,
                       entity_pb.Property.ENTITY_PROTO)
    # Meanings that require an int64 sub-value.
    int64_meanings = (entity_pb.Property.GD_WHEN,
                      entity_pb.Property.GD_RATING)

    if v3_meaning in always_valid:
        return True
    if v3_meaning in string_meanings:
        return v3_property_value.has_stringvalue()
    if v3_meaning in int64_meanings:
        return v3_property_value.has_int64value()
    if v3_meaning == entity_pb.Property.GEORSS_POINT:
        return v3_property_value.has_pointvalue()
    return False
def __v3_reference_has_id_or_name(self, v3_ref):
    """Determines if a v3 Reference specifies an ID or name.

    Args:
        v3_ref: an entity_pb.Reference

    Returns:
        True if the last path element specifies an ID or a name.
    """
    ref_path = v3_ref.path()
    assert ref_path.element_size() >= 1
    tail = ref_path.element(ref_path.element_size() - 1)
    return tail.has_id() or tail.has_name()
def v3_reference_to_group(self, v3_ref, group):
    """Converts a v3 Reference to a v3 Path naming its entity group.

    The entity group is identified by the first element of the key path,
    so the output Path holds exactly that one element.

    Args:
        v3_ref: an entity_pb.Reference
        group: an entity_pb.Path to populate
    """
    group.Clear()
    source_path = v3_ref.path()
    assert source_path.element_size() >= 1
    group.add_element().CopyFrom(source_path.element(0))
def v3_reference_to_v3_property_value(self, v3_ref, v3_property_value):
    """Converts a v3 Reference to a v3 PropertyValue holding a ReferenceValue.

    Args:
        v3_ref: an entity_pb.Reference
        v3_property_value: an entity_pb.PropertyValue to populate
    """
    v3_property_value.Clear()
    ref_value = v3_property_value.mutable_referencevalue()
    if v3_ref.has_app():
        ref_value.set_app(v3_ref.app())
    if v3_ref.has_name_space():
        ref_value.set_name_space(v3_ref.name_space())
    # Copy the key path element by element; every field is optional.
    for element in v3_ref.path().element_list():
        out_element = ref_value.add_pathelement()
        if element.has_type():
            out_element.set_type(element.type())
        if element.has_id():
            out_element.set_id(element.id())
        if element.has_name():
            out_element.set_name(element.name())
def __v3_reference_value_to_v3_reference(self, v3_ref_value, v3_ref):
    """Converts a v3 ReferenceValue back into a v3 Reference.

    Args:
        v3_ref_value: an entity_pb.PropertyValue_ReferenceValue
        v3_ref: an entity_pb.Reference to populate
    """
    v3_ref.Clear()
    if v3_ref_value.has_app():
        v3_ref.set_app(v3_ref_value.app())
    if v3_ref_value.has_name_space():
        v3_ref.set_name_space(v3_ref_value.name_space())
    # Rebuild the key path element by element; every field is optional.
    for element in v3_ref_value.pathelement_list():
        out_element = v3_ref.mutable_path().add_element()
        if element.has_type():
            out_element.set_type(element.type())
        if element.has_id():
            out_element.set_id(element.id())
        if element.has_name():
            out_element.set_name(element.name())
class _QueryConverter(object):
    """Base converter for v3 queries to the v1 and v4 wire formats.

    All filter/order conversions share a single entity converter, which
    translates the keys and property values embedded in filters.
    """

    def __init__(self, entity_converter):
        # Converter used for keys and property values inside filters.
        self._entity_converter = entity_converter

    def get_entity_converter(self):
        """Returns the entity converter shared by this query converter."""
        return self._entity_converter

    def _v3_filter_to_v1_property_filter(self, v3_filter, v1_property_filter):
        """Converts a v3 Filter to a v1 PropertyFilter.

        Args:
            v3_filter: a datastore_pb.Filter
            v1_property_filter: a googledatastore.PropertyFilter to populate

        Raises:
            InvalidConversionError: if the filter cannot be converted
        """
        check_conversion(v3_filter.property_size() == 1,
                         'invalid filter')
        # Ops <= 5 are the simple comparison operators shared by v3 and v1.
        check_conversion(v3_filter.op() <= 5,
                         'unsupported filter op: %d' % v3_filter.op())
        v1_property_filter.Clear()
        v1_property_filter.op = v3_filter.op()
        v1_property_filter.property.name = v3_filter.property(0).name()
        self._entity_converter.v3_property_to_v1_value(
            v3_filter.property(0), True, v1_property_filter.value)

    def _v3_query_to_v1_ancestor_filter(self, v3_query, v1_property_filter):
        """Converts a v3 Query to a v1 ancestor (or parent) PropertyFilter.

        Args:
            v3_query: a datastore_pb.Query
            v1_property_filter: a googledatastore.PropertyFilter to populate
        """
        v1_property_filter.Clear()
        # Fix: v1 messages use attribute assignment, exactly as in
        # _v3_filter_to_v1_property_filter above (and as the null_value
        # branch below already did); the previous set_operator()/set_name()/
        # mutable_key_value calls are v3/v4-style accessors that do not
        # exist on a v1 proto.
        if v3_query.shallow():
            v1_property_filter.op = googledatastore.PropertyFilter.HAS_PARENT
        else:
            v1_property_filter.op = googledatastore.PropertyFilter.HAS_ANCESTOR
        v1_property_filter.property.name = PROPERTY_NAME_KEY
        if v3_query.has_ancestor():
            self._entity_converter.v3_to_v1_key(
                v3_query.ancestor(),
                v1_property_filter.value.key_value)
        else:
            # A shallow query without an ancestor matches root entities,
            # whose parent is the null key.
            v1_property_filter.value.null_value = googledatastore.NULL_VALUE

    def v3_order_to_v1_order(self, v3_order, v1_order):
        """Converts a v3 Query order to a v1 PropertyOrder.

        Args:
            v3_order: a datastore_pb.Query.Order
            v1_order: a googledatastore.PropertyOrder to populate
        """
        v1_order.property.name = v3_order.property()
        if v3_order.has_direction():
            v1_order.direction = v3_order.direction()

    def _v3_filter_to_v4_property_filter(self, v3_filter, v4_property_filter):
        """Converts a v3 Filter to a v4 PropertyFilter.

        Args:
            v3_filter: a datastore_pb.Filter
            v4_property_filter: a datastore_v4_pb.PropertyFilter to populate

        Raises:
            InvalidConversionError: if the filter cannot be converted
        """
        check_conversion(v3_filter.property_size() == 1,
                         'invalid filter')
        check_conversion(v3_filter.op() <= 5,
                         'unsupported filter op: %d' % v3_filter.op())
        v4_property_filter.Clear()
        # v4 protos use the old-style generated setters.
        v4_property_filter.set_operator(v3_filter.op())
        v4_property_filter.mutable_property().set_name(v3_filter.property(0).name())
        self._entity_converter.v3_property_to_v4_value(
            v3_filter.property(0), True, v4_property_filter.mutable_value())

    def _v3_query_to_v4_ancestor_filter(self, v3_query, v4_property_filter):
        """Converts a v3 Query to a v4 ancestor PropertyFilter.

        Args:
            v3_query: a datastore_pb.Query
            v4_property_filter: a datastore_v4_pb.PropertyFilter to populate
        """
        v4_property_filter.Clear()
        v4_property_filter.set_operator(
            datastore_v4_pb.PropertyFilter.HAS_ANCESTOR)
        prop = v4_property_filter.mutable_property()
        prop.set_name(PROPERTY_NAME_KEY)
        self._entity_converter.v3_to_v4_key(
            v3_query.ancestor(),
            v4_property_filter.mutable_value().mutable_key_value())

    def v3_order_to_v4_order(self, v3_order, v4_order):
        """Converts a v3 Query order to a v4 PropertyOrder.

        Args:
            v3_order: a datastore_pb.Query.Order
            v4_order: a datastore_v4_pb.PropertyOrder to populate
        """
        v4_order.mutable_property().set_name(v3_order.property())
        if v3_order.has_direction():
            v4_order.set_direction(v3_order.direction())
def get_entity_converter(id_resolver=None):
    """Returns a converter for v3 and v1 entities and keys.

    Args:
        id_resolver: an IdResolver used for project id resolution; an
            identity resolver is substituted when none is supplied.
    """
    resolver = id_resolver or _IdentityIdResolver()
    return _EntityConverter(resolver)
| 36.298438
| 84
| 0.704222
|
f368eebfe6bf29fe1dc0c0cefb40068da9bb8491
| 220
|
py
|
Python
|
_draft/x_7_5-c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
_draft/x_7_5-c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | 1
|
2021-11-13T08:03:04.000Z
|
2021-11-13T08:03:04.000Z
|
_draft/x_7_5-c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
# x_7_5
#
# Demonstrates the difference between the regex quantifiers `.*`
# (zero or more characters) and `.+` (at least one character).
import re

text_1 = 'ももさん'
text_2 = 'ももたろさん'

a = r'もも.*さん'
b = r'もも.+さん'

# Same output order as matching pattern a then b against both texts.
for pattern in (a, b):
    for text in (text_1, text_2):
        print(bool(re.match(pattern, text)))
| 12.941176
| 32
| 0.631818
|
cf5684d859e38932f55c117bc86aa0a9f73d2f1e
| 356
|
py
|
Python
|
Exercises/Exercise2EspecialParameters.py
|
davidavg/OOP_Python
|
ca4e8376a50b9c81b5ac18c466bd8d147bdbe679
|
[
"MIT"
] | null | null | null |
Exercises/Exercise2EspecialParameters.py
|
davidavg/OOP_Python
|
ca4e8376a50b9c81b5ac18c466bd8d147bdbe679
|
[
"MIT"
] | null | null | null |
Exercises/Exercise2EspecialParameters.py
|
davidavg/OOP_Python
|
ca4e8376a50b9c81b5ac18c466bd8d147bdbe679
|
[
"MIT"
] | null | null | null |
'''
Created on Aug 13, 2018
@author: david avalos
'''
class Calculator:
    """Two-operand calculator exercise; results are printed, not returned."""

    def __init__(self, a, b):
        # Store both operands on the instance.
        self.a, self.b = a, b

    def addition(self):
        """Print the sum of both operands."""
        print(self.a + self.b)

    def subtraction(self):
        """Print the difference of the operands (a - b)."""
        print(self.a - self.b)


obj = Calculator(5, 2)
obj.addition()
obj.subtraction()
| 14.833333
| 29
| 0.511236
|
b19d012e18327e9821be1dcb646b2e9193ec47f5
| 4,301
|
py
|
Python
|
functions/pairing.py
|
saulius-lipkevicius/Transformed_GA
|
599bbcd93f7e59417586606e04945d860447b2c3
|
[
"MIT"
] | null | null | null |
functions/pairing.py
|
saulius-lipkevicius/Transformed_GA
|
599bbcd93f7e59417586606e04945d860447b2c3
|
[
"MIT"
] | null | null | null |
functions/pairing.py
|
saulius-lipkevicius/Transformed_GA
|
599bbcd93f7e59417586606e04945d860447b2c3
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import argparse
import os
# Silence pandas' SettingWithCopyWarning; balanceData assigns into a
# filtered copy on purpose.
pd.options.mode.chained_assignment = None  # default='warn'

parser = argparse.ArgumentParser(description='Breeder acquires new aptamers...')
# NOTE(review): each option declares two long flags; argparse uses the
# first one ("--p", "--h", ...) as the attribute name (args.p, args.h, ...).
parser.add_argument("--p", "--path_initial_aptamers"
                    , help="Path to evaluatedd aptamers with M.A.W.S"
                    , type=str)
# NOTE(review): the help text for --h looks copy-pasted from --p; the
# flag is read by pairWithoutLabel as the CSV of sequences to pair.
parser.add_argument("--h", "--later"
                    , help="Path to evaluatedd aptamers with M.A.W.S"
                    , type=str)
parser.add_argument("--o", "--output_path"
                    , help="Path to save new generation of aptamers"
                    , type=str)
parser.add_argument("--i", "--iter"
                    , help="The iteration number"
                    , type=str)
parser.add_argument("--l", "--labeled"
                    , help="Is data labeled"
                    , action="store_true")
# Parsed once at import time; everything below reads the global `args`.
args = parser.parse_args()
def pairWithLabel(df):
    """Builds every unordered pair of aptamers from *df* and labels it.

    For each pair (i, j) with i < j, the label is 1 when the first
    sequence's entropy is >= the second's, otherwise 0.  Rows are
    returned in random order.

    Args:
        df: DataFrame with 'Sequence' and 'Entropy' columns and a
            default RangeIndex.

    Returns:
        A shuffled DataFrame with columns Sequence1, Sequence2, Label.
    """
    from itertools import combinations

    # Build all C(n, 2) pairs in one pass.  The previous implementation
    # appended one frame per row (quadratic) and used DataFrame.append,
    # which was removed in pandas 2.0.
    pairs = [
        (df.loc[i, 'Sequence'], df.loc[j, 'Sequence'],
         1 if df.loc[i, 'Entropy'] >= df.loc[j, 'Entropy'] else 0)
        for i, j in combinations(range(len(df)), 2)
    ]
    dataset = pd.DataFrame(pairs, columns=['Sequence1', 'Sequence2', 'Label'])
    # Shuffle with pandas itself rather than sklearn.utils.shuffle; the
    # row order was random either way.
    dataset = dataset.sample(frac=1)
    return dataset[['Sequence1', 'Sequence2', 'Label']]
def pairWithoutLabel():
    """Builds every unordered pair of aptamers read from ``args.h``.

    Reads the CSV named by the --h command line flag and pairs each
    sequence with every later one, preserving the original (i, j),
    i < j ordering of the old implementation.

    Returns:
        A DataFrame with columns Sequence1 and Sequence2.
    """
    from itertools import combinations

    df = pd.read_csv(args.h)
    # Replaces the quadratic per-row DataFrame.append loop; append was
    # removed in pandas 2.0.
    pairs = list(combinations(df['Sequence'], 2))
    dataset = pd.DataFrame(pairs, columns=['Sequence1', 'Sequence2'])
    return dataset[['Sequence1', 'Sequence2']]
def balanceData(dataset):
    """Balances the binary 'Label' column of *dataset* in place.

    Half of the surplus rows of the majority class are flipped to the
    minority class so both labels occur (roughly) equally often; the
    rows to flip are picked at random.

    Args:
        dataset: DataFrame with a 'Label' column of 0s and 1s.

    Returns:
        The same DataFrame, with 'Label' rebalanced and cast to int.
    """
    counts = dataset['Label'].value_counts()
    # .get() tolerates a frame where one class is absent entirely; the
    # previous .loc lookups raised KeyError in that case.
    zeros, ones = counts.get(0, 0), counts.get(1, 0)
    if zeros >= ones:
        majority, change = 0, (zeros - ones) // 2
    else:
        majority, change = 1, (ones - zeros) // 2

    # Flip `change` randomly chosen majority rows to the other class.
    flip_index = dataset[dataset['Label'] == majority].sample(change).index
    dataset.loc[flip_index, 'Label'] = 1 - majority
    dataset['Label'] = dataset['Label'].astype(int)
    return dataset
def main():
    """Pairs aptamers: labeled training data (--l) or unlabeled GA data."""
    if not args.l:
        # Later GA iterations: pair without labels and store per iteration.
        dataset = pairWithoutLabel()
        print("Saving new generation {} to {}".format(args.i, args.o))
        dataset.to_csv('{}/iteration_{}.csv'.format(args.o, args.i),
                       encoding='utf-8', index=False)
        return

    df = pd.read_csv(args.p)
    path = './datasets/training/'
    # Pair every aptamer with every other, then balance classes 1 and 0
    # so both are trained on equally.
    dataset = balanceData(pairWithLabel(df))
    # 80% training, 10% validating, 10% testing.
    train, test, val = np.split(
        dataset, [int(.8 * len(dataset)), int(.9 * len(dataset))])
    # The top N aptamers by entropy seed GA iteration 0.
    top = df.nlargest(200, 'Entropy')

    print("Migrating preprocessed training data to {}".format(path))
    dataset.to_csv('./datasets/training/full_comparison.csv',
                   encoding='utf-8', index=False)
    train.to_csv('./datasets/training/train.csv', encoding='utf-8', index=False)
    test.to_csv('./datasets/training/test.csv', encoding='utf-8', index=False)
    val.to_csv('./datasets/training/val.csv', encoding='utf-8', index=False)

    folder = './datasets/ga_interim_data/{}'.format(args.o)
    if not os.path.exists(folder):
        os.mkdir(folder)
    top.to_csv('{}/top_iter_0.csv'.format(folder), encoding='utf-8',
               index=False)


if __name__ == "__main__":
    main()
| 34.96748
| 135
| 0.601721
|
ee42d55efee551b7bc9cc93e9d51c60ffe056fc7
| 3,098
|
py
|
Python
|
arakat-core/examples/demo/SimpleExample.py
|
sopaoglu/arakat
|
efa32fcc93076801cad24ab850ecdf9048a824e8
|
[
"Apache-2.0"
] | 23
|
2018-08-18T17:32:40.000Z
|
2021-10-05T22:57:06.000Z
|
arakat-core/examples/demo/SimpleExample.py
|
sopaoglu/arakat
|
efa32fcc93076801cad24ab850ecdf9048a824e8
|
[
"Apache-2.0"
] | 23
|
2018-09-22T08:47:07.000Z
|
2021-08-04T07:08:34.000Z
|
arakat-core/examples/demo/SimpleExample.py
|
sopaoglu/arakat
|
efa32fcc93076801cad24ab850ecdf9048a824e8
|
[
"Apache-2.0"
] | 22
|
2018-08-17T10:33:31.000Z
|
2021-10-05T22:57:07.000Z
|
from src.pipeline_generator.generators import PipelineGenerator
# Three-node demo pipeline: read a CSV from HDFS, run Spark's describe()
# over it, and write the statistics back to HDFS as Parquet.  All three
# nodes belong to a single task ("task1"); edges carry dataframes.
data = {
    "graph": {
        "nodes": {
            # Node 1: batch CSV reader for the wine dataset.
            "node1":
                {
                    "id": "node1",
                    "parent": "task1",
                    "name": "Batch Read from CSV",
                    "category": 0,
                    "node_id": 47,
                    "node_type": 0,
                    "family": 0,
                    "compatible_with_stream": False,
                    "compatible_stream_output_modes": [],
                    "compatible_with_spark_pipeline": False,
                    "is_splitter": False,
                    "produces_model": False,
                    "can_infer_schema": True,
                    "file_type": "csv",
                    "parameters": {
                        "path": {"value": "hdfs://namenode:9000/Demo/SimpleExample/data/wine_data.csv", "type": "string"},
                        "header": {"value": False, "type": "boolean"},
                        "sep": {"value": ",", "type": "string"},
                        "quote": {"value": '\\\"', "type": "string"}
                    }
                },
            # Node 2: descriptive statistics ("describe" dataframe op).
            "node2":
                {
                    "id": "node2",
                    "parent": "task1",
                    "node_id": 7,
                    "name": "Descriptive Statistics",
                    "category": 14,
                    "node_type": 0,
                    "family": 5,
                    "compatible_with_stream": False,
                    "compatible_stream_output_modes": [],
                    "compatible_with_spark_pipeline": False,
                    "is_splitter": False,
                    "produces_model": False,
                    "ddfo_name": "describe",
                    "parameters": {}
                },
            # Node 3: batch Parquet writer for the computed statistics.
            "node3":
                {
                    "id": "node3",
                    "parent": "task1",
                    "node_id": 61,
                    "name": "Batch Write to Parquet",
                    "category": 1,
                    "node_type": 0,
                    "family": 2,
                    "compatible_with_stream": False,
                    "compatible_stream_output_modes": [],
                    "compatible_with_spark_pipeline": False,
                    "is_splitter": False,
                    "produces_model": False,
                    "file_type": "parquet",
                    "parameters": {
                        "path": {"value": "hdfs://namenode:9000/Demo/SimpleExample/results/wine_statistics.parquet", "type": "string"}
                    }
                },
            # Container task grouping the three nodes above.
            "task1": {
                "id": "task1",
                "parent": None,
                "node_type": 1
            }
        },
        # Dataframe flow: node1 -> node2 -> node3.
        "edges": {
            "node1-node2": {"type": "dataframe"},
            "node2-node3": {"type": "dataframe"}
        }
    },
    # Airflow DAG settings for the generated pipeline.
    "dag_properties": {
        "app_id": "Demo_SimpleExample",
        "bash_command": "sh /usr/local/shell_scripts/run.sh",
        "schedule_interval": "@once",
        "default_args": {
            "owner": "airflow",
            "start_date": "01/01/2018"
        }
    }
}

# Generate the pipeline; returns the code artifacts plus status/errors.
code_info, success, errors, additional_info = PipelineGenerator.generate_pipeline(data["graph"], data["dag_properties"])
| 35.609195
| 130
| 0.428018
|
aa1d71029c68cc182a8dd3338a23085fa6c2514b
| 15,656
|
py
|
Python
|
buildozer/targets/ios.py
|
pavelsof/buildozer
|
4333b9c0480aa339f9eccfa82a5a02f4ebc81198
|
[
"MIT"
] | null | null | null |
buildozer/targets/ios.py
|
pavelsof/buildozer
|
4333b9c0480aa339f9eccfa82a5a02f4ebc81198
|
[
"MIT"
] | null | null | null |
buildozer/targets/ios.py
|
pavelsof/buildozer
|
4333b9c0480aa339f9eccfa82a5a02f4ebc81198
|
[
"MIT"
] | null | null | null |
'''
iOS target, based on kivy-ios project
'''
import sys

# Building for iOS needs Xcode, which only runs on macOS.  The previous
# message ("Windows platform not yet working for Android") was copy-pasted
# from another target and described neither this platform nor this target.
if sys.platform != 'darwin':
    raise NotImplementedError('The iOS target requires macOS (darwin).')
import plistlib
from buildozer import BuildozerCommandException
from buildozer.target import Target, no_config
from os.path import join, basename, expanduser, realpath
from getpass import getpass
# PHP page served next to the built .ipa for over-the-air ("wireless app
# distribution") installs: it links the provisioning profile and builds
# the itms-services:// manifest URL.  NOTE(review): the {appname}
# placeholder is presumably substituted by the caller, but the literal CSS
# braces (`li { padding: 1em; }`) mean plain str.format cannot be used
# directly -- verify the substitution mechanism at the point of use.
PHP_TEMPLATE = '''
<?php
// credits goes to http://jeffreysambells.com/2010/06/22/ios-wireless-app-distribution
$ipas = glob('*.ipa');
$provisioningProfiles = glob('*.mobileprovision');
$plists = glob('*.plist');
$sr = stristr( $_SERVER['SCRIPT_URI'], '.php' ) === false ?
$_SERVER['SCRIPT_URI'] : dirname($_SERVER['SCRIPT_URI']) . '/';
$provisioningProfile = $sr . $provisioningProfiles[0];
$ipa = $sr . $ipas[0];
$itmsUrl = urlencode( $sr . 'index.php?plist=' . str_replace( '.plist', '', $plists[0] ) );
if ($_GET['plist']) {
$plist = file_get_contents( dirname(__FILE__)
. DIRECTORY_SEPARATOR
. preg_replace( '/![A-Za-z0-9-_]/i', '', $_GET['plist']) . '.plist' );
$plist = str_replace('_URL_', $ipa, $plist);
header('content-type: application/xml');
echo $plist;
die();
}
?><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Install {appname}</title>
<style type="text/css">
li { padding: 1em; }
</style>
</head>
<body>
<ul>
<li><a href="<? echo $provisioningProfile; ?>">Install Team Provisioning File</a></li>
<li><a href="itms-services://?action=download-manifest&url=<? echo $itmsUrl; ?>">
Install Application</a></li>
</ul>
</body>
</html>
'''
class TargetIos(Target):
    """Buildozer target that builds and packages iOS apps via kivy-ios.

    Relies on the Xcode command line tools (xcodebuild, xcode-select) and
    on the kivy-ios / ios-deploy checkouts that buildozer manages.
    """

    targetname = "ios"

    def check_requirements(self):
        """Verify the host has every tool needed for an iOS build."""
        checkbin = self.buildozer.checkbin
        cmd = self.buildozer.cmd
        checkbin('Xcode xcodebuild', 'xcodebuild')
        checkbin('Xcode xcode-select', 'xcode-select')
        checkbin('Git git', 'git')
        checkbin('Cython cython', 'cython')
        checkbin('pkg-config', 'pkg-config')
        checkbin('autoconf', 'autoconf')
        checkbin('automake', 'automake')
        checkbin('libtool', 'libtool')

        self.buildozer.debug('Check availability of a iPhone SDK')
        # Take the version column of the last "iphoneos" SDK listed.
        sdk = cmd('xcodebuild -showsdks | fgrep "iphoneos" |'
                  'tail -n 1 | awk \'{print $2}\'',
                  get_stdout=True)[0]
        if not sdk:
            raise Exception(
                'No iPhone SDK found. Please install at least one iOS SDK.')
        else:
            self.buildozer.debug(' -> found %r' % sdk)

        self.buildozer.debug('Check Xcode path')
        xcode = cmd('xcode-select -print-path', get_stdout=True)[0]
        if not xcode:
            raise Exception('Unable to get xcode path')
        self.buildozer.debug(' -> found {0}'.format(xcode))

    def install_platform(self):
        """Fetch (or update) the kivy-ios and ios-deploy checkouts."""
        self.ios_dir = self.install_or_update_repo('kivy-ios', platform='ios')
        self.ios_deploy_dir = self.install_or_update_repo('ios-deploy',
                                                          platform='ios',
                                                          branch='1.7.0',
                                                          owner='phonegap')

    def get_available_packages(self):
        """Return the recipe names known to the kivy-ios toolchain."""
        available_modules = self.buildozer.cmd(
            './toolchain.py recipes --compact',
            cwd=self.ios_dir, get_stdout=True)[0]
        return available_modules.splitlines()[0].split()

    def compile_platform(self):
        """Build the kivy-ios distribution for the app's requirements."""
        # for ios, the compilation depends really on the app requirements.
        # compile the distribution only if the requirements changed.
        last_requirements = self.buildozer.state.get('ios.requirements', '')
        app_requirements = self.buildozer.config.getlist('app', 'requirements',
                                                         '')

        # we need to extract the requirements that kivy-ios knows about
        available_modules = self.get_available_packages()
        onlyname = lambda x: x.split('==')[0]
        ios_requirements = [x for x in app_requirements if onlyname(x) in
                            available_modules]

        need_compile = 0
        if last_requirements != ios_requirements:
            need_compile = 1

        # len('requirements.source.') == 20, so use name[20:]
        source_dirs = {'{}_DIR'.format(name[20:].upper()):
                       realpath(expanduser(value))
                       for name, value in self.buildozer.config.items('app')
                       if name.startswith('requirements.source.')}
        if source_dirs:
            # Custom source dirs always force a rebuild.
            need_compile = 1
            self.buildozer.environ.update(source_dirs)
            self.buildozer.info('Using custom source dirs:\n {}'.format(
                '\n '.join(['{} = {}'.format(k, v)
                            for k, v in source_dirs.items()])))

        if not need_compile:
            self.buildozer.info('Distribution already compiled, pass.')
            return

        modules_str = ' '.join(ios_requirements)
        self.buildozer.cmd('./toolchain.py build {}'.format(modules_str),
                           cwd=self.ios_dir)

        if not self.buildozer.file_exists(self.ios_deploy_dir, 'ios-deploy'):
            self.buildozer.cmd('make ios-deploy', cwd=self.ios_deploy_dir)

        self.buildozer.state['ios.requirements'] = ios_requirements
        self.buildozer.state.sync()

    def _get_package(self):
        """Return the lower-cased bundle identifier (domain + name)."""
        config = self.buildozer.config
        package_domain = config.getdefault('app', 'package.domain', '')
        package = config.get('app', 'package.name')
        if package_domain:
            package = package_domain + '.' + package
        return package.lower()

    def build_package(self):
        """Create/update the Xcode project, build, archive and export an IPA."""
        self._unlock_keychain()

        # create the project
        app_name = self.buildozer.namify(self.buildozer.config.get('app',
                                                                   'package.name'))

        ios_frameworks = self.buildozer.config.getlist('app', 'ios.frameworks', '')
        frameworks_cmd = ''
        for framework in ios_frameworks:
            frameworks_cmd += '--add-framework={} '.format(framework)

        self.app_project_dir = join(self.ios_dir, '{0}-ios'.format(app_name.lower()))
        if not self.buildozer.file_exists(self.app_project_dir):
            create_cmd = './toolchain.py create {0}{1} {2}'.format(frameworks_cmd, app_name,
                                                                   self.buildozer.app_dir)
            self.buildozer.cmd(create_cmd, cwd=self.ios_dir)
        else:
            update_cmd = './toolchain.py update {0}{1}-ios'.format(frameworks_cmd, app_name)
            self.buildozer.cmd(update_cmd, cwd=self.ios_dir)

        # fix the plist
        plist_fn = '{}-Info.plist'.format(app_name.lower())
        plist_rfn = join(self.app_project_dir, plist_fn)
        version = self.buildozer.get_version()
        self.buildozer.info('Update Plist {}'.format(plist_fn))
        # NOTE(review): plistlib.readPlist/writePlist were deprecated in
        # Python 3.4 and removed in 3.9; this code only runs on older
        # interpreters.  plistlib.load/dump is the modern replacement.
        plist = plistlib.readPlist(plist_rfn)
        plist['CFBundleIdentifier'] = self._get_package()
        plist['CFBundleShortVersionString'] = version
        plist['CFBundleVersion'] = '{}.{}'.format(version,
                                                  self.buildozer.build_id)

        # add icons
        self._create_icons()

        # ok, write the modified plist.
        plistlib.writePlist(plist, plist_rfn)

        mode = 'Debug' if self.build_mode == 'debug' else 'Release'
        self.buildozer.cmd('xcodebuild -configuration {} ENABLE_BITCODE=NO clean build'.format(mode),
                           cwd=self.app_project_dir)
        ios_app_dir = '{app_lower}-ios/build/{mode}-iphoneos/{app_lower}.app'.format(
            app_lower=app_name.lower(), mode=mode)
        self.buildozer.state['ios:latestappdir'] = ios_app_dir

        key = 'ios.codesign.{}'.format(self.build_mode)
        ioscodesign = self.buildozer.config.getdefault('app', key, '')
        if not ioscodesign:
            self.buildozer.error('Cannot create the IPA package without'
                                 ' signature. You must fill the "{}" token.'.format(key))
            return
        elif ioscodesign[0] not in ('"', "'"):
            # Quote the identity so spaces survive the shell command below.
            ioscodesign = '"{}"'.format(ioscodesign)

        intermediate_dir = join(self.ios_dir, '{}-{}.intermediates'.format(app_name, version))
        xcarchive = join(intermediate_dir, '{}-{}.xcarchive'.format(
            app_name, version))
        ipa_name = '{}-{}.ipa'.format(app_name, version)
        ipa_tmp = join(intermediate_dir, ipa_name)
        ipa = join(self.buildozer.bin_dir, ipa_name)
        build_dir = join(self.ios_dir, '{}-ios'.format(app_name.lower()))

        # Start from a clean intermediates directory each build.
        self.buildozer.rmdir(intermediate_dir)
        self.buildozer.info('Creating archive...')
        self.buildozer.cmd((
            '/usr/bin/xcodebuild'
            ' -alltargets'
            ' -configuration {mode}'
            ' -scheme {scheme}'
            ' -archivePath "{xcarchive}"'
            ' archive'
            ' ENABLE_BITCODE=NO'
        ).format(mode=mode, xcarchive=xcarchive, scheme=app_name.lower()),
            cwd=build_dir)

        self.buildozer.info('Creating IPA...')
        self.buildozer.cmd((
            '/usr/bin/xcodebuild'
            ' -exportArchive'
            ' -exportFormat IPA'
            ' -archivePath "{xcarchive}"'
            ' -exportPath "{ipa}"'
            ' CODE_SIGN_IDENTITY={ioscodesign}'
            ' ENABLE_BITCODE=NO'
        ).format(xcarchive=xcarchive, ipa=ipa_tmp, ioscodesign=ioscodesign),
            cwd=build_dir)

        self.buildozer.info('Moving IPA to bin...')
        self.buildozer.file_rename(ipa_tmp, ipa)

        self.buildozer.info('iOS packaging done!')
        self.buildozer.info('IPA {0} available in the bin directory'.format(
            basename(ipa)))
        self.buildozer.state['ios:latestipa'] = ipa
        self.buildozer.state['ios:latestmode'] = self.build_mode

    def cmd_deploy(self, *args):
        """Deploy the latest built app to a connected device."""
        super().cmd_deploy(*args)
        self._run_ios_deploy(lldb=False)

    def cmd_run(self, *args):
        """Deploy and start the latest built app under lldb."""
        super().cmd_run(*args)
        self._run_ios_deploy(lldb=True)

    def cmd_xcode(self, *args):
        '''Open the xcode project.
        '''
        app_name = self.buildozer.namify(self.buildozer.config.get('app',
                                                                   'package.name'))
        app_name = app_name.lower()
        # NOTE(review): the duplicated assignment ("ios_dir = ios_dir = ...")
        # is harmless but looks like a typo.
        ios_dir = ios_dir = join(self.buildozer.platform_dir, 'kivy-ios')
        self.buildozer.cmd('open {}.xcodeproj'.format(
            app_name), cwd=join(ios_dir, '{}-ios'.format(app_name)))

    def _run_ios_deploy(self, lldb=False):
        """Push the latest built .app to a device with ios-deploy.

        Args:
            lldb: when True, also start the app under the debugger (-d).
        """
        state = self.buildozer.state
        if 'ios:latestappdir' not in state:
            self.buildozer.error(
                'App not built yet. Run "debug" or "release" first.')
            return
        ios_app_dir = state.get('ios:latestappdir')

        if lldb:
            debug_mode = '-d'
            self.buildozer.info('Deploy and start the application')
        else:
            debug_mode = ''
            self.buildozer.info('Deploy the application')

        self.buildozer.cmd('{iosdeploy} {debug_mode} -b {app_dir}'.format(
            iosdeploy=join(self.ios_deploy_dir, 'ios-deploy'),
            debug_mode=debug_mode, app_dir=ios_app_dir),
            cwd=self.ios_dir, show_output=True)

    def _create_icons(self):
        """Generate the iOS icon set from the configured icon file, if any."""
        icon = self.buildozer.config.getdefault('app', 'icon.filename', '')
        if not icon:
            return
        icon_fn = join(self.buildozer.app_dir, icon)
        if not self.buildozer.file_exists(icon_fn):
            self.buildozer.error('Icon {} does not exists'.format(icon_fn))
            return

        self.buildozer.cmd('./toolchain.py icon {} {}'.format(
            self.app_project_dir, icon_fn),
            cwd=self.ios_dir)

    def check_configuration_tokens(self):
        """Validate the codesign identities configured in buildozer.spec."""
        errors = []
        config = self.buildozer.config
        identity_debug = config.getdefault('app', 'ios.codesign.debug', '')
        # The release identity falls back to the debug one.
        identity_release = config.getdefault('app', 'ios.codesign.release',
                                             identity_debug)
        available_identities = self._get_available_identities()

        if not identity_debug:
            errors.append('[app] "ios.codesign.debug" key missing, '
                          'you must give a certificate name to use.')
        elif identity_debug not in available_identities:
            errors.append('[app] identity {} not found. '
                          'Check with list_identities'.format(identity_debug))

        if not identity_release:
            errors.append('[app] "ios.codesign.release" key missing, '
                          'you must give a certificate name to use.')
        elif identity_release not in available_identities:
            errors.append('[app] identity "{}" not found. '
                          'Check with list_identities'.format(identity_release))

        super().check_configuration_tokens(errors)

    @no_config
    def cmd_list_identities(self, *args):
        '''List the available identities to use for signing.
        '''
        identities = self._get_available_identities()
        print('Available identities:')
        for x in identities:
            print(' - {}'.format(x))

    def _get_available_identities(self):
        """Return the code-signing identities present in the keychain."""
        output = self.buildozer.cmd('security find-identity -v -p codesigning',
                                    get_stdout=True)[0]
        lines = output.splitlines()[:-1]
        # Keep only the quoted identity name, re-wrapped in double quotes.
        lines = [u'"{}"'.format(x.split('"')[1]) for x in lines]
        return lines

    def _unlock_keychain(self):
        """Unlock the default keychain, prompting for (and optionally
        caching) the password when required."""
        password_file = join(self.buildozer.buildozer_dir, '.ioscodesign')
        password = None
        if self.buildozer.file_exists(password_file):
            with open(password_file) as fd:
                password = fd.read()

        if not password:
            # no password available, try to unlock anyway...
            error = self.buildozer.cmd('security unlock-keychain -u',
                                       break_on_error=False)[2]
            if not error:
                return
        else:
            # password available, try to unlock
            error = self.buildozer.cmd('security unlock-keychain -p {}'.format(
                password), break_on_error=False, sensible=True)[2]
            if not error:
                return

        # we need the password to unlock.
        correct = False
        attempt = 3
        while attempt:
            attempt -= 1
            password = getpass('Password to unlock the default keychain:')
            error = self.buildozer.cmd('security unlock-keychain -p "{}"'.format(
                password), break_on_error=False, sensible=True)[2]
            if not error:
                correct = True
                break
            # NOTE(review): Target defines no error() helper in sight; this
            # likely should be self.buildozer.error -- confirm against the
            # Target base class.
            self.error('Invalid keychain password')

        if not correct:
            self.error('Unable to unlock the keychain, exiting.')
            raise BuildozerCommandException()

        # maybe user want to save it for further reuse?
        print(
            'The keychain password can be saved in the build directory\n'
            'As soon as the build directory will be cleaned, '
            'the password will be erased.')

        save = None
        while save is None:
            q = input('Do you want to save the password (Y/n): ')
            if q in ('', 'Y'):
                save = True
            elif q == 'n':
                save = False
            else:
                print('Invalid answer!')

        if save:
            # NOTE(review): the password is cached in plaintext on disk.
            with open(password_file, 'wb') as fd:
                fd.write(password.encode())
def get_target(buildozer):
    """Entry point used by buildozer to obtain the iOS target."""
    target = TargetIos(buildozer)
    return target
| 38.561576
| 101
| 0.584568
|
97e3697051b7427e68dbe6844f86b01cdbbba969
| 2,403
|
py
|
Python
|
algorithms/backtrack/find_words.py
|
liyang101010/algorithms_lee
|
5ef8260f99f66be4a015dafcc71110bb679b7769
|
[
"MIT"
] | 1
|
2020-03-08T13:33:24.000Z
|
2020-03-08T13:33:24.000Z
|
algorithms/backtrack/find_words.py
|
osbreeno/algorithms_lee
|
5ef8260f99f66be4a015dafcc71110bb679b7769
|
[
"MIT"
] | null | null | null |
algorithms/backtrack/find_words.py
|
osbreeno/algorithms_lee
|
5ef8260f99f66be4a015dafcc71110bb679b7769
|
[
"MIT"
] | null | null | null |
"""
Given a matrix of words and a list of words to search,
return a list of words that exists in the board
This is Word Search II on LeetCode
board = [
['o','a','a','n'],
['e','t','a','e'],
['i','h','k','r'],
['i','f','l','v']
]
words = ["oath","pea","eat","rain"]
"""
def find_words(board, words):
    """Return every word from *words* that can be traced on *board*.

    A word is traced by stepping between horizontally or vertically
    adjacent cells, using each cell at most once per word (LeetCode
    "Word Search II").

    :param board: 2-D list of single characters
    :param words: iterable of words to look for
    :return: list of the words found (unordered, no duplicates)
    """
    # Build a trie as nested dicts: each key is a character mapping to its
    # continuations; the sentinel '#' marks the end of a complete word.
    root = {}
    for word in words:
        node = root
        for ch in word:
            node = node.setdefault(ch, {})
        node['#'] = '#'

    rows, cols = len(board), len(board[0])
    found = set()
    visited = [[False] * cols for _ in range(rows)]

    def _walk(r, c, node, prefix):
        """Depth-first walk from (r, c) following *node*, collecting words."""
        # A terminal marker means *prefix* itself is one of the words.
        if '#' in node:
            found.add(prefix)
        if not (0 <= r < rows and 0 <= c < cols):
            return
        ch = board[r][c]
        if visited[r][c] or ch not in node:
            return
        visited[r][c] = True
        nxt = node[ch]
        # Explore all four orthogonal neighbours, then release the cell.
        for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            _walk(r + dr, c + dc, nxt, prefix + ch)
        visited[r][c] = False

    for r in range(rows):
        for c in range(cols):
            _walk(r, c, root, '')
    return list(found)
| 32.472973
| 76
| 0.5335
|
273da9995de0498bd7579202c87581cb8528f48a
| 524
|
py
|
Python
|
api/posts/migrations/0002_post_activity.py
|
Juangr1803/Foro-AgrodatAI
|
a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b
|
[
"MIT"
] | 1
|
2021-04-19T16:13:39.000Z
|
2021-04-19T16:13:39.000Z
|
api/posts/migrations/0002_post_activity.py
|
Juangr1803/Foro-AgrodatAI
|
a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b
|
[
"MIT"
] | null | null | null |
api/posts/migrations/0002_post_activity.py
|
Juangr1803/Foro-AgrodatAI
|
a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-12 16:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional ``activity`` FK to Post.

    Adds a nullable ``activity`` foreign key on ``posts.Post`` pointing at
    ``activities.Activity``; ``on_delete=CASCADE`` removes a post's rows
    when the referenced activity is deleted.
    NOTE(review): generated by ``makemigrations`` -- avoid hand-editing
    beyond comments so the migration graph stays consistent.
    """

    # Both apps' initial migrations must exist first: the posts table being
    # altered and the activities table the foreign key references.
    dependencies = [
        ('activities', '0001_initial'),
        ('posts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='activity',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='activities.activity'),
        ),
    ]
| 24.952381
| 130
| 0.629771
|
2a24bad68d43732959883e348e64378dd0ec9929
| 27,295
|
py
|
Python
|
tests/examples/minlplib/no7_ar25_1.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/no7_ar25_1.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/no7_ar25_1.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 04/21/18 13:52:38
#
# Equation counts
# Total E G L N X C B
# 270 2 2 266 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 113 71 0 42 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1061 1047 14 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Model construction. The GAMS Convert dump declared every variable on its
# own line; the loops below generate byte-identical component names
# (m.i1..m.i42, m.x44..m.x113) with the same domains, bounds and initial
# values, removing ~110 lines of error-prone repetition. The external
# interface (attribute names on ``m``) is unchanged.
model = m = ConcreteModel()

# 42 general integer variables i1..i42, each in [0, 100], starting at 0.
for _k in range(1, 43):
    setattr(m, 'i%d' % _k, Var(within=Integers, bounds=(0, 100), initialize=0))

# Continuous variables x44..x113 (x43 was removed by the GAMS
# reformulation, hence the gap). Most are unbounded and start at 0; the
# variables listed in _x_bounds carry explicit box bounds and start at
# their lower bound, exactly as in the original dump.
_x_bounds = {}
for _k in (66, 67, 69, 70, 72, 74):
    _x_bounds[_k] = (2.5298, 6.3246)
_x_bounds[76] = (3.7947, 8.54)
_x_bounds[77] = (4.2155, 9.4868)
for _k in (78, 80, 82, 84, 86, 87):
    _x_bounds[_k] = (1.8974, 4.7434)

for _k in range(44, 114):
    _lb, _ub = _x_bounds.get(_k, (None, None))
    setattr(m, 'x%d' % _k,
            Var(within=Reals, bounds=(_lb, _ub),
                initialize=_lb if _lb is not None else 0))
# Objective: minimise the weighted sum of the pairwise separation
# variables x44..x61 defined by the constraints below.
m.obj = Objective(expr= 5*m.x44 + 5*m.x45 + m.x46 + m.x47 + 3*m.x48 + 3*m.x49 + m.x50 + m.x51 + 2*m.x52 + 2*m.x53
                  + m.x54 + m.x55 + 4*m.x56 + 4*m.x57 + 2*m.x58 + 2*m.x59 + m.x60 + m.x61, sense=minimize)
# Ordering/symmetry constraints on the first pair of coordinates.
m.c2 = Constraint(expr= m.x62 - m.x63 >= 0)
m.c3 = Constraint(expr= m.x64 - m.x65 >= 0)
m.c4 = Constraint(expr= m.i1 - m.i2 == 0)
# Big-M constraints (M = 8.54 horizontally, 13 vertically) activated by the
# integer variables; they tie each pairwise separation variable to the
# half-widths/half-heights of the two objects involved.
m.c5 = Constraint(expr= - 8.54*m.i1 + 0.5*m.x66 + 0.5*m.x67 - m.x68 <= 0)
m.c6 = Constraint(expr= 13*m.i1 + 0.5*m.x69 + 0.5*m.x70 - m.x71 <= 13)
m.c7 = Constraint(expr= - 8.54*m.i3 + 0.5*m.x66 + 0.5*m.x72 - m.x73 <= 0)
m.c8 = Constraint(expr= 13*m.i3 + 0.5*m.x69 + 0.5*m.x74 - m.x75 <= 13)
m.c9 = Constraint(expr= - 8.54*m.i5 - m.x44 + 0.5*m.x66 + 0.5*m.x76 <= 0)
m.c10 = Constraint(expr= 13*m.i5 - m.x45 + 0.5*m.x69 + 0.5*m.x77 <= 13)
m.c11 = Constraint(expr= - 8.54*m.i7 + 0.5*m.x66 + 0.5*m.x78 - m.x79 <= 0)
m.c12 = Constraint(expr= 13*m.i7 + 0.5*m.x69 + 0.5*m.x80 - m.x81 <= 13)
m.c13 = Constraint(expr= - 8.54*m.i9 + 0.5*m.x66 + 0.5*m.x82 - m.x83 <= 0)
m.c14 = Constraint(expr= 13*m.i9 + 0.5*m.x69 + 0.5*m.x84 - m.x85 <= 13)
m.c15 = Constraint(expr= - 8.54*m.i11 - m.x46 + 0.5*m.x66 + 0.5*m.x86 <= 0)
m.c16 = Constraint(expr= 13*m.i11 - m.x47 + 0.5*m.x69 + 0.5*m.x87 <= 13)
m.c17 = Constraint(expr= - 8.54*m.i13 + 0.5*m.x67 + 0.5*m.x72 - m.x88 <= 0)
m.c18 = Constraint(expr= 13*m.i13 + 0.5*m.x70 + 0.5*m.x74 - m.x89 <= 13)
m.c19 = Constraint(expr= - 8.54*m.i15 - m.x48 + 0.5*m.x67 + 0.5*m.x76 <= 0)
m.c20 = Constraint(expr= 13*m.i15 - m.x49 + 0.5*m.x70 + 0.5*m.x77 <= 13)
m.c21 = Constraint(expr= - 8.54*m.i17 + 0.5*m.x67 + 0.5*m.x78 - m.x90 <= 0)
m.c22 = Constraint(expr= 13*m.i17 + 0.5*m.x70 + 0.5*m.x80 - m.x91 <= 13)
m.c23 = Constraint(expr= - 8.54*m.i19 + 0.5*m.x67 + 0.5*m.x82 - m.x92 <= 0)
m.c24 = Constraint(expr= 13*m.i19 + 0.5*m.x70 + 0.5*m.x84 - m.x93 <= 13)
m.c25 = Constraint(expr= - 8.54*m.i21 - m.x50 + 0.5*m.x67 + 0.5*m.x86 <= 0)
m.c26 = Constraint(expr= 13*m.i21 - m.x51 + 0.5*m.x70 + 0.5*m.x87 <= 13)
m.c27 = Constraint(expr= - 8.54*m.i23 - m.x52 + 0.5*m.x72 + 0.5*m.x76 <= 0)
m.c28 = Constraint(expr= 13*m.i23 - m.x53 + 0.5*m.x74 + 0.5*m.x77 <= 13)
m.c29 = Constraint(expr= - 8.54*m.i25 + 0.5*m.x72 + 0.5*m.x78 - m.x94 <= 0)
m.c30 = Constraint(expr= 13*m.i25 + 0.5*m.x74 + 0.5*m.x80 - m.x95 <= 13)
m.c31 = Constraint(expr= - 8.54*m.i27 + 0.5*m.x72 + 0.5*m.x82 - m.x96 <= 0)
m.c32 = Constraint(expr= 13*m.i27 + 0.5*m.x74 + 0.5*m.x84 - m.x97 <= 13)
m.c33 = Constraint(expr= - 8.54*m.i29 - m.x54 + 0.5*m.x72 + 0.5*m.x86 <= 0)
m.c34 = Constraint(expr= 13*m.i29 - m.x55 + 0.5*m.x74 + 0.5*m.x87 <= 13)
m.c35 = Constraint(expr= - 8.54*m.i31 - m.x56 + 0.5*m.x76 + 0.5*m.x78 <= 0)
m.c36 = Constraint(expr= 13*m.i31 - m.x57 + 0.5*m.x77 + 0.5*m.x80 <= 13)
m.c37 = Constraint(expr= - 8.54*m.i33 + 0.5*m.x76 + 0.5*m.x82 - m.x98 <= 0)
m.c38 = Constraint(expr= 13*m.i33 + 0.5*m.x77 + 0.5*m.x84 - m.x99 <= 13)
m.c39 = Constraint(expr= - 8.54*m.i35 + 0.5*m.x76 + 0.5*m.x86 - m.x100 <= 0)
m.c40 = Constraint(expr= 13*m.i35 + 0.5*m.x77 + 0.5*m.x87 - m.x101 <= 13)
m.c41 = Constraint(expr= - 8.54*m.i37 + 0.5*m.x78 + 0.5*m.x82 - m.x102 <= 0)
m.c42 = Constraint(expr= 13*m.i37 + 0.5*m.x80 + 0.5*m.x84 - m.x103 <= 13)
m.c43 = Constraint(expr= - 8.54*m.i39 - m.x58 + 0.5*m.x78 + 0.5*m.x86 <= 0)
m.c44 = Constraint(expr= 13*m.i39 - m.x59 + 0.5*m.x80 + 0.5*m.x87 <= 13)
m.c45 = Constraint(expr= - 8.54*m.i41 - m.x60 + 0.5*m.x82 + 0.5*m.x86 <= 0)
m.c46 = Constraint(expr= 13*m.i41 - m.x61 + 0.5*m.x84 + 0.5*m.x87 <= 13)
# Linearised aspect-ratio limits on each (width, height) pair of bounded
# variables -- presumably department dimensions; coefficients come from the
# GAMS source.
m.c47 = Constraint(expr= - 0.395288*m.x66 - 0.158112*m.x69 <= -2)
m.c48 = Constraint(expr= - 0.158113*m.x66 - 0.395288*m.x69 <= -2)
m.c49 = Constraint(expr= - 0.395288*m.x67 - 0.158112*m.x70 <= -2)
m.c50 = Constraint(expr= - 0.158113*m.x67 - 0.395288*m.x70 <= -2)
m.c51 = Constraint(expr= - 0.395288*m.x72 - 0.158112*m.x74 <= -2)
m.c52 = Constraint(expr= - 0.158113*m.x72 - 0.395288*m.x74 <= -2)
m.c53 = Constraint(expr= - 0.263525*m.x76 - 0.105408*m.x77 <= -2)
m.c54 = Constraint(expr= - 0.117096*m.x76 - 0.237222*m.x77 <= -2)
m.c55 = Constraint(expr= - 0.527037*m.x78 - 0.210822*m.x80 <= -2)
m.c56 = Constraint(expr= - 0.210819*m.x78 - 0.527044*m.x80 <= -2)
m.c57 = Constraint(expr= - 0.527037*m.x82 - 0.210822*m.x84 <= -2)
m.c58 = Constraint(expr= - 0.210819*m.x82 - 0.527044*m.x84 <= -2)
m.c59 = Constraint(expr= - 0.527037*m.x86 - 0.210822*m.x87 <= -2)
m.c60 = Constraint(expr= - 0.210819*m.x86 - 0.527044*m.x87 <= -2)
# Containment: each centre coordinate plus/minus half the corresponding
# dimension must stay inside the 8.54-by-13 enclosure.
m.c61 = Constraint(expr= m.x62 + 0.5*m.x66 <= 8.54)
m.c62 = Constraint(expr= - m.x62 + 0.5*m.x66 <= 0)
m.c63 = Constraint(expr= m.x65 + 0.5*m.x69 <= 13)
m.c64 = Constraint(expr= - m.x65 + 0.5*m.x69 <= 0)
m.c65 = Constraint(expr= m.x63 + 0.5*m.x67 <= 8.54)
m.c66 = Constraint(expr= - m.x63 + 0.5*m.x67 <= 0)
m.c67 = Constraint(expr= m.x64 + 0.5*m.x70 <= 13)
m.c68 = Constraint(expr= - m.x64 + 0.5*m.x70 <= 0)
m.c69 = Constraint(expr= 0.5*m.x72 + m.x104 <= 8.54)
m.c70 = Constraint(expr= 0.5*m.x72 - m.x104 <= 0)
m.c71 = Constraint(expr= 0.5*m.x74 + m.x105 <= 13)
m.c72 = Constraint(expr= 0.5*m.x74 - m.x105 <= 0)
m.c73 = Constraint(expr= 0.5*m.x76 + m.x106 <= 8.54)
m.c74 = Constraint(expr= 0.5*m.x76 - m.x106 <= 0)
m.c75 = Constraint(expr= 0.5*m.x77 + m.x107 <= 13)
m.c76 = Constraint(expr= 0.5*m.x77 - m.x107 <= 0)
m.c77 = Constraint(expr= 0.5*m.x78 + m.x108 <= 8.54)
m.c78 = Constraint(expr= 0.5*m.x78 - m.x108 <= 0)
m.c79 = Constraint(expr= 0.5*m.x80 + m.x109 <= 13)
m.c80 = Constraint(expr= 0.5*m.x80 - m.x109 <= 0)
m.c81 = Constraint(expr= 0.5*m.x82 + m.x110 <= 8.54)
m.c82 = Constraint(expr= 0.5*m.x82 - m.x110 <= 0)
m.c83 = Constraint(expr= 0.5*m.x84 + m.x111 <= 13)
m.c84 = Constraint(expr= 0.5*m.x84 - m.x111 <= 0)
m.c85 = Constraint(expr= 0.5*m.x86 + m.x112 <= 8.54)
m.c86 = Constraint(expr= 0.5*m.x86 - m.x112 <= 0)
m.c87 = Constraint(expr= 0.5*m.x87 + m.x113 <= 13)
m.c88 = Constraint(expr= 0.5*m.x87 - m.x113 <= 0)
# Per object pair: two absolute-value linearisations (|a-b| <= d style) and
# four big-M non-overlap constraints switched by the paired integers.
m.c89 = Constraint(expr= m.x62 - m.x63 - m.x68 <= 0)
m.c90 = Constraint(expr= - m.x62 + m.x63 - m.x68 <= 0)
m.c91 = Constraint(expr= - m.x64 + m.x65 - m.x71 <= 0)
m.c92 = Constraint(expr= m.x64 - m.x65 - m.x71 <= 0)
m.c93 = Constraint(expr= - 8.54*m.i1 - 8.54*m.i2 - m.x62 + m.x63 + 0.5*m.x66 + 0.5*m.x67 <= 0)
m.c94 = Constraint(expr= - 8.54*m.i1 + 8.54*m.i2 + m.x62 - m.x63 + 0.5*m.x66 + 0.5*m.x67 <= 8.54)
m.c95 = Constraint(expr= 13*m.i1 - 13*m.i2 + m.x64 - m.x65 + 0.5*m.x69 + 0.5*m.x70 <= 13)
m.c96 = Constraint(expr= 13*m.i1 + 13*m.i2 - m.x64 + m.x65 + 0.5*m.x69 + 0.5*m.x70 <= 26)
m.c97 = Constraint(expr= m.x62 - m.x73 - m.x104 <= 0)
m.c98 = Constraint(expr= - m.x62 - m.x73 + m.x104 <= 0)
m.c99 = Constraint(expr= m.x65 - m.x75 - m.x105 <= 0)
m.c100 = Constraint(expr= - m.x65 - m.x75 + m.x105 <= 0)
m.c101 = Constraint(expr= - 8.54*m.i3 - 8.54*m.i4 - m.x62 + 0.5*m.x66 + 0.5*m.x72 + m.x104 <= 0)
m.c102 = Constraint(expr= - 8.54*m.i3 + 8.54*m.i4 + m.x62 + 0.5*m.x66 + 0.5*m.x72 - m.x104 <= 8.54)
m.c103 = Constraint(expr= 13*m.i3 - 13*m.i4 - m.x65 + 0.5*m.x69 + 0.5*m.x74 + m.x105 <= 13)
m.c104 = Constraint(expr= 13*m.i3 + 13*m.i4 + m.x65 + 0.5*m.x69 + 0.5*m.x74 - m.x105 <= 26)
m.c105 = Constraint(expr= - m.x44 + m.x62 - m.x106 <= 0)
m.c106 = Constraint(expr= - m.x44 - m.x62 + m.x106 <= 0)
m.c107 = Constraint(expr= - m.x45 + m.x65 - m.x107 <= 0)
m.c108 = Constraint(expr= - m.x45 - m.x65 + m.x107 <= 0)
m.c109 = Constraint(expr= - 8.54*m.i5 - 8.54*m.i6 - m.x62 + 0.5*m.x66 + 0.5*m.x76 + m.x106 <= 0)
m.c110 = Constraint(expr= - 8.54*m.i5 + 8.54*m.i6 + m.x62 + 0.5*m.x66 + 0.5*m.x76 - m.x106 <= 8.54)
m.c111 = Constraint(expr= 13*m.i5 - 13*m.i6 - m.x65 + 0.5*m.x69 + 0.5*m.x77 + m.x107 <= 13)
m.c112 = Constraint(expr= 13*m.i5 + 13*m.i6 + m.x65 + 0.5*m.x69 + 0.5*m.x77 - m.x107 <= 26)
m.c113 = Constraint(expr= m.x62 - m.x79 - m.x108 <= 0)
m.c114 = Constraint(expr= - m.x62 - m.x79 + m.x108 <= 0)
m.c115 = Constraint(expr= m.x65 - m.x81 - m.x109 <= 0)
m.c116 = Constraint(expr= - m.x65 - m.x81 + m.x109 <= 0)
m.c117 = Constraint(expr= - 8.54*m.i7 - 8.54*m.i8 - m.x62 + 0.5*m.x66 + 0.5*m.x78 + m.x108 <= 0)
m.c118 = Constraint(expr= - 8.54*m.i7 + 8.54*m.i8 + m.x62 + 0.5*m.x66 + 0.5*m.x78 - m.x108 <= 8.54)
m.c119 = Constraint(expr= 13*m.i7 - 13*m.i8 - m.x65 + 0.5*m.x69 + 0.5*m.x80 + m.x109 <= 13)
m.c120 = Constraint(expr= 13*m.i7 + 13*m.i8 + m.x65 + 0.5*m.x69 + 0.5*m.x80 - m.x109 <= 26)
m.c121 = Constraint(expr= m.x62 - m.x83 - m.x110 <= 0)
m.c122 = Constraint(expr= - m.x62 - m.x83 + m.x110 <= 0)
m.c123 = Constraint(expr= m.x65 - m.x85 - m.x111 <= 0)
m.c124 = Constraint(expr= - m.x65 - m.x85 + m.x111 <= 0)
m.c125 = Constraint(expr= - 8.54*m.i9 - 8.54*m.i10 - m.x62 + 0.5*m.x66 + 0.5*m.x82 + m.x110 <= 0)
m.c126 = Constraint(expr= - 8.54*m.i9 + 8.54*m.i10 + m.x62 + 0.5*m.x66 + 0.5*m.x82 - m.x110 <= 8.54)
m.c127 = Constraint(expr= 13*m.i9 - 13*m.i10 - m.x65 + 0.5*m.x69 + 0.5*m.x84 + m.x111 <= 13)
m.c128 = Constraint(expr= 13*m.i9 + 13*m.i10 + m.x65 + 0.5*m.x69 + 0.5*m.x84 - m.x111 <= 26)
m.c129 = Constraint(expr= - m.x46 + m.x62 - m.x112 <= 0)
m.c130 = Constraint(expr= - m.x46 - m.x62 + m.x112 <= 0)
m.c131 = Constraint(expr= - m.x47 + m.x65 - m.x113 <= 0)
m.c132 = Constraint(expr= - m.x47 - m.x65 + m.x113 <= 0)
m.c133 = Constraint(expr= - 8.54*m.i11 - 8.54*m.i12 - m.x62 + 0.5*m.x66 + 0.5*m.x86 + m.x112 <= 0)
m.c134 = Constraint(expr= - 8.54*m.i11 + 8.54*m.i12 + m.x62 + 0.5*m.x66 + 0.5*m.x86 - m.x112 <= 8.54)
m.c135 = Constraint(expr= 13*m.i11 - 13*m.i12 - m.x65 + 0.5*m.x69 + 0.5*m.x87 + m.x113 <= 13)
m.c136 = Constraint(expr= 13*m.i11 + 13*m.i12 + m.x65 + 0.5*m.x69 + 0.5*m.x87 - m.x113 <= 26)
m.c137 = Constraint(expr= m.x63 - m.x88 - m.x104 <= 0)
m.c138 = Constraint(expr= - m.x63 - m.x88 + m.x104 <= 0)
m.c139 = Constraint(expr= m.x64 - m.x89 - m.x105 <= 0)
m.c140 = Constraint(expr= - m.x64 - m.x89 + m.x105 <= 0)
m.c141 = Constraint(expr= - 8.54*m.i13 - 8.54*m.i14 - m.x63 + 0.5*m.x67 + 0.5*m.x72 + m.x104 <= 0)
m.c142 = Constraint(expr= - 8.54*m.i13 + 8.54*m.i14 + m.x63 + 0.5*m.x67 + 0.5*m.x72 - m.x104 <= 8.54)
m.c143 = Constraint(expr= 13*m.i13 - 13*m.i14 - m.x64 + 0.5*m.x70 + 0.5*m.x74 + m.x105 <= 13)
m.c144 = Constraint(expr= 13*m.i13 + 13*m.i14 + m.x64 + 0.5*m.x70 + 0.5*m.x74 - m.x105 <= 26)
m.c145 = Constraint(expr= - m.x48 + m.x63 - m.x106 <= 0)
m.c146 = Constraint(expr= - m.x48 - m.x63 + m.x106 <= 0)
m.c147 = Constraint(expr= - m.x49 + m.x64 - m.x107 <= 0)
m.c148 = Constraint(expr= - m.x49 - m.x64 + m.x107 <= 0)
m.c149 = Constraint(expr= - 8.54*m.i15 - 8.54*m.i16 - m.x63 + 0.5*m.x67 + 0.5*m.x76 + m.x106 <= 0)
m.c150 = Constraint(expr= - 8.54*m.i15 + 8.54*m.i16 + m.x63 + 0.5*m.x67 + 0.5*m.x76 - m.x106 <= 8.54)
m.c151 = Constraint(expr= 13*m.i15 - 13*m.i16 - m.x64 + 0.5*m.x70 + 0.5*m.x77 + m.x107 <= 13)
m.c152 = Constraint(expr= 13*m.i15 + 13*m.i16 + m.x64 + 0.5*m.x70 + 0.5*m.x77 - m.x107 <= 26)
m.c153 = Constraint(expr= m.x63 - m.x90 - m.x108 <= 0)
m.c154 = Constraint(expr= - m.x63 - m.x90 + m.x108 <= 0)
m.c155 = Constraint(expr= m.x64 - m.x91 - m.x109 <= 0)
m.c156 = Constraint(expr= - m.x64 - m.x91 + m.x109 <= 0)
m.c157 = Constraint(expr= - 8.54*m.i17 - 8.54*m.i18 - m.x63 + 0.5*m.x67 + 0.5*m.x78 + m.x108 <= 0)
m.c158 = Constraint(expr= - 8.54*m.i17 + 8.54*m.i18 + m.x63 + 0.5*m.x67 + 0.5*m.x78 - m.x108 <= 8.54)
m.c159 = Constraint(expr= 13*m.i17 - 13*m.i18 - m.x64 + 0.5*m.x70 + 0.5*m.x80 + m.x109 <= 13)
m.c160 = Constraint(expr= 13*m.i17 + 13*m.i18 + m.x64 + 0.5*m.x70 + 0.5*m.x80 - m.x109 <= 26)
m.c161 = Constraint(expr= m.x63 - m.x92 - m.x110 <= 0)
m.c162 = Constraint(expr= - m.x63 - m.x92 + m.x110 <= 0)
m.c163 = Constraint(expr= m.x64 - m.x93 - m.x111 <= 0)
m.c164 = Constraint(expr= - m.x64 - m.x93 + m.x111 <= 0)
m.c165 = Constraint(expr= - 8.54*m.i19 - 8.54*m.i20 - m.x63 + 0.5*m.x67 + 0.5*m.x82 + m.x110 <= 0)
m.c166 = Constraint(expr= - 8.54*m.i19 + 8.54*m.i20 + m.x63 + 0.5*m.x67 + 0.5*m.x82 - m.x110 <= 8.54)
m.c167 = Constraint(expr= 13*m.i19 - 13*m.i20 - m.x64 + 0.5*m.x70 + 0.5*m.x84 + m.x111 <= 13)
m.c168 = Constraint(expr= 13*m.i19 + 13*m.i20 + m.x64 + 0.5*m.x70 + 0.5*m.x84 - m.x111 <= 26)
m.c169 = Constraint(expr= - m.x50 + m.x63 - m.x112 <= 0)
m.c170 = Constraint(expr= - m.x50 - m.x63 + m.x112 <= 0)
m.c171 = Constraint(expr= - m.x51 + m.x64 - m.x113 <= 0)
m.c172 = Constraint(expr= - m.x51 - m.x64 + m.x113 <= 0)
m.c173 = Constraint(expr= - 8.54*m.i21 - 8.54*m.i22 - m.x63 + 0.5*m.x67 + 0.5*m.x86 + m.x112 <= 0)
m.c174 = Constraint(expr= - 8.54*m.i21 + 8.54*m.i22 + m.x63 + 0.5*m.x67 + 0.5*m.x86 - m.x112 <= 8.54)
m.c175 = Constraint(expr= 13*m.i21 - 13*m.i22 - m.x64 + 0.5*m.x70 + 0.5*m.x87 + m.x113 <= 13)
m.c176 = Constraint(expr= 13*m.i21 + 13*m.i22 + m.x64 + 0.5*m.x70 + 0.5*m.x87 - m.x113 <= 26)
m.c177 = Constraint(expr= - m.x52 + m.x104 - m.x106 <= 0)
m.c178 = Constraint(expr= - m.x52 - m.x104 + m.x106 <= 0)
m.c179 = Constraint(expr= - m.x53 + m.x105 - m.x107 <= 0)
m.c180 = Constraint(expr= - m.x53 - m.x105 + m.x107 <= 0)
m.c181 = Constraint(expr= - 8.54*m.i23 - 8.54*m.i24 + 0.5*m.x72 + 0.5*m.x76 - m.x104 + m.x106 <= 0)
m.c182 = Constraint(expr= - 8.54*m.i23 + 8.54*m.i24 + 0.5*m.x72 + 0.5*m.x76 + m.x104 - m.x106 <= 8.54)
m.c183 = Constraint(expr= 13*m.i23 - 13*m.i24 + 0.5*m.x74 + 0.5*m.x77 - m.x105 + m.x107 <= 13)
m.c184 = Constraint(expr= 13*m.i23 + 13*m.i24 + 0.5*m.x74 + 0.5*m.x77 + m.x105 - m.x107 <= 26)
m.c185 = Constraint(expr= - m.x94 + m.x104 - m.x108 <= 0)
m.c186 = Constraint(expr= - m.x94 - m.x104 + m.x108 <= 0)
m.c187 = Constraint(expr= - m.x95 + m.x105 - m.x109 <= 0)
m.c188 = Constraint(expr= - m.x95 - m.x105 + m.x109 <= 0)
m.c189 = Constraint(expr= - 8.54*m.i25 - 8.54*m.i26 + 0.5*m.x72 + 0.5*m.x78 - m.x104 + m.x108 <= 0)
m.c190 = Constraint(expr= - 8.54*m.i25 + 8.54*m.i26 + 0.5*m.x72 + 0.5*m.x78 + m.x104 - m.x108 <= 8.54)
m.c191 = Constraint(expr= 13*m.i25 - 13*m.i26 + 0.5*m.x74 + 0.5*m.x80 - m.x105 + m.x109 <= 13)
m.c192 = Constraint(expr= 13*m.i25 + 13*m.i26 + 0.5*m.x74 + 0.5*m.x80 + m.x105 - m.x109 <= 26)
m.c193 = Constraint(expr= - m.x96 + m.x104 - m.x110 <= 0)
m.c194 = Constraint(expr= - m.x96 - m.x104 + m.x110 <= 0)
m.c195 = Constraint(expr= - m.x97 + m.x105 - m.x111 <= 0)
m.c196 = Constraint(expr= - m.x97 - m.x105 + m.x111 <= 0)
m.c197 = Constraint(expr= - 8.54*m.i27 - 8.54*m.i28 + 0.5*m.x72 + 0.5*m.x82 - m.x104 + m.x110 <= 0)
m.c198 = Constraint(expr= - 8.54*m.i27 + 8.54*m.i28 + 0.5*m.x72 + 0.5*m.x82 + m.x104 - m.x110 <= 8.54)
m.c199 = Constraint(expr= 13*m.i27 - 13*m.i28 + 0.5*m.x74 + 0.5*m.x84 - m.x105 + m.x111 <= 13)
m.c200 = Constraint(expr= 13*m.i27 + 13*m.i28 + 0.5*m.x74 + 0.5*m.x84 + m.x105 - m.x111 <= 26)
m.c201 = Constraint(expr= - m.x54 + m.x104 - m.x112 <= 0)
m.c202 = Constraint(expr= - m.x54 - m.x104 + m.x112 <= 0)
m.c203 = Constraint(expr= - m.x55 + m.x105 - m.x113 <= 0)
m.c204 = Constraint(expr= - m.x55 - m.x105 + m.x113 <= 0)
m.c205 = Constraint(expr= - 8.54*m.i29 - 8.54*m.i30 + 0.5*m.x72 + 0.5*m.x86 - m.x104 + m.x112 <= 0)
m.c206 = Constraint(expr= - 8.54*m.i29 + 8.54*m.i30 + 0.5*m.x72 + 0.5*m.x86 + m.x104 - m.x112 <= 8.54)
m.c207 = Constraint(expr= 13*m.i29 - 13*m.i30 + 0.5*m.x74 + 0.5*m.x87 - m.x105 + m.x113 <= 13)
m.c208 = Constraint(expr= 13*m.i29 + 13*m.i30 + 0.5*m.x74 + 0.5*m.x87 + m.x105 - m.x113 <= 26)
m.c209 = Constraint(expr= - m.x56 + m.x106 - m.x108 <= 0)
m.c210 = Constraint(expr= - m.x56 - m.x106 + m.x108 <= 0)
m.c211 = Constraint(expr= - m.x57 + m.x107 - m.x109 <= 0)
m.c212 = Constraint(expr= - m.x57 - m.x107 + m.x109 <= 0)
m.c213 = Constraint(expr= - 8.54*m.i31 - 8.54*m.i32 + 0.5*m.x76 + 0.5*m.x78 - m.x106 + m.x108 <= 0)
m.c214 = Constraint(expr= - 8.54*m.i31 + 8.54*m.i32 + 0.5*m.x76 + 0.5*m.x78 + m.x106 - m.x108 <= 8.54)
m.c215 = Constraint(expr= 13*m.i31 - 13*m.i32 + 0.5*m.x77 + 0.5*m.x80 - m.x107 + m.x109 <= 13)
m.c216 = Constraint(expr= 13*m.i31 + 13*m.i32 + 0.5*m.x77 + 0.5*m.x80 + m.x107 - m.x109 <= 26)
m.c217 = Constraint(expr= - m.x98 + m.x106 - m.x110 <= 0)
m.c218 = Constraint(expr= - m.x98 - m.x106 + m.x110 <= 0)
m.c219 = Constraint(expr= - m.x99 + m.x107 - m.x111 <= 0)
m.c220 = Constraint(expr= - m.x99 - m.x107 + m.x111 <= 0)
m.c221 = Constraint(expr= - 8.54*m.i33 - 8.54*m.i34 + 0.5*m.x76 + 0.5*m.x82 - m.x106 + m.x110 <= 0)
m.c222 = Constraint(expr= - 8.54*m.i33 + 8.54*m.i34 + 0.5*m.x76 + 0.5*m.x82 + m.x106 - m.x110 <= 8.54)
m.c223 = Constraint(expr= 13*m.i33 - 13*m.i34 + 0.5*m.x77 + 0.5*m.x84 - m.x107 + m.x111 <= 13)
m.c224 = Constraint(expr= 13*m.i33 + 13*m.i34 + 0.5*m.x77 + 0.5*m.x84 + m.x107 - m.x111 <= 26)
m.c225 = Constraint(expr= - m.x100 + m.x106 - m.x112 <= 0)
m.c226 = Constraint(expr= - m.x100 - m.x106 + m.x112 <= 0)
m.c227 = Constraint(expr= - m.x101 + m.x107 - m.x113 <= 0)
m.c228 = Constraint(expr= - m.x101 - m.x107 + m.x113 <= 0)
m.c229 = Constraint(expr= - 8.54*m.i35 - 8.54*m.i36 + 0.5*m.x76 + 0.5*m.x86 - m.x106 + m.x112 <= 0)
m.c230 = Constraint(expr= - 8.54*m.i35 + 8.54*m.i36 + 0.5*m.x76 + 0.5*m.x86 + m.x106 - m.x112 <= 8.54)
m.c231 = Constraint(expr= 13*m.i35 - 13*m.i36 + 0.5*m.x77 + 0.5*m.x87 - m.x107 + m.x113 <= 13)
m.c232 = Constraint(expr= 13*m.i35 + 13*m.i36 + 0.5*m.x77 + 0.5*m.x87 + m.x107 - m.x113 <= 26)
m.c233 = Constraint(expr= - m.x102 + m.x108 - m.x110 <= 0)
m.c234 = Constraint(expr= - m.x102 - m.x108 + m.x110 <= 0)
m.c235 = Constraint(expr= - m.x103 + m.x109 - m.x111 <= 0)
m.c236 = Constraint(expr= - m.x103 - m.x109 + m.x111 <= 0)
m.c237 = Constraint(expr= - 8.54*m.i37 - 8.54*m.i38 + 0.5*m.x78 + 0.5*m.x82 - m.x108 + m.x110 <= 0)
m.c238 = Constraint(expr= - 8.54*m.i37 + 8.54*m.i38 + 0.5*m.x78 + 0.5*m.x82 + m.x108 - m.x110 <= 8.54)
m.c239 = Constraint(expr= 13*m.i37 - 13*m.i38 + 0.5*m.x80 + 0.5*m.x84 - m.x109 + m.x111 <= 13)
m.c240 = Constraint(expr= 13*m.i37 + 13*m.i38 + 0.5*m.x80 + 0.5*m.x84 + m.x109 - m.x111 <= 26)
m.c241 = Constraint(expr= - m.x58 + m.x108 - m.x112 <= 0)
m.c242 = Constraint(expr= - m.x58 - m.x108 + m.x112 <= 0)
m.c243 = Constraint(expr= - m.x59 + m.x109 - m.x113 <= 0)
m.c244 = Constraint(expr= - m.x59 - m.x109 + m.x113 <= 0)
m.c245 = Constraint(expr= - 8.54*m.i39 - 8.54*m.i40 + 0.5*m.x78 + 0.5*m.x86 - m.x108 + m.x112 <= 0)
m.c246 = Constraint(expr= - 8.54*m.i39 + 8.54*m.i40 + 0.5*m.x78 + 0.5*m.x86 + m.x108 - m.x112 <= 8.54)
m.c247 = Constraint(expr= 13*m.i39 - 13*m.i40 + 0.5*m.x80 + 0.5*m.x87 - m.x109 + m.x113 <= 13)
m.c248 = Constraint(expr= 13*m.i39 + 13*m.i40 + 0.5*m.x80 + 0.5*m.x87 + m.x109 - m.x113 <= 26)
m.c249 = Constraint(expr= - m.x60 + m.x110 - m.x112 <= 0)
m.c250 = Constraint(expr= - m.x60 - m.x110 + m.x112 <= 0)
m.c251 = Constraint(expr= - m.x61 + m.x111 - m.x113 <= 0)
m.c252 = Constraint(expr= - m.x61 - m.x111 + m.x113 <= 0)
m.c253 = Constraint(expr= - 8.54*m.i41 - 8.54*m.i42 + 0.5*m.x82 + 0.5*m.x86 - m.x110 + m.x112 <= 0)
m.c254 = Constraint(expr= - 8.54*m.i41 + 8.54*m.i42 + 0.5*m.x82 + 0.5*m.x86 + m.x110 - m.x112 <= 8.54)
m.c255 = Constraint(expr= 13*m.i41 - 13*m.i42 + 0.5*m.x84 + 0.5*m.x87 - m.x111 + m.x113 <= 13)
m.c256 = Constraint(expr= 13*m.i41 + 13*m.i42 + 0.5*m.x84 + 0.5*m.x87 + m.x111 - m.x113 <= 26)
# Nonlinear (hyperbolic) constraints: A/w - h <= 0 and A/h - w <= 0, i.e.
# w*h >= A for each dimension pair -- the 14 nonlinear rows of the model.
m.c257 = Constraint(expr=16/m.x66 - m.x69 <= 0)
m.c258 = Constraint(expr=16/m.x69 - m.x66 <= 0)
m.c259 = Constraint(expr=16/m.x67 - m.x70 <= 0)
m.c260 = Constraint(expr=16/m.x70 - m.x67 <= 0)
m.c261 = Constraint(expr=16/m.x72 - m.x74 <= 0)
m.c262 = Constraint(expr=16/m.x74 - m.x72 <= 0)
m.c263 = Constraint(expr=36/m.x76 - m.x77 <= 0)
m.c264 = Constraint(expr=36/m.x77 - m.x76 <= 0)
m.c265 = Constraint(expr=9/m.x78 - m.x80 <= 0)
m.c266 = Constraint(expr=9/m.x80 - m.x78 <= 0)
m.c267 = Constraint(expr=9/m.x82 - m.x84 <= 0)
m.c268 = Constraint(expr=9/m.x84 - m.x82 <= 0)
m.c269 = Constraint(expr=9/m.x86 - m.x87 <= 0)
m.c270 = Constraint(expr=9/m.x87 - m.x86 <= 0)
| 40.258112
| 115
| 0.599267
|
98c58b5f2ed33bad0c9f8f884e7cfaf461c99dab
| 1,278
|
py
|
Python
|
dashboard_queue.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
dashboard_queue.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
dashboard_queue.py
|
gnott/elife-bot
|
584c315d15d1289e0d2c27c28aaaae31174812e4
|
[
"MIT"
] | null | null | null |
import boto.sqs
from boto.sqs.message import Message
import json
import uuid
def send_message(message, settings):
    """Serialise *message* as JSON and push it onto the dashboard SQS queue.

    :param message: JSON-serialisable payload (see the build_* helpers)
    :param settings: object providing sqs_region, aws_access_key_id,
        aws_secret_access_key and event_monitor_queue
    """
    connection = boto.sqs.connect_to_region(
        settings.sqs_region,
        aws_access_key_id=settings.aws_access_key_id,
        aws_secret_access_key=settings.aws_secret_access_key)
    monitor_queue = connection.get_queue(settings.event_monitor_queue)
    sqs_message = Message()
    sqs_message.set_body(json.dumps(message))
    monitor_queue.write(sqs_message)
def build_event_message(item_identifier, version, run, event_type, timestamp, status, message):
    """Build a dashboard 'event' message dict.

    :param item_identifier: identifier of the item the event concerns
    :param version: item version the event applies to
    :param run: identifier of the workflow run
    :param event_type: name of the event being reported
    :param timestamp: datetime of the event; serialised via ``isoformat()``
    :param status: event status string
    :param message: human-readable description of the event
    :return: dict ready for JSON serialisation, with a fresh ``message_id``
    """
    # Distinct local name: the original rebound the ``message`` parameter to
    # the payload dict, shadowing the argument it still read inside the
    # literal -- confusing, though not incorrect.
    payload = {
        'message_type': 'event',
        'item_identifier': item_identifier,
        'version': version,
        'run': run,
        'event_type': event_type,
        'timestamp': timestamp.isoformat(),
        'status': status,
        'message': message,
        'message_id': str(uuid.uuid4())
    }
    return payload
def build_property_message(item_identifier, version, name, value, property_type):
    """Build a dashboard 'property' message dict.

    :param item_identifier: identifier of the item the property belongs to
    :param version: item version the property applies to
    :param name: property name
    :param value: property value
    :param property_type: type tag for the property value
    :return: dict ready for JSON serialisation, with a fresh ``message_id``
    """
    # Named ``payload`` for symmetry with build_event_message, where the
    # original code shadowed its own parameter.
    payload = {
        'message_type': 'property',
        'item_identifier': item_identifier,
        'version': version,
        'name': name,
        'value': value,
        'property_type': property_type,
        'message_id': str(uuid.uuid4())
    }
    return payload
| 28.4
| 95
| 0.626761
|
fbd781706eeb6c2365f86afed31d97868a4e54b2
| 821
|
py
|
Python
|
config.py
|
Duncanian/Book-A-Meal-1
|
d700b90a07f2412d5d93a381f9153b3f1ddc1838
|
[
"MIT"
] | 1
|
2018-05-06T06:12:24.000Z
|
2018-05-06T06:12:24.000Z
|
config.py
|
Duncanian/Book-A-Meal-1
|
d700b90a07f2412d5d93a381f9153b3f1ddc1838
|
[
"MIT"
] | 1
|
2018-06-13T05:19:13.000Z
|
2018-06-13T05:19:13.000Z
|
config.py
|
lennykioko/Book-A-Meal
|
d700b90a07f2412d5d93a381f9153b3f1ddc1838
|
[
"MIT"
] | 2
|
2018-04-19T10:30:40.000Z
|
2018-07-27T07:04:19.000Z
|
"""Contains various settings for each process of development
"""
from os import getenv
class Config(object):
    """Base class with all the fundamental config variables"""
    # Production-safe defaults: debugging and test mode disabled.
    DEBUG = False
    TESTING = False
    # NOTE(review): True enables Flask-SQLAlchemy's modification-tracking
    # signals, which add overhead; the extension recommends False unless the
    # signals are actually used -- confirm before changing.
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Read from the environment at import time; None if the variable is unset.
    SECRET_KEY = getenv('SECRET_KEY')
class TestingConfig(Config):
    """Configuration used while running the test suite."""
    TESTING = True
    DEBUG = True
    # Tests run against a dedicated database.
    SQLALCHEMY_DATABASE_URI = getenv('TESTING_DATABASE_URI')
class DevelopmentConfig(Config):
    """Configuration for local development (debugging enabled)."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = getenv('DEVELOPMENT_DATABASE_URI')
class ProductionConfig(Config):
    """Configuration for production deployments (DEBUG/TESTING stay off)."""
    SQLALCHEMY_DATABASE_URI = getenv('PRODUCTION_DATABASE_URI')
| 27.366667
| 64
| 0.744214
|
80fdf3b67b9656e459f310ffa7a0239a2726e958
| 5,979
|
py
|
Python
|
FMPasteBoxAppDelegate.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | 2
|
2021-07-14T10:07:13.000Z
|
2021-11-14T17:59:18.000Z
|
FMPasteBoxAppDelegate.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | null | null | null |
FMPasteBoxAppDelegate.py
|
karstenw/FMPasteBox
|
c84aa860401051ed369d8559afa83f572a2bd729
|
[
"BSD-2-Clause"
] | null | null | null |
#
# FMPasteBoxAppDelegate.py
# FMPasteBox
#
import sys
import os
import pprint
pp = pprint.pprint
import pdb
kwlog = True
import objc
import Foundation
NSObject = Foundation.NSObject
NSMutableDictionary = Foundation.NSMutableDictionary
NSData = Foundation.NSData
import AppKit
NSWindowController = AppKit.NSWindowController
NSApplication = AppKit.NSApplication
NSUserDefaults = AppKit.NSUserDefaults
NSMutableAttributedString = AppKit.NSMutableAttributedString
NSBeep = AppKit.NSBeep
NSPasteboard = AppKit.NSPasteboard
import FMPasteBoxLayoutObjects
import FMPasteBoxTools
read_pb = FMPasteBoxTools.read_pb
makeunicode = FMPasteBoxTools.makeunicode
fmpPasteboardTypes = FMPasteBoxTools.fmpPasteboardTypes
additionalFMPPasteboardTypes = FMPasteBoxTools.additionalFMPPasteboardTypes
displaynameTypes = FMPasteBoxTools.displaynameTypes
datetimestamp = FMPasteBoxTools.datetimestamp
import FMPasteBoxVersion
import FMPasteBoxPrefController
PrefController = FMPasteBoxPrefController.FMPasteBoxPreferenceController
class FMPasteBoxAppDelegate(NSObject):
    """Application delegate (Python 2 / PyObjC).

    Bridges the UI (type popup menu + XML text view) with the macOS general
    pasteboard: reads FileMaker clipboard flavors into the editor, optionally
    exports them to disk, and writes edited data back to the pasteboard.
    """

    # Interface Builder outlets wired from the nib.
    menClipboardtype = objc.IBOutlet()
    butGetClipboard = objc.IBOutlet()
    butPushClipboard = objc.IBOutlet()
    tfXMLEditor = objc.IBOutlet()
    appWindow = objc.IBOutlet()

    def initialize(self):
        # Register factory defaults so preference reads always succeed.
        if kwlog:
            print "FMPasteBoxAppDelegate.initialize()"
        userdefaults = NSMutableDictionary.dictionary()
        userdefaults.setObject_forKey_(u"", u'txtFileMakerAppPath')
        userdefaults.setObject_forKey_(u"", u'txtExportsPath')
        userdefaults.setObject_forKey_(False, u'cbDoExports')
        NSUserDefaults.standardUserDefaults().registerDefaults_(userdefaults)
        self.preferenceController = None

    def awakeFromNib(self):
        # for later
        defaults = NSUserDefaults.standardUserDefaults()
        # set up type menu: blank entry first, then sorted display names
        self.menClipboardtype.removeAllItems()
        menuItems = [ u"" ]
        menuItems.extend( displaynameTypes.keys() )
        menuItems.sort()
        for menuItem in menuItems:
            self.menClipboardtype.addItemWithTitle_( menuItem )
        self.menClipboardtype.setTitle_( u"" )
        # set up text view
        self.tfXMLEditor.setUsesFindPanel_(True)
        window = self.tfXMLEditor.window()
        window.makeFirstResponder_(self.tfXMLEditor)

    def applicationDidFinishLaunching_(self, notification):
        # Bring the app to the front and focus the editor.
        app = NSApplication.sharedApplication()
        app.activateIgnoringOtherApps_(True)
        window = self.tfXMLEditor.window()
        window.makeFirstResponder_(self.tfXMLEditor)

    @objc.IBAction
    def getClipboard_(self, sender):
        # Pull the current pasteboard contents into the editor; optionally
        # export every flavor to a timestamped session folder on disk.
        pasteboardContents = read_pb()
        if not pasteboardContents:
            # abort - nothing on pasteboard
            NSBeep()
            # we must return implicit None! Crashing otherwise.
            return
        defaults = NSUserDefaults.standardUserDefaults()
        exportClipboards = defaults.boolForKey_( u'cbDoExports' )
        if exportClipboards:
            exportFolder = makeunicode(defaults.objectForKey_( u'txtExportsPath' ))
            if os.path.exists( exportFolder ):
                # Layout: <exportFolder>/<date>/<time>-<mainType>/
                d,t = FMPasteBoxTools.datetimestamp()
                dayFolder = os.path.join( exportFolder, d )
                mainType = "-"
                try:
                    mainType = mainType + pasteboardContents.typ.name
                except:
                    # best effort: keep the bare "-" if the type is missing
                    pass
                sessionFolder = os.path.join( dayFolder, t + mainType)
                try:
                    # Export the main flavor plus all additional flavors.
                    exportItems = pasteboardContents.additionals[:]
                    exportItems.append( pasteboardContents )
                    for item in exportItems:
                        name = item.typ.name
                        ext = item.typ.fileExt
                        data = item.data
                        path = os.path.join( sessionFolder, name + ext )
                        if ext == ".xml":
                            # XML flavors are written as UTF-8 text
                            data = makeunicode( data )
                            data = data.encode( "utf-8" )
                        if not os.path.exists( sessionFolder ):
                            os.makedirs( sessionFolder )
                        f = open(path, 'w')
                        f.write( data )
                        f.close()
                        if ext == ".xml":
                            # pull referenced assets out of the XML, too
                            FMPasteBoxLayoutObjects.exportAssets( path, sessionFolder )
                except Exception, err:
                    # Exporting is best effort; never block the UI update.
                    print
                    print "ADDITIONALS FAILED"
                    print err
                    print
        # Reflect the pasteboard type and content in the UI.
        pbType = pasteboardContents.typ
        pbTypeName = pbType.name
        self.menClipboardtype.setTitle_( pbTypeName )
        self.tfXMLEditor.setString_( makeunicode( pasteboardContents.data ) )
        window = self.tfXMLEditor.window()
        window.makeFirstResponder_(self.tfXMLEditor)

    def textView(self):
        # model: the editor's current text as unicode
        return makeunicode( self.tfXMLEditor.string() )

    @objc.IBAction
    def pushClipboard_(self, sender):
        # Write the editor text back to the general pasteboard under the
        # flavor currently selected in the type menu.
        # get text view data
        data = makeunicode(self.textView())
        data = data.encode("utf-8")
        l = len(data)
        nsdata = NSData.dataWithBytes_length_(data, l)
        # get pasteboard type
        pasteboardType = displaynameTypes.get( self.menClipboardtype.title(), u"" )
        if not pasteboardType:
            NSBeep()
            # we must return implicit None! Crashing otherwise.
            return
        # write to pasteboard
        pasteboard = NSPasteboard.generalPasteboard()
        pasteboard.clearContents()
        pasteboardTypeName = pasteboardType.pbname
        pasteboard.setData_forType_( nsdata, pasteboardTypeName)

    @objc.IBAction
    def showPreferencePanel_(self, sender):
        # Lazily create the preferences window controller on first use.
        if self.preferenceController == None:
            self.preferenceController = PrefController.alloc().init()
        self.preferenceController.showWindow_( self.preferenceController )
| 33.589888
| 87
| 0.635223
|
f8bae8154f987f13f2eab78834d68219b33fd5f0
| 342
|
py
|
Python
|
socialcrawl/networks/tasks.py
|
enterstudio/socialcrawl
|
8ad0081db3eafe96132291e579984e504c48021f
|
[
"MIT"
] | 13
|
2015-03-04T22:34:28.000Z
|
2021-12-26T06:27:21.000Z
|
socialcrawl/networks/tasks.py
|
MrMugiwara/socialcrawl
|
8ad0081db3eafe96132291e579984e504c48021f
|
[
"MIT"
] | 2
|
2017-01-28T21:45:53.000Z
|
2018-08-05T10:58:15.000Z
|
socialcrawl/networks/tasks.py
|
enterstudio/socialcrawl
|
8ad0081db3eafe96132291e579984e504c48021f
|
[
"MIT"
] | 4
|
2016-09-19T20:29:34.000Z
|
2020-07-24T18:04:00.000Z
|
from celery import task
from socialcrawl.clients.crawler import CachedTwitterClient, CachedFacebookClient
# One shared cached client per supported network, reused across task runs.
CLIENTS = {
    'twitter': CachedTwitterClient(),
    'facebook': CachedFacebookClient(),
}
@task()
def fetch(username, network):
    """Fetch and persist the profile of *username* on *network*.

    *network* must be a key of CLIENTS ('twitter' or 'facebook').
    """
    client = CLIENTS[network]
    profile = client.fetch_profile(username)
    client.save_profile(username, profile)
| 22.8
| 81
| 0.75731
|
9feea1c6fc05c33f505917effcd1dc38a9c176f5
| 1,358
|
py
|
Python
|
azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/models/__init__.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/mgmt/resource/policy/v2017_06_01_preview/models/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure/mgmt/resource/policy/v2017_06_01_preview/models/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .policy_sku import PolicySku
from .policy_assignment import PolicyAssignment
from .error_response import ErrorResponse, ErrorResponseException
from .policy_definition_reference import PolicyDefinitionReference
from .policy_set_definition import PolicySetDefinition
from .policy_definition import PolicyDefinition
from .policy_assignment_paged import PolicyAssignmentPaged
from .policy_set_definition_paged import PolicySetDefinitionPaged
from .policy_definition_paged import PolicyDefinitionPaged
from .policy_client_enums import (
PolicyType,
PolicyMode,
)
__all__ = [
'PolicySku',
'PolicyAssignment',
'ErrorResponse', 'ErrorResponseException',
'PolicyDefinitionReference',
'PolicySetDefinition',
'PolicyDefinition',
'PolicyAssignmentPaged',
'PolicySetDefinitionPaged',
'PolicyDefinitionPaged',
'PolicyType',
'PolicyMode',
]
| 34.820513
| 76
| 0.709131
|
7dc854b7b709d8ffa6053f5b35d8c88b4341ff6d
| 422
|
py
|
Python
|
osf/migrations/0119_add_asset_perms.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 628
|
2015-01-15T04:33:22.000Z
|
2022-03-30T06:40:10.000Z
|
osf/migrations/0119_add_asset_perms.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 4,712
|
2015-01-02T01:41:53.000Z
|
2022-03-30T14:18:40.000Z
|
osf/migrations/0119_add_asset_perms.py
|
Johnetordoff/osf.io
|
de10bf249c46cede04c78f7e6f7e352c69e6e6b5
|
[
"Apache-2.0"
] | 371
|
2015-01-12T16:14:08.000Z
|
2022-03-31T18:58:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-16 17:04
from django.db import migrations
def noop(*args):
    """Do nothing.

    Placeholder kept for reversibility: this migration used to update
    admin permissions, which the post_migrate signal now handles.
    """
    return None
class Migration(migrations.Migration):
    """Placeholder migration.

    The original permission update is now done by a post_migrate signal,
    so both forward and reverse operations are no-ops (keeping the
    migration history intact and reversible).
    """

    dependencies = [
        ('osf', '0118_auto_20180716_1216'),
    ]

    operations = [
        # noop in both directions keeps the migration reversible
        migrations.RunPython(noop, noop),
    ]
| 21.1
| 53
| 0.656398
|
836abda861e50d7f700a2fa518ffc0f2a3086b4d
| 856
|
py
|
Python
|
handyman/transform.py
|
gfleetwood/handyman
|
c3d4a8d7fe87536bb8c9b8f8ba0fce7b1f7feade
|
[
"MIT"
] | 3
|
2019-04-04T18:05:13.000Z
|
2020-12-13T14:55:32.000Z
|
handyman/transform.py
|
gfleetwood/handyman
|
c3d4a8d7fe87536bb8c9b8f8ba0fce7b1f7feade
|
[
"MIT"
] | null | null | null |
handyman/transform.py
|
gfleetwood/handyman
|
c3d4a8d7fe87536bb8c9b8f8ba0fce7b1f7feade
|
[
"MIT"
] | 1
|
2021-11-20T08:26:03.000Z
|
2021-11-20T08:26:03.000Z
|
def impute_scale_data(df):
    '''
    Imputes missing values with the median for numeric features and the mode for categorical ones,
    and then scales the numeric features by subtracting the mean and dividing by the standard deviation.

    NOTE(review): `sk_pp` is not defined in this view -- presumably
    `sklearn.preprocessing` imported elsewhere; confirm. `Imputer` was
    removed from sklearn.preprocessing in 0.22 (replaced by
    sklearn.impute.SimpleImputer), so this code pins an old sklearn.
    NOTE(review): mutates `df` in place and returns the same object.
    '''
    # Median-impute every non-object (numeric) column.
    imputer_num = sk_pp.Imputer(strategy = 'median')
    df.loc[:, df.select_dtypes(exclude = ['object']).columns] = \
        imputer_num.fit_transform(df.select_dtypes(exclude = ['object']))
    # Standardise numeric columns: (x - mean) / std.
    scaler = sk_pp.StandardScaler()
    df.loc[:, df.select_dtypes(exclude = ['object']).columns] = \
        scaler.fit_transform(df.select_dtypes(exclude = ['object']))
    # Mode-impute categorical (object) columns with the most frequent value.
    for col in df.select_dtypes(include = ['object']).columns:
        df.loc[:, col] = df.loc[:, col].fillna(value = df.loc[:, col].value_counts().index[0])
    df_imputed_scaled = df
    return(df_imputed_scaled)
| 38.909091
| 104
| 0.658879
|
17ff164013e36878e093dd45a4a852e6709e57d2
| 9,551
|
py
|
Python
|
src/bvcf.py
|
shtoneyan/Basset
|
b6c7f8995bb4f8fc37eccf3ee0f78478beef51d7
|
[
"MIT"
] | 248
|
2015-10-06T12:30:53.000Z
|
2022-02-02T20:30:34.000Z
|
src/bvcf.py
|
Deepstatsanalysis/Basset
|
18753ad9ff5a46291021a0fa1abaad037b6f64f0
|
[
"MIT"
] | 51
|
2015-10-08T04:57:41.000Z
|
2021-08-12T19:53:04.000Z
|
src/bvcf.py
|
Deepstatsanalysis/Basset
|
18753ad9ff5a46291021a0fa1abaad037b6f64f0
|
[
"MIT"
] | 120
|
2015-10-15T00:49:44.000Z
|
2022-02-16T21:17:17.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import sys
import numpy as np
import pandas as pd
import pysam
from dna_io import dna_one_hot
################################################################################
# bvcf.py
#
# Methods and classes to support .vcf SNP analysis.
################################################################################
def cap_allele(allele, cap=5):
    ''' Truncate a long allele for display, marking the cut with "*". '''
    if len(allele) <= cap:
        return allele
    return allele[:cap] + '*'
def snps_seq1(snps, seq_len, genome_fasta, return_seqs=False):
    ''' Produce an array of one hot coded sequences for a list of SNPs.

    Attrs:
        snps [SNP] : list of SNPs
        seq_len (int) : sequence length to code
        genome_fasta (str) : genome FASTA file

    Return:
        seq_vecs (array) : one hot coded sequences surrounding the SNPs
        seq_headers [str] : headers for sequences
        seq_snps [SNP] : list of used SNPs
    '''
    # Window is centered on the SNP position; the right side gets extra room
    # so indel alternates still fit after substitution.
    left_len = seq_len//2 - 1
    right_len = seq_len//2

    # open genome FASTA
    genome = pysam.Fastafile(genome_fasta)

    # initialize one hot coded vector list
    seq_vecs_list = []

    # save successful SNPs
    seq_snps = []

    # save sequence strings, too
    seqs = []

    # name sequences
    seq_headers = []

    for snp in snps:
        # specify positions in GFF-style 1-based
        seq_start = snp.pos - left_len
        seq_end = snp.pos + right_len + len(snp.ref_allele) - snp.longest_alt()

        # extract sequence as BED style; pad with N if the window runs off
        # the chromosome start
        if seq_start < 0:
            seq = 'N'*(-seq_start) + genome.fetch(snp.chrom, 0, seq_end).upper()
        else:
            seq = genome.fetch(snp.chrom, seq_start-1, seq_end).upper()

        # extend to full length
        if len(seq) < seq_end - seq_start:
            seq += 'N'*(seq_end-seq_start-len(seq))

        # verify that ref allele matches ref sequence
        seq_ref = seq[left_len:left_len+len(snp.ref_allele)]
        if seq_ref != snp.ref_allele:
            if seq_ref not in snp.alt_alleles:
                # neither allele matches: drop this SNP
                print('WARNING: Skipping %s - neither allele matches reference genome: %s vs %s' % (snp.rsid, snp.ref_allele, seq_ref), file=sys.stderr)
                continue
            else:
                print('WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match.' % (snp.rsid), file=sys.stderr)

                # remove alt allele and include ref allele
                seq = seq[:left_len] + snp.ref_allele + seq[left_len+len(seq_ref):]

                # note that this won't work for indels, but they will be sent to the
                # skipping code above because seq_ref will be the wrong length as the
                # proper alternative allele

        seq_snps.append(snp)

        # one hot code ref allele
        seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len)
        seq_vecs_list.append(seq_vecs_ref)
        if return_seqs:
            seqs.append(seq_ref)

        # name ref allele
        seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))

        for alt_al in snp.alt_alleles:
            # remove ref allele and include alt allele
            seq_alt = seq[:left_len] + alt_al + seq[left_len+len(snp.ref_allele):]

            # one hot code
            seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
            seq_vecs_list.append(seq_vecs_alt)
            if return_seqs:
                seqs.append(seq_alt)

            # name
            seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))

    # stack into one array: one row per (SNP, allele) pair, in input order
    seq_vecs = np.vstack(seq_vecs_list)

    if return_seqs:
        return seq_vecs, seq_headers, seq_snps, seqs
    else:
        return seq_vecs, seq_headers, seq_snps
def snps2_seq1(snps, seq_len, genome1_fasta, genome2_fasta, return_seqs=False):
    ''' Produce an array of one hot coded sequences for a list of SNPs.

    Attrs:
        snps [SNP] : list of SNPs
        seq_len (int) : sequence length to code
        genome_fasta (str) : major allele genome FASTA file
        genome2_fasta (str) : minor allele genome FASTA file

    Return:
        seq_vecs (array) : one hot coded sequences surrounding the SNPs
        seq_headers [str] : headers for sequences
        seq_snps [SNP] : list of used SNPs
    '''
    # BUGFIX: use floor division like snps_seq1 does. Under Python 3 the
    # original "seq_len/2" produced floats, which crash the string slicing
    # below (TypeError: slice indices must be integers).
    left_len = seq_len//2 - 1
    right_len = seq_len//2

    # open genome FASTA
    genome1 = pysam.Fastafile(genome1_fasta)
    genome2 = pysam.Fastafile(genome2_fasta)

    # initialize one hot coded vector list
    seq_vecs_list = []

    # save successful SNPs
    seq_snps = []

    # save sequence strings, too
    seqs = []

    # name sequences
    seq_headers = []

    for snp in snps:
        # major/minor mode is strictly biallelic
        if len(snp.alt_alleles) > 1:
            print('Major/minor genome mode requires only two alleles: %s' % snp.rsid, file=sys.stderr)
            exit(1)

        alt_al = snp.alt_alleles[0]

        # specify positions in GFF-style 1-based
        seq_start = snp.pos - left_len
        seq_end = snp.pos + right_len + len(snp.ref_allele)

        # extract sequence as BED style; pad with N off the chromosome start
        if seq_start < 0:
            seq_ref = 'N'*(-seq_start) + genome1.fetch(snp.chrom, 0, seq_end).upper()
        else:
            seq_ref = genome1.fetch(snp.chrom, seq_start-1, seq_end).upper()

        # extend to full length
        if len(seq_ref) < seq_end - seq_start:
            seq_ref += 'N'*(seq_end-seq_start-len(seq_ref))

        # verify that ref allele matches ref sequence
        seq_ref_snp = seq_ref[left_len:left_len+len(snp.ref_allele)]
        if seq_ref_snp != snp.ref_allele:
            print('WARNING: Major allele SNP %s doesnt match reference genome: %s vs %s' % (snp.rsid, snp.ref_allele, seq_ref_snp), file=sys.stderr)
            exit(1)

        # specify positions in GFF-style 1-based (minor genome coordinate)
        seq_start = snp.pos2 - left_len
        seq_end = snp.pos2 + right_len + len(alt_al)

        # extract sequence as BED style
        if seq_start < 0:
            seq_alt = 'N'*(-seq_start) + genome2.fetch(snp.chrom, 0, seq_end).upper()
        else:
            seq_alt = genome2.fetch(snp.chrom, seq_start-1, seq_end).upper()

        # extend to full length
        if len(seq_alt) < seq_end - seq_start:
            seq_alt += 'N'*(seq_end-seq_start-len(seq_alt))

        # verify that alt allele matches the minor-allele genome
        seq_alt_snp = seq_alt[left_len:left_len+len(alt_al)]
        if seq_alt_snp != alt_al:
            print('WARNING: Minor allele SNP %s doesnt match reference genome: %s vs %s' % (snp.rsid, snp.alt_alleles[0], seq_alt_snp), file=sys.stderr)
            exit(1)

        seq_snps.append(snp)

        # one hot code ref allele
        seq_vecs_ref, seq_ref = dna_length_1hot(seq_ref, seq_len)
        seq_vecs_list.append(seq_vecs_ref)
        if return_seqs:
            seqs.append(seq_ref)

        # name ref allele
        seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))

        # one hot code alt allele
        seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
        seq_vecs_list.append(seq_vecs_alt)
        if return_seqs:
            seqs.append(seq_alt)

        # name
        seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))

    # stack: two rows (ref, alt) per SNP, in input order
    seq_vecs = np.vstack(seq_vecs_list)

    if return_seqs:
        return seq_vecs, seq_headers, seq_snps, seqs
    else:
        return seq_vecs, seq_headers, seq_snps
def dna_length_1hot(seq, length):
    ''' Center-crop or N-pad `seq` to exactly `length`, then one hot code it.

    Returns:
        (one-hot array, adjusted sequence string)
    '''
    current = len(seq)
    if length < current:
        # center crop, biased one base left when the difference is odd
        start = (current - length) // 2
        seq = seq[start:start + length]
    elif length > current:
        # pad both ends with N; the extra N goes on the back when odd
        pad_front = (length - current) // 2
        pad_back = length - current - pad_front
        seq = 'N' * pad_front + seq + 'N' * pad_back
    return dna_one_hot(seq), seq
def vcf_snps(vcf_file, index_snp=False, score=False, pos2=False):
    ''' Load SNPs from a VCF file.

    Flags are forwarded to the SNP constructor to parse optional columns.

    Fixes over the original: the file handle is closed via a context
    manager, header lines are skipped wherever they occur, and blank
    lines no longer crash parsing (the original indexed line[0] and
    built SNPs from whitespace-only lines).
    '''
    snps = []
    with open(vcf_file) as vcf_in:
        for line in vcf_in:
            # skip header/comment lines and blank lines
            if line.startswith('#') or not line.strip():
                continue
            snps.append(SNP(line, index_snp, score, pos2))
    return snps
class SNP:
    ''' One SNP parsed from a whitespace-delimited VCF line.

    Attributes:
        chrom (str) : chromosome name, always 'chr'-prefixed
        pos (int) : 1-based position
        rsid (str) : variant identifier
        ref_allele (str) : reference allele
        alt_alleles ([str]) : alternate alleles (comma-split)
        index_snp (str) : column 6 when index_snp=True, else '.'
        score (float) : column 7 when score=True, else None
        pos2 (int) : column 6 as alternate-genome position when pos2=True
    '''

    def __init__(self, vcf_line, index_snp=False, score=False, pos2=False):
        fields = vcf_line.split()

        chrom = fields[0]
        # normalize to a 'chr'-prefixed chromosome name
        self.chrom = chrom if chrom.startswith('chr') else 'chr%s' % chrom

        self.pos = int(fields[1])
        self.rsid = fields[2]
        self.ref_allele = fields[3]
        self.alt_alleles = fields[4].split(',')

        # optional columns, controlled by the constructor flags
        self.index_snp = fields[5] if index_snp else '.'
        self.score = float(fields[6]) if score else None
        self.pos2 = int(fields[5]) if pos2 else None

    def get_alleles(self):
        ''' Return all alleles, reference first. '''
        return [self.ref_allele] + self.alt_alleles

    def longest_alt(self):
        ''' Return the length of the longest alternate allele. '''
        return max(len(al) for al in self.alt_alleles)

    def __str__(self):
        return 'SNP(%s, %s:%d, %s/%s)' % (self.rsid, self.chrom, self.pos,
                                          self.ref_allele,
                                          ','.join(self.alt_alleles))
| 30.514377
| 161
| 0.594179
|
5fc391770926dd7ef11862edd1950309e8023dff
| 1,041
|
py
|
Python
|
tests/test_x509_parser.py
|
jcrowgey/x5092json
|
97dcc266d5db3b4b487613621121c69aa5714a74
|
[
"BSD-3-Clause"
] | 15
|
2019-01-11T23:43:23.000Z
|
2022-01-04T07:19:23.000Z
|
tests/test_x509_parser.py
|
jcrowgey/x5092json
|
97dcc266d5db3b4b487613621121c69aa5714a74
|
[
"BSD-3-Clause"
] | 10
|
2019-01-17T17:14:59.000Z
|
2021-04-26T11:38:14.000Z
|
tests/test_x509_parser.py
|
jcrowgey/x5092json
|
97dcc266d5db3b4b487613621121c69aa5714a74
|
[
"BSD-3-Clause"
] | 8
|
2019-01-16T21:48:34.000Z
|
2021-09-03T10:59:42.000Z
|
import json
import os
from x5092json import x509parser
from base64 import b64decode
from collections import OrderedDict
import pytest
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
# Load the shared fixture once at import time. A context manager closes the
# file handle deterministically (the original open() leaked it);
# OrderedDict preserves key order so comparisons are stable.
with open(os.path.join(TEST_DIR, "test_data.json")) as _test_data_file:
    test_data = json.load(_test_data_file, object_pairs_hook=OrderedDict)
@pytest.mark.parametrize("test_name", test_data.keys())
def test_run(test_name):
    """Decode one base64 DER fixture, parse it, and compare to the expected dict."""
    item = test_data[test_name]
    cert = x509parser.READERS["DER"](b64decode(item["raw"]))
    assert x509parser.parse(cert) == item["parsed"]
def test_load_files():
    """Load PEM and DER certificates from disk and parse them.

    Uses context managers so both file handles are closed deterministically
    (the original leaked both).
    """
    with open(os.path.join(TEST_DIR, "test_cert.pem"), mode="rb") as pem_file:
        certificate = x509parser.load_certificate(pem_file)
        cert_data = x509parser.parse(certificate)
    assert cert_data

    with open(os.path.join(TEST_DIR, "test_cert.der"), mode="rb") as der_file:
        certificate = x509parser.load_certificate(
            der_file, x509parser.READERS["DER"]
        )
        cert_data = x509parser.parse(certificate)
    assert cert_data
| 29.742857
| 72
| 0.738713
|
8fc7e49d2de8d461467153a60994f91c1e497b32
| 1,247
|
py
|
Python
|
google/ads/googleads/v9/services/types/language_constant_service.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/types/language_constant_service.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/services/types/language_constant_service.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.services",
marshal="google.ads.googleads.v9",
manifest={"GetLanguageConstantRequest",},
)
class GetLanguageConstantRequest(proto.Message):
    r"""Request message for
    [LanguageConstantService.GetLanguageConstant][google.ads.googleads.v9.services.LanguageConstantService.GetLanguageConstant].

    Attributes:
        resource_name (str):
            Required. Resource name of the language
            constant to fetch.
    """

    # Serialized as proto field number 1 (string).
    resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.175
| 128
| 0.732959
|
c55385e45c67b7c46504ddf3a77de7b04b43472d
| 298
|
py
|
Python
|
flask_monitoringdashboard/test/test_factory.py
|
jlane9/Flask-MonitoringDashboard
|
b989bcf8f870ccd9141210eb4b2b8f716873c4fe
|
[
"MIT"
] | null | null | null |
flask_monitoringdashboard/test/test_factory.py
|
jlane9/Flask-MonitoringDashboard
|
b989bcf8f870ccd9141210eb4b2b8f716873c4fe
|
[
"MIT"
] | null | null | null |
flask_monitoringdashboard/test/test_factory.py
|
jlane9/Flask-MonitoringDashboard
|
b989bcf8f870ccd9141210eb4b2b8f716873c4fe
|
[
"MIT"
] | null | null | null |
import unittest
from flask_monitoringdashboard.test.utils import get_test_app
class TestFactory(unittest.TestCase):
    """Regression test: creating the dashboard app repeatedly must not fail."""

    def test_factory(self):
        """
        Create multiple applications and verify that the app doesn't break.
        """
        # A second creation exercises any module-level state registered
        # the first time around.
        get_test_app()
        get_test_app()
| 21.285714
| 79
| 0.674497
|
5e507193aa1ff19da31e7cb65a0df99aca3578b3
| 11,733
|
py
|
Python
|
mne/viz/tests/test_ica.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
mne/viz/tests/test_ica.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
mne/viz/tests/test_ica.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | null | null | null |
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import pytest
import matplotlib.pyplot as plt
from mne import read_events, Epochs, read_cov, pick_types
from mne.io import read_raw_fif
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne.utils import run_tests_if_main, requires_sklearn
from mne.viz.ica import _create_properties_layout, plot_ica_properties
from mne.viz.utils import _fake_click
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
def _get_raw(preload=False):
    """Read the test raw FIF file, optionally preloading the data."""
    raw = read_raw_fif(raw_fname, preload=preload)
    return raw
def _get_events():
    """Read the events from the test events file."""
    events = read_events(event_name)
    return events
def _get_picks(raw):
"""Get picks."""
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
    """Build a small Epochs object from the test raw file."""
    raw = _get_raw()
    picks = _get_picks(raw)
    events = _get_events()
    # The test file carries projectors, which triggers a RuntimeWarning here.
    with pytest.warns(RuntimeWarning, match='projection'):
        epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
    return epochs
@requires_sklearn
def test_plot_ica_components():
    """Test plotting of ICA solutions, including interactive clicks."""
    res = 8
    # low-res topomaps with no contours/sensors keep plotting fast
    fast_test = {"res": res, "contours": 0, "sensors": False}
    raw = _get_raw()
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    ica_picks = _get_picks(raw)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=ica_picks)

    # every accepted `picks` form should plot
    for components in [0, [0], [0, 1], [0, 1] * 2, None]:
        ica.plot_components(components, image_interp='bilinear',
                            colorbar=True, **fast_test)
    plt.close('all')

    # test interactive mode (passing 'inst' arg)
    ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16)
    fig = plt.gcf()

    # test title click
    # ----------------
    lbl = fig.axes[1].get_label()
    ica_idx = int(lbl[-3:])
    titles = [ax.title for ax in fig.axes]
    title_pos_midpoint = (titles[1].get_window_extent().extents
                          .reshape((2, 2)).mean(axis=0))
    # first click adds to exclude
    _fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
    assert ica_idx in ica.exclude
    # clicking again removes from exclude
    _fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
    assert ica_idx not in ica.exclude

    # test topo click
    # ---------------
    _fake_click(fig, fig.axes[1], (0., 0.), xform='data')
    c_fig = plt.gcf()
    labels = [ax.get_label() for ax in c_fig.axes]
    # the click must open a properties figure with all five panels
    for l in ['topomap', 'image', 'erp', 'spectrum', 'variance']:
        assert (l in labels)
    topomap_ax = c_fig.axes[labels.index('topomap')]
    title = topomap_ax.get_title()
    assert (lbl == title)
    # plotting after clearing the measurement info must raise
    ica.info = None
    with pytest.raises(RuntimeError, match='fit the ICA'):
        ica.plot_components(1, ch_type='mag')
    plt.close('all')
@requires_sklearn
def test_plot_ica_properties():
    """Test plotting of ICA properties and its argument validation."""
    res = 8
    raw = _get_raw(preload=True)
    raw.add_proj([], remove_existing=True)
    events = _get_events()
    picks = _get_picks(raw)[:6]
    pick_names = [raw.ch_names[k] for k in picks]
    raw.pick_channels(pick_names)
    reject = dict(grad=4000e-13, mag=4e-12)
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax,
                    baseline=(None, 0), preload=True)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=2, n_pca_components=2)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw)

    # test _create_properties_layout: five axes (topomap/image/erp/psd/var)
    fig, ax = _create_properties_layout()
    assert_equal(len(ax), 5)

    topoargs = dict(topomap_args={'res': res, 'contours': 0, "sensors": False})
    ica.plot_properties(raw, picks=0, **topoargs)
    ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
    ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5},
                        topomap_args={'res': 10, 'colorbar': True},
                        psd_args={'fmax': 65.}, plot_std=False,
                        figsize=[4.5, 4.5], reject=reject)
    plt.close('all')

    # invalid arguments must raise with the right exception type
    pytest.raises(TypeError, ica.plot_properties, epochs, dB=list('abc'))
    pytest.raises(TypeError, ica.plot_properties, ica)
    pytest.raises(TypeError, ica.plot_properties, [0.2])
    pytest.raises(TypeError, plot_ica_properties, epochs, epochs)
    pytest.raises(TypeError, ica.plot_properties, epochs,
                  psd_args='not dict')
    pytest.raises(ValueError, ica.plot_properties, epochs, plot_std=[])

    # plotting into user-supplied axes
    fig, ax = plt.subplots(2, 3)
    ax = ax.ravel()[:-1]
    ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
    fig = ica.plot_properties(raw, picks=[0, 1], **topoargs)
    assert_equal(len(fig), 2)
    # multiple picks cannot share one set of axes
    pytest.raises(TypeError, plot_ica_properties, epochs, ica, picks=[0, 1],
                  axes=ax)
    pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
    plt.close('all')

    # Test merging grads.
    raw = _get_raw(preload=True)
    picks = pick_types(raw.info, meg='grad')[:10]
    ica = ICA(n_components=2)
    ica.fit(raw, picks=picks)
    ica.plot_properties(raw)
    plt.close('all')
@requires_sklearn
def test_plot_ica_sources():
    """Test plotting of ICA panel for Raw, Epochs, and Evoked inputs."""
    raw = read_raw_fif(raw_fname).crop(0, 1).load_data()
    picks = _get_picks(raw)
    epochs = _get_epochs()
    raw.pick_channels([raw.ch_names[k] for k in picks])
    ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
                           ecg=False, eog=False, exclude='bads')
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=ica_picks)
    ica.exclude = [1]
    fig = ica.plot_sources(raw)
    # closing with escape must not alter the exclude list
    fig.canvas.key_press_event('escape')
    # Sadly close_event isn't called on Agg backend and the test always passes.
    assert_array_equal(ica.exclude, [1])
    plt.close('all')

    # dtype can change int->np.int after load, test it explicitly
    ica.n_components_ = np.int64(ica.n_components_)
    fig = ica.plot_sources(raw)
    # also test mouse clicks
    data_ax = fig.axes[0]
    _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label

    # `exclude` parameter is deprecated
    with pytest.deprecated_call():
        ica.plot_sources(raw, exclude=[1])

    # mismatched bad channels between inst and fit must raise
    raw.info['bads'] = ['MEG 0113']
    with pytest.raises(RuntimeError, match="Raw doesn't match fitted data"):
        ica.plot_sources(inst=raw)
    ica.plot_sources(epochs)
    epochs.info['bads'] = ['MEG 0113']
    with pytest.raises(RuntimeError, match="Epochs don't match fitted data"):
        ica.plot_sources(inst=epochs)
    epochs.info['bads'] = []
    ica.plot_sources(epochs.average())
    evoked = epochs.average()
    fig = ica.plot_sources(evoked)
    # Test a click
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax,
                [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax,
                [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
    # plot with bad channels excluded
    ica.exclude = [0]
    ica.plot_sources(evoked)
    ica.labels_ = dict(eog=[0])
    ica.labels_['eog/0/crazy-channel'] = [0]
    ica.plot_sources(evoked)  # now with labels
    # anything that is not Raw/Epochs/Evoked must raise
    with pytest.raises(ValueError, match='must be of Raw or Epochs type'):
        ica.plot_sources('meeow')
    plt.close('all')
@requires_sklearn
def test_plot_ica_overlay():
    """Test plotting of ICA cleaning (before/after overlay)."""
    raw = _get_raw(preload=True)
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    # can't use info.normalize_proj here because of how and when ICA and Epochs
    # objects do picking of Raw data
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # don't test raw, needs preload ...
    with pytest.warns(RuntimeWarning, match='projection'):
        ecg_epochs = create_ecg_epochs(raw, picks=picks)
    ica.plot_overlay(ecg_epochs.average())
    with pytest.warns(RuntimeWarning, match='projection'):
        eog_epochs = create_eog_epochs(raw, picks=picks)
    ica.plot_overlay(eog_epochs.average())
    # invalid inputs: data array instead of inst, bad `exclude` type
    pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
    pytest.raises(TypeError, ica.plot_overlay, raw, exclude=2)
    ica.plot_overlay(raw)
    plt.close('all')

    # smoke test for CTF
    raw = read_raw_fif(raw_ctf_fname)
    raw.apply_gradient_compensation(3)
    picks = pick_types(raw.info, meg=True, ref_meg=False)
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=picks)
    with pytest.warns(RuntimeWarning, match='longer than'):
        ecg_epochs = create_ecg_epochs(raw)
    ica.plot_overlay(ecg_epochs.average())
    plt.close('all')
@requires_sklearn
def test_plot_ica_scores():
    """Test plotting of ICA scores.

    Verifies ``ICA.plot_scores`` with and without label selection and
    checks that mismatched label/score counts raise.
    """
    raw = _get_raw()
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # Attach component labels so plot_scores can select by label name.
    ica.labels_ = dict()
    ica.labels_['eog/0/foo'] = 0
    ica.labels_['eog'] = 0
    ica.labels_['ecg'] = 1
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='foo')
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
    # Too many labels for two scores, and too few scores, both raise.
    pytest.raises(
        ValueError,
        ica.plot_scores,
        [0.3, 0.2], axhline=[0.1, -0.1], labels=['one', 'one-too-many'])
    pytest.raises(ValueError, ica.plot_scores, [0.2])
    plt.close('all')
@requires_sklearn
def test_plot_instance_components():
    """Test plotting of components as instances of raw and epochs.

    Exercises the interactive browser figure returned by
    ``ICA.plot_sources``: keyboard shortcuts, clicks on traces and on
    the y-label area, for both Raw and Epochs inputs.
    """
    raw = _get_raw()
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    ica.exclude = [0]
    fig = ica.plot_sources(raw, title='Components')
    # Drive the browser's keyboard shortcuts (scrolling, scaling, etc.).
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    # Click on a data point of the first trace ...
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]],
                'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
    epochs = _get_epochs()
    fig = ica.plot_sources(epochs, title='Components')
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    # Test a click
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
# Run the module's tests when executed as a script.
run_tests_if_main()
| 36.437888
| 79
| 0.646212
|
7f71db1db8b1e52248516329621ba95d1fb384fc
| 9,968
|
py
|
Python
|
tests/quick.py
|
wfarah/frbpoppy
|
e575c49e6b4a69015a66d3f38a3459e0ffe4eb05
|
[
"MIT"
] | null | null | null |
tests/quick.py
|
wfarah/frbpoppy
|
e575c49e6b4a69015a66d3f38a3459e0ffe4eb05
|
[
"MIT"
] | null | null | null |
tests/quick.py
|
wfarah/frbpoppy
|
e575c49e6b4a69015a66d3f38a3459e0ffe4eb05
|
[
"MIT"
] | null | null | null |
"""Use standard populations to speed up calculation times."""
import os
from frbpoppy import CosmicPopulation, SurveyPopulation, paths, unpickle
class StandardCosmicPops:
    """Factory for standard cosmic populations.

    Each ``*_pop`` method builds, saves and returns a named
    ``CosmicPopulation``. All populations share a common base parameter
    set (that of the 'standard' complex population); the individual
    generators only override what differs, which keeps the parameter
    tables in one place.
    """

    # Number of sources per size keyword.
    _SIZES = {'small': int(1e5),   # For testing purposes
              'medium': int(1e6),  # For laptop purposes
              'large': int(1e7),   # For cluster purposes
              'huge': int(1e8)}    # For cluster purposes

    def __init__(self, sort, size, alpha, gamma):
        """Quickly get standard populations.

        Args:
            sort (str): Which type of population, standard, std_candle etc.
            size (str): Choice of 'small', 'medium', 'large' or 'huge'
            alpha (float): Source count slope (alpha populations only).
            gamma (float): Spectral index (gamma population only).

        Raises:
            ValueError: If ``size`` is not a known size keyword.
        """
        self.sort = sort
        self.size = size
        self.alpha = alpha
        self.gamma = gamma

        # Encode the distinguishing arguments in the name so each
        # parameter combination gets its own pickle file.
        self.name = f'{self.sort}_{self.size}'
        if alpha is not None:
            self.name += f'_{self.alpha}'
        if gamma is not None:
            self.name += f'_{self.gamma}'

        self.path = paths.populations() + self.name + '.p'

        # Fail fast on an unknown size rather than leaving self.n unset
        # (which would only surface later as an AttributeError).
        if self.size not in self._SIZES:
            raise ValueError(f"Unknown population size '{self.size}'")
        self.n = self._SIZES[self.size]

    def _generate(self, **overrides):
        """Build, save and return a population.

        Starts from the parameters of the standard (complex) population
        and applies any keyword overrides.
        """
        pars = dict(days=1,
                    name=self.name,
                    H_0=67.74,
                    W_m=0.3089,
                    W_v=0.6911,
                    dm_host_model='normal',
                    dm_host_mu=100,
                    dm_host_sigma=200,
                    dm_igm_index=1000,
                    dm_igm_sigma=None,
                    dm_mw_model='ne2001',
                    emission_range=[10e6, 10e9],
                    lum_range=[1e40, 1e45],
                    lum_index=0.,
                    n_model='vol_co',
                    alpha=-1.5,
                    pulse_model='lognormal',
                    pulse_range=[1., 1.],
                    pulse_mu=0.1,
                    pulse_sigma=0.5,
                    si_mu=-1.4,
                    si_sigma=1.,
                    z_max=2.5)
        pars.update(overrides)
        pop = CosmicPopulation(self.n, **pars)
        pop.save()
        return pop

    def standard_pop(self):
        """Generate a standard population."""
        return self._generate()

    def standard_candle_pop(self):
        """Generate a standard candle population."""
        return self._generate(dm_host_sigma=0,
                              lum_range=[1e36, 1e36],
                              n_model='sfr',
                              pulse_model='uniform',
                              pulse_sigma=0.,
                              si_mu=0.,
                              si_sigma=0.)

    def alpha_pop(self):
        """Generate a population varying with alpha."""
        return self._generate(alpha=self.alpha)

    def alpha_simple_pop(self):
        """Generate a simple local population varying with alpha."""
        return self._generate(dm_host_mu=0.,
                              dm_host_sigma=0.,
                              dm_igm_index=0.,
                              dm_mw_model='zero',
                              lum_range=[1e38, 1e38],
                              alpha=self.alpha,
                              pulse_model='uniform',
                              pulse_range=[10, 10],
                              pulse_sigma=1.,
                              si_mu=0.,
                              si_sigma=0.,
                              z_max=0.01)

    def gamma_pop(self):
        """Generate a population varying with spectral index."""
        return self._generate(dm_host_mu=0.,
                              dm_host_sigma=0.,
                              dm_igm_index=0.,
                              dm_mw_model='zero',
                              lum_range=[10**42.5, 10**42.5],
                              pulse_model='uniform',
                              pulse_range=[10, 10],
                              pulse_sigma=1.,
                              si_mu=self.gamma,
                              si_sigma=0.)
def get_cosmic_pop(sort, size, load=True, overwrite=False,
                   alpha=None, gamma=None):
    """Quickly get standard populations.

    Args:
        sort (str): Which type of population, standard, std_candle etc.
        size (str): Choice of 'small', 'medium' or 'large'
        load (bool): Whether to load in a population
        overwrite (bool): Check whether a population has already
            been run. If overwrite is true, it will always make a new
            instance.
        alpha (float): Source count slope for the alpha populations.
        gamma (float): Spectral index for the gamma population.

    Returns:
        pop: Desired population (or just its name when ``load`` is False).

    Raises:
        ValueError: If ``sort`` is not a known population type.
    """
    pop = StandardCosmicPops(sort, size, alpha=alpha, gamma=gamma)

    # Skip loading a population if you don't have to
    if not load:
        return pop.name

    # Go for an earlier version if available
    if not overwrite and os.path.isfile(pop.path):
        return unpickle(pop.path)

    # Else generate the requested standard population. A dispatch table
    # replaces the if-chain and makes an unknown sort an explicit error
    # instead of a silent None return.
    generators = {'standard': pop.standard_pop,  # aka a complex population
                  'standard_candle': pop.standard_candle_pop,
                  'alpha': pop.alpha_pop,
                  'gamma': pop.gamma_pop,
                  'alpha_simple': pop.alpha_simple_pop}
    try:
        generate = generators[pop.sort]
    except KeyError:
        raise ValueError(f"Unknown population sort '{pop.sort}'")
    return generate()
def get_survey_pop(pop, survey, overwrite=False):
    """Quickly get survey populations.

    Args:
        pop (CosmicPopulation or str): Population to survey, or the name
            of a previously surveyed population to load from disk.
        survey (Survey): Survey to use
        overwrite (bool): Check whether a population has already
            been run. If overwrite is true, it will always make a new
            instance.

    Returns:
        pop: Desired population.

    Raises:
        ValueError: If only a population name was given, so that no new
            survey can be run, and no matching pickle is loaded.
    """
    # The original 'observe' flag was dead code: the only path that set it
    # False returned immediately, so the flow below is equivalent.
    if isinstance(pop, str):
        # A name alone can only be used to load an earlier survey result.
        name = f'{pop}_{survey.name}'
        path = paths.populations() + name + '.p'
        if not overwrite and os.path.isfile(path):
            return unpickle(path)
        m = f'No survey population at {path}, yet no surveying requested'
        raise ValueError(m)

    # Otherwise run the survey over the given population and save it.
    surv_pop = SurveyPopulation(pop, survey)
    surv_pop.name = f'{pop.name}_{survey.name}'
    surv_pop.save()
    return surv_pop
| 36.512821
| 77
| 0.408407
|
b3f7621a7c65d2b785c0656972d7e82720316b91
| 3,193
|
py
|
Python
|
tests/sinks/test_tcp_client.py
|
02strich/aws-embedded-metrics-python
|
4c5718e580dfb12529673a6b54810adfbfd54242
|
[
"Apache-2.0"
] | 130
|
2019-11-18T19:39:55.000Z
|
2022-03-28T09:56:25.000Z
|
tests/sinks/test_tcp_client.py
|
02strich/aws-embedded-metrics-python
|
4c5718e580dfb12529673a6b54810adfbfd54242
|
[
"Apache-2.0"
] | 50
|
2019-11-18T22:22:33.000Z
|
2022-02-06T11:03:31.000Z
|
tests/sinks/test_tcp_client.py
|
02strich/aws-embedded-metrics-python
|
4c5718e580dfb12529673a6b54810adfbfd54242
|
[
"Apache-2.0"
] | 23
|
2019-11-19T00:06:41.000Z
|
2021-12-09T02:01:40.000Z
|
from aws_embedded_metrics.sinks.tcp_client import TcpClient
from urllib.parse import urlparse
import socket
import threading
import time
import logging
log = logging.getLogger(__name__)
# Address/port the in-process agent listens on; the endpoint below points
# the TcpClient under test at the same socket.
test_host = '0.0.0.0'
test_port = 9999
endpoint = urlparse("tcp://0.0.0.0:9999")
# 16-byte payload: matches the 16-byte recv chunk size used by the agent.
message = "_16-Byte-String_".encode('utf-8')
def test_can_send_message():
    """A single message sent over the client arrives at the agent."""
    # arrange: start an in-memory TCP agent and a client pointed at it
    agent = InProcessAgent().start()
    client = TcpClient(endpoint)
    # act
    client.connect()
    client.send_message(message)
    # assert: allow the agent's background thread time to receive
    time.sleep(1)
    messages = agent.messages
    assert 1 == len(messages)
    assert message == messages[0]
    agent.shutdown()
def test_can_connect_concurrently_from_threads():
    """Concurrent connect/send from many threads delivers every message."""
    # arrange
    concurrency = 10
    agent = InProcessAgent().start()
    client = TcpClient(endpoint)
    # The barrier releases all threads at once to maximise contention
    # on connect()/send_message().
    barrier = threading.Barrier(concurrency, timeout=5)
    def run():
        barrier.wait()
        client.connect()
        client.send_message(message)
    def start_thread():
        thread = threading.Thread(target=run, args=())
        thread.daemon = True
        thread.start()
    # act
    for _ in range(concurrency):
        start_thread()
    # assert: allow time for all sends to land, then check each payload
    time.sleep(1)
    messages = agent.messages
    assert concurrency == len(messages)
    for i in range(concurrency):
        assert message == messages[i]
    agent.shutdown()
def test_can_recover_from_agent_shutdown():
    """Client reconnects after the agent is restarted.

    Only the message sent after the restart is expected to arrive at the
    new agent (the final assert checks for exactly one message).
    """
    # arrange
    agent = InProcessAgent().start()
    client = TcpClient(endpoint)
    # act
    client.connect()
    client.send_message(message)
    agent.shutdown()
    time.sleep(5)
    # send while the agent is down, then again after a new agent starts
    client.send_message(message)
    agent = InProcessAgent().start()
    client.send_message(message)
    # assert
    time.sleep(1)
    messages = agent.messages
    assert 1 == len(messages)
    assert message == messages[0]
    agent.shutdown()
class InProcessAgent(object):
    """ Agent that runs on a background thread and collects
    messages in memory.
    """

    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets consecutive tests rebind the same port quickly.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((test_host, test_port))
        self.sock.listen()
        self.is_shutdown = False
        self.messages = []

    def start(self) -> "InProcessAgent":
        """Begin accepting connections on a daemon thread; returns self."""
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()
        return self

    def run(self):
        """Accept connections and collect 16-byte chunks until shutdown."""
        while not self.is_shutdown:
            try:
                connection, client_address = self.sock.accept()
            except OSError:
                # shutdown() closed the listening socket while we were
                # blocked in accept(); exit the thread cleanly instead of
                # letting the exception escape.
                break
            self.connection = connection
            try:
                while not self.is_shutdown:
                    data = self.connection.recv(16)
                    if data:
                        self.messages.append(data)
                    else:
                        # peer closed the connection
                        break
            finally:
                log.error("Exited the recv loop")

    def shutdown(self):
        """Stop the recv loop and close both sockets."""
        try:
            self.is_shutdown = True
            self.connection.shutdown(socket.SHUT_RDWR)
            self.connection.close()
            self.sock.close()
        except Exception as e:
            log.error("Failed to shutdown %s" % (e,))
| 25.141732
| 71
| 0.614156
|
44d26f6e69f1d65abe3a51be089a256b96af1ec2
| 6,030
|
py
|
Python
|
research/audioset/vggish/vggish_inference_demo.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/audioset/vggish/vggish_inference_demo.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
research/audioset/vggish/vggish_inference_demo.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A simple demonstration of running VGGish in inference mode.
This is intended as a toy example that demonstrates how the various building
blocks (feature extraction, model definition and loading, postprocessing) work
together in an inference context.
A WAV file (assumed to contain signed 16-bit PCM samples) is read in, converted
into log mel spectrogram examples, fed into VGGish, the raw embedding output is
whitened and quantized, and the postprocessed embeddings are optionally written
in a SequenceExample to a TFRecord file (using the same format as the embedding
features released in AudioSet).
Usage:
# Run a WAV file through the model and print the embeddings. The model
# checkpoint is loaded from vggish_model.ckpt and the PCA parameters are
# loaded from vggish_pca_params.npz in the current directory.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file
# Run a WAV file through the model and also write the embeddings to
# a TFRecord file. The model checkpoint and PCA parameters are explicitly
# passed in as well.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file \
--tfrecord_file /path/to/tfrecord/file \
--checkpoint /path/to/model/checkpoint \
--pca_params /path/to/pca/params
# Run a built-in input (a sine wav) through the model and print the
# embeddings. Associated model files are read from the current directory.
$ python vggish_inference_demo.py
"""
from __future__ import print_function
import numpy as np
from scipy.io import wavfile
import six
import tensorflow as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
# Command-line flags: input WAV, model checkpoint, PCA parameters and the
# optional TFRecord output path.
flags = tf.app.flags
flags.DEFINE_string(
    'wav_file', None,
    'Path to a wav file. Should contain signed 16-bit PCM samples. '
    'If none is provided, a synthetic sound is used.')
flags.DEFINE_string(
    'checkpoint', 'vggish_model.ckpt',
    'Path to the VGGish checkpoint file.')
flags.DEFINE_string(
    'pca_params', 'vggish_pca_params.npz',
    'Path to the VGGish PCA parameters file.')
flags.DEFINE_string(
    'tfrecord_file', None,
    'Path to a TFRecord file where embeddings will be written.')
FLAGS = flags.FLAGS
def main(_):
  """Run a WAV file (or a synthetic tone) through VGGish.

  Converts the input to log mel spectrogram examples, runs inference,
  postprocesses (whitens/quantizes) the embeddings, and optionally
  writes them to FLAGS.tfrecord_file as a SequenceExample.
  """
  # In this simple example, we run the examples from a single audio file through
  # the model. If none is provided, we generate a synthetic input.
  if FLAGS.wav_file:
    wav_file = FLAGS.wav_file
  else:
    # Write a WAV of a sine wav into an in-memory file object.
    num_secs = 5
    freq = 1000
    sr = 44100
    t = np.linspace(0, num_secs, int(num_secs * sr))
    x = np.sin(2 * np.pi * freq * t)
    # Convert to signed 16-bit samples.
    samples = np.clip(x * 32768, -32768, 32767).astype(np.int16)
    wav_file = six.BytesIO()
    wavfile.write(wav_file, sr, samples)
    wav_file.seek(0)
  examples_batch = vggish_input.wavfile_to_examples(wav_file)
  print(examples_batch)
  # Prepare a postprocessor to munge the model embeddings.
  pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params)
  # If needed, prepare a record writer to store the postprocessed embeddings.
  writer = tf.python_io.TFRecordWriter(
      FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None
  with tf.Graph().as_default(), tf.Session() as sess:
    # Define the model in inference mode, load the checkpoint, and
    # locate input and output tensors.
    vggish_slim.define_vggish_slim(training=False)
    vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)
    features_tensor = sess.graph.get_tensor_by_name(
        vggish_params.INPUT_TENSOR_NAME)
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)
    # Run inference and postprocessing.
    [embedding_batch] = sess.run([embedding_tensor],
                                 feed_dict={features_tensor: examples_batch})
    print(embedding_batch)
    postprocessed_batch = pproc.postprocess(embedding_batch)
    print(postprocessed_batch)
    # Write the postprocessed embeddings as a SequenceExample, in a similar
    # format as the features released in AudioSet. Each row of the batch of
    # embeddings corresponds to roughly a second of audio (96 10ms frames), and
    # the rows are written as a sequence of bytes-valued features, where each
    # feature value contains the 128 bytes of the whitened quantized embedding.
    seq_example = tf.train.SequenceExample(
        feature_lists=tf.train.FeatureLists(
            feature_list={
                vggish_params.AUDIO_EMBEDDING_FEATURE_NAME:
                    tf.train.FeatureList(
                        feature=[
                            tf.train.Feature(
                                bytes_list=tf.train.BytesList(
                                    value=[embedding.tobytes()]))
                            for embedding in postprocessed_batch
                        ]
                    )
            }
        )
    )
    print(seq_example)
    if writer:
      writer.write(seq_example.SerializeToString())
  if writer:
    writer.close()
if __name__ == '__main__':
tf.app.run()
| 39.155844
| 81
| 0.672305
|
2cfc8eec823afc3bc34963aad8f715d2b0e9cbe8
| 25,605
|
py
|
Python
|
intersight/apis/storage_controller_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/apis/storage_controller_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/apis/storage_controller_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class StorageControllerApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def storage_controllers_get(self, **kwargs):
"""
Read a 'storage.Controller' resource.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_controllers_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of documents to return.
:param int skip: The number of documents to skip.
:param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return.
:param str orderby: Determines what values are used to order a collection of documents.
:param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: StorageControllerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_controllers_get_with_http_info(**kwargs)
else:
(data) = self.storage_controllers_get_with_http_info(**kwargs)
return data
def storage_controllers_get_with_http_info(self, **kwargs):
"""
Read a 'storage.Controller' resource.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_controllers_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of documents to return.
:param int skip: The number of documents to skip.
:param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return.
:param str orderby: Determines what values are used to order a collection of documents.
:param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: StorageControllerList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'inlinecount', 'top', 'skip', 'filter', 'select', 'orderby', 'expand', 'apply', 'at']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_controllers_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('$count', params['count']))
if 'inlinecount' in params:
query_params.append(('$inlinecount', params['inlinecount']))
if 'top' in params:
query_params.append(('$top', params['top']))
if 'skip' in params:
query_params.append(('$skip', params['skip']))
if 'filter' in params:
query_params.append(('$filter', params['filter']))
if 'select' in params:
query_params.append(('$select', params['select']))
if 'orderby' in params:
query_params.append(('$orderby', params['orderby']))
if 'expand' in params:
query_params.append(('$expand', params['expand']))
if 'apply' in params:
query_params.append(('$apply', params['apply']))
if 'at' in params:
query_params.append(('at', params['at']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/Controllers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageControllerList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def storage_controllers_moid_get(self, moid, **kwargs):
"""
Read a 'storage.Controller' resource.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_controllers_moid_get(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The Moid of the storageController instance. (required)
:return: StorageController
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_controllers_moid_get_with_http_info(moid, **kwargs)
else:
(data) = self.storage_controllers_moid_get_with_http_info(moid, **kwargs)
return data
def storage_controllers_moid_get_with_http_info(self, moid, **kwargs):
"""
Read a 'storage.Controller' resource.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_controllers_moid_get_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The Moid of the storageController instance. (required)
:return: StorageController
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_controllers_moid_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `storage_controllers_moid_get`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['Moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/Controllers/{Moid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageController',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def storage_controllers_moid_patch(self, moid, body, **kwargs):
"""
Update a 'storage.Controller' resource.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_controllers_moid_patch(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The Moid of the storageController instance. (required)
:param StorageController body: storageController to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_controllers_moid_patch_with_http_info(moid, body, **kwargs)
else:
(data) = self.storage_controllers_moid_patch_with_http_info(moid, body, **kwargs)
return data
def storage_controllers_moid_patch_with_http_info(self, moid, body, **kwargs):
    """
    Update a 'storage.Controller' resource.
    The request is synchronous unless a `callback` callable is supplied,
    in which case the HTTP call runs on a background thread and the
    callback receives the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.storage_controllers_moid_patch_with_http_info(moid, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str moid: The Moid of the storageController instance. (required)
    :param StorageController body: storageController to update (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts beyond its positional parameters.
    accepted = ('moid', 'body', 'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method storage_controllers_moid_patch" % key
            )
    # verify the required parameter 'moid' is set
    if moid is None:
        raise ValueError("Missing the required parameter `moid` when calling `storage_controllers_moid_patch`")
    # verify the required parameter 'body' is set
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `storage_controllers_moid_patch`")

    # Negotiate content headers via the shared client (Accept first, then
    # Content-Type, matching the generated client's call order).
    header_params = {}
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/storage/Controllers/{Moid}', 'PATCH',
        {'Moid': moid},        # path parameters
        [],                    # query parameters
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=[],      # endpoint uses no auth settings
        callback=kwargs.get('callback'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def storage_controllers_moid_post(self, moid, body, **kwargs):
    """
    Update a 'storage.Controller' resource.
    The request is synchronous unless a `callback` callable is supplied,
    in which case the HTTP call runs on a background thread and the
    callback receives the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.storage_controllers_moid_post(moid, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str moid: The Moid of the storageController instance. (required)
    :param StorageController body: storageController to update (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both the sync path (returns data) and the async path (returns the
    # request thread) simply propagate the _with_http_info result, so a
    # single delegation covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.storage_controllers_moid_post_with_http_info(moid, body, **kwargs)
def storage_controllers_moid_post_with_http_info(self, moid, body, **kwargs):
    """
    Update a 'storage.Controller' resource.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.storage_controllers_moid_post_with_http_info(moid, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str moid: The Moid of the storageController instance. (required)
    :param StorageController body: storageController to update (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts, plus the generic transport options.
    all_params = ['moid', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Fold **kwargs into the locals() snapshot, rejecting any keyword the
    # endpoint does not understand.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method storage_controllers_moid_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'moid' is set
    if ('moid' not in params) or (params['moid'] is None):
        raise ValueError("Missing the required parameter `moid` when calling `storage_controllers_moid_post`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `storage_controllers_moid_post`")

    collection_formats = {}

    # 'Moid' is interpolated into the request path template below.
    path_params = {}
    if 'moid' in params:
        path_params['Moid'] = params['moid']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The StorageController payload is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting (this endpoint declares none)
    auth_settings = []

    return self.api_client.call_api('/storage/Controllers/{Moid}', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 50.70297
| 819
| 0.618864
|
7f724e97aec13316f245dea848e69fd3bd7513af
| 1,620
|
py
|
Python
|
tenant_handler/datastore/__init__.py
|
turnbros/tbc-api
|
888fddd11c8a4a0af66b1a9823df924ace2ba37d
|
[
"Apache-2.0"
] | null | null | null |
tenant_handler/datastore/__init__.py
|
turnbros/tbc-api
|
888fddd11c8a4a0af66b1a9823df924ace2ba37d
|
[
"Apache-2.0"
] | null | null | null |
tenant_handler/datastore/__init__.py
|
turnbros/tbc-api
|
888fddd11c8a4a0af66b1a9823df924ace2ba37d
|
[
"Apache-2.0"
] | null | null | null |
import json
from tenant_handler.tenant import Tenant
class TenantStore(object):
    """In-memory registry of Tenant objects keyed by tenant name.

    Methods that back HTTP endpoints return (body, status_code) tuples.
    """

    def __init__(self):
        self._tenant_store = {}

    def get_tenant_names(self):
        """Return the names of all tenants created so far."""
        return list(self._tenant_store.keys())

    def get_tenant(self, tenant_name) -> Tenant:
        """Fetch a tenant by name, creating it on first access."""
        existing = self._tenant_store.get(tenant_name)
        if existing is not None:
            return existing
        return self.create_tenant(tenant_name)

    def create_tenant(self, tenant_name) -> Tenant:
        """Create, register, and return a new Tenant."""
        new_tenant = Tenant(tenant_name)
        self._tenant_store[tenant_name] = new_tenant
        return new_tenant

    def lock_tenant_state(self, tenant_name, lock):
        """Try to acquire the tenant's state lock; 409 if held by another."""
        held_lock = self.get_tenant(tenant_name).lock_state(lock)
        if held_lock["ID"] == lock["ID"]:
            return "", 200
        return json.dumps(held_lock), 409

    def unlock_tenant_state(self, tenant_name, lock_id):
        """Release the tenant's state lock; 423 if it is still held."""
        remaining_lock = self.get_tenant(tenant_name).unlock_state(lock_id)
        if remaining_lock is None:
            return "", 200
        return json.dumps(remaining_lock), 423

    def get_tenant_state(self, tenant_name):
        """Return the tenant's state serialized as JSON."""
        return json.dumps(self.get_tenant(tenant_name).state), 200

    def update_tenant_state(self, tenant_name, tenant_state, lock_id):
        """Replace the tenant's state if lock_id matches; 409 otherwise."""
        tenant = self.get_tenant(tenant_name)
        if tenant.lock_id != lock_id:
            return json.dumps(tenant.lock), 409
        tenant.state = tenant_state
        return "", 200

    def purge_tenant_state(self, tenant_name):
        """Reset the tenant's state to an empty versioned document."""
        self.get_tenant(tenant_name).state = {"version": 4}
        return "", 200

    def get_tenant_resources(self, tenant_name):
        """Return the tenant's resource collection."""
        return self.get_tenant(tenant_name).get_resources(), 200
# Process-wide singleton shared by every importer of this module.
tenant_store = TenantStore()


def get_tenant_store():
    """Return the module-level TenantStore singleton."""
    return tenant_store
| 29.454545
| 68
| 0.72284
|
1dd2cbce0f6a861ffa740931b6023f60dc1a5e4e
| 6,591
|
py
|
Python
|
neutron/agent/firewall.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/firewall.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/firewall.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import six
from neutron_lib.api.definitions import port_security as psec
from neutron_lib import constants as n_const
from neutron_lib.utils import runtime
# Re-exported from neutron_lib so agent code can import them from this module.
INGRESS_DIRECTION = n_const.INGRESS_DIRECTION
EGRESS_DIRECTION = n_const.EGRESS_DIRECTION

# Maps a traffic direction to the rule attribute holding the remote IP prefix.
DIRECTION_IP_PREFIX = {INGRESS_DIRECTION: 'source_ip_prefix',
                       EGRESS_DIRECTION: 'dest_ip_prefix'}

# List of ICMPv6 types that should be permitted (ingress) by default. This list
# depends on iptables conntrack behavior of recognizing ICMP errors (types 1-4)
# as related traffic.
ICMPV6_ALLOWED_INGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY,
                                n_const.ICMPV6_TYPE_NS,
                                n_const.ICMPV6_TYPE_NA)

# List of ICMPv6 types that should be permitted (egress) by default.
ICMPV6_ALLOWED_EGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY,
                               n_const.ICMPV6_TYPE_RS,
                               n_const.ICMPV6_TYPE_NS,
                               n_const.ICMPV6_TYPE_NA)
def port_sec_enabled(port):
    """Return the port's port-security flag, defaulting to True when unset."""
    return port.get(psec.PORTSECURITY, True)
def load_firewall_driver_class(driver):
    """Resolve a firewall driver by entry-point alias or full class path."""
    return runtime.load_class_by_alias_or_classname(
        'neutron.agent.firewall_drivers', driver)
@six.add_metaclass(abc.ABCMeta)
class FirewallDriver(object):
    """Firewall Driver base class.

    Defines methods that any driver providing security groups
    and provider firewall functionality should implement.
    Note port attribute should have information of security group ids and
    security group rules.

    the dict of port should have
      device : interface name
      fixed_ips: ips of the device
      mac_address: mac_address of the device
      security_groups: [sgid, sgid]
      security_group_rules : [ rule, rule ]
      the rule must contain ethertype and direction
      the rule may contain security_group_id,
          protocol, port_min, port_max
          source_ip_prefix, source_port_min,
          source_port_max, dest_ip_prefix, and
          remote_group_id
    Note: source_group_ip in REST API should be converted by this rule
    if direction is ingress:
      remote_group_ip will be a source_ip_prefix
    if direction is egress:
      remote_group_ip will be a dest_ip_prefix
    Note: remote_group_id in REST API should be converted by this rule
    if direction is ingress:
      remote_group_id will be a list of source_ip_prefix
    if direction is egress:
      remote_group_id will be a list of dest_ip_prefix
    remote_group_id will also remaining membership update management
    """

    # OVS agent installs arp spoofing openflow rules. If firewall is capable
    # of handling that, ovs agent doesn't need to install the protection.
    provides_arp_spoofing_protection = False

    @abc.abstractmethod
    def prepare_port_filter(self, port):
        """Prepare filters for the port.

        This method should be called before the port is created.
        """

    def apply_port_filter(self, port):
        """Apply port filter.

        Once this method returns, the port should be firewalled
        appropriately. This method should as far as possible be a
        no-op. It's vastly preferred to get everything set up in
        prepare_port_filter.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def update_port_filter(self, port):
        """Refresh security group rules from data store

        Gets called when a port gets added to or removed from
        the security group the port is a member of or if the
        group gains or looses a rule.
        """

    def remove_port_filter(self, port):
        """Stop filtering port."""
        raise NotImplementedError()

    def filter_defer_apply_on(self):
        """Defer application of filtering rule."""
        pass

    def filter_defer_apply_off(self):
        """Turn off deferral of rules and apply the rules now."""
        pass

    @property
    def ports(self):
        """Returns filtered ports."""
        pass

    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context.

        Rules are accumulated while the context is active and flushed by
        filter_defer_apply_off() on exit, even if the body raises.
        """
        self.filter_defer_apply_on()
        try:
            yield
        finally:
            self.filter_defer_apply_off()

    def update_security_group_members(self, sg_id, ips):
        """Update group members in a security group."""
        raise NotImplementedError()

    def update_security_group_rules(self, sg_id, rules):
        """Update rules in a security group."""
        raise NotImplementedError()

    def security_group_updated(self, action_type, sec_group_ids,
                               device_id=None):
        """Called when a security group is updated.

        Note: This method needs to be implemented by the firewall drivers
        which use enhanced RPC for security_groups.
        """
        raise NotImplementedError()

    def process_trusted_ports(self, port_ids):
        """Process ports that are trusted and shouldn't be filtered."""
        pass

    def remove_trusted_ports(self, port_ids):
        """Stop treating the given ports as trusted (no-op by default)."""
        pass
class NoopFirewallDriver(FirewallDriver):
    """Firewall driver that performs no filtering at all.

    Configuring this driver effectively disables the security-group
    feature: every hook required by FirewallDriver is a no-op.
    """

    def prepare_port_filter(self, port):
        return None

    def apply_port_filter(self, port):
        return None

    def update_port_filter(self, port):
        return None

    def remove_port_filter(self, port):
        return None

    def filter_defer_apply_on(self):
        return None

    def filter_defer_apply_off(self):
        return None

    @property
    def ports(self):
        # The no-op driver never tracks any port.
        return {}

    def update_security_group_members(self, sg_id, ips):
        return None

    def update_security_group_rules(self, sg_id, rules):
        return None

    def security_group_updated(self, action_type, sec_group_ids,
                               device_id=None):
        return None
| 31.6875
| 79
| 0.676832
|
8af310b03d28d08eab13d96bf6f9dc6f83dce365
| 1,131
|
py
|
Python
|
lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | 2
|
2017-05-18T16:12:55.000Z
|
2022-03-08T12:08:43.000Z
|
lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py
|
yvanlebras/galaxy
|
6b8489ca866825bcdf033523120a8b24ea6e6342
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Add UUID column to dataset table
"""
import logging
from sqlalchemy import (
Column,
MetaData,
Table,
)
from galaxy.model.custom_types import UUIDType
log = logging.getLogger(__name__)

# Module-level Column shared by upgrade()/downgrade(); nullable because
# existing dataset rows have no UUID at migration time.
dataset_uuid_column = Column("uuid", UUIDType, nullable=True)
def upgrade(migrate_engine):
    """Add the nullable 'uuid' column to the 'dataset' table."""
    print(__doc__)
    metadata = MetaData()
    metadata.bind = migrate_engine
    metadata.reflect()

    # Add the uuid colum to the dataset table
    try:
        dataset_table = Table("dataset", metadata, autoload=True)
        dataset_uuid_column.create(dataset_table)
        assert dataset_uuid_column is dataset_table.c.uuid
    except Exception:
        # Deliberately best-effort: migration errors are logged, not raised.
        log.exception("Adding column 'uuid' to dataset table failed.")
def downgrade(migrate_engine):
    """Remove the 'uuid' column from the 'dataset' table."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    metadata.reflect()

    # Drop the dataset table's uuid column.
    try:
        dataset_table = Table("dataset", metadata, autoload=True)
        dataset_uuid = dataset_table.c.uuid
        dataset_uuid.drop()
    except Exception:
        # Deliberately best-effort: migration errors are logged, not raised.
        log.exception("Dropping 'uuid' column from dataset table failed.")
| 24.586957
| 74
| 0.697613
|
1a74ab65f85a89cc481c27d2090b18f637b8c75f
| 999
|
py
|
Python
|
norfair/utils.py
|
pdd-999/norfair
|
83ff3fdae973707d8fc569d270e15badbe4a4619
|
[
"BSD-3-Clause"
] | 1
|
2021-12-30T09:02:44.000Z
|
2021-12-30T09:02:44.000Z
|
norfair/utils.py
|
pdd-999/norfair
|
83ff3fdae973707d8fc569d270e15badbe4a4619
|
[
"BSD-3-Clause"
] | null | null | null |
norfair/utils.py
|
pdd-999/norfair
|
83ff3fdae973707d8fc569d270e15badbe4a4619
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T03:07:54.000Z
|
2022-03-29T03:07:54.000Z
|
import numpy as np
import time
def validate_points(points: np.array) -> np.array:
    """Normalize tracked points to an (n, 2) layout.

    A single (x, y) point given as a 1-D length-2 array is promoted to
    shape (1, 2); any other layout that is not (n, 2) triggers a warning
    printout, but the input is still returned unchanged.
    """
    if points.shape == (2,):
        # Single point: promote to a 1-row batch.
        return points[np.newaxis, ...]
    malformed = (
        points.ndim == 1
        or points.ndim > 2
        or points.shape[1] != 2
    )
    if malformed:
        print("The point(s) need to be in (x, y) formats ", points)
    return points
class Timer():
    """Tiny stopwatch: call start(), then end() for elapsed milliseconds."""

    def __init__(self):
        # State is created lazily by start()/end().
        pass

    def start(self, name: str = None):
        """Begin timing; `name` labels the measurement when printed."""
        self._name = name
        self._startTime = time.perf_counter()

    def end(self, print: bool = True):
        """Stop timing, optionally report, and return elapsed milliseconds."""
        elapsed_ms = (time.perf_counter() - self._startTime) * 1000
        self._timeElapsed = elapsed_ms
        if print:
            self.print()
        return elapsed_ms

    def print(self):
        """Write the last measurement to stdout."""
        print(f"{self._name} took {self._timeElapsed} ms")

    @property
    def time(self):
        """Elapsed milliseconds of the most recent end() call."""
        return self._timeElapsed
| 29.382353
| 72
| 0.592593
|
d54d12efc859724baf0adb2fdda176133c3d225a
| 5,033
|
py
|
Python
|
loky/backend/popen_loky_win32.py
|
pierreglaser/loky
|
71d37c73e0013ce9008fe7356c677917f8800af1
|
[
"BSD-3-Clause"
] | null | null | null |
loky/backend/popen_loky_win32.py
|
pierreglaser/loky
|
71d37c73e0013ce9008fe7356c677917f8800af1
|
[
"BSD-3-Clause"
] | null | null | null |
loky/backend/popen_loky_win32.py
|
pierreglaser/loky
|
71d37c73e0013ce9008fe7356c677917f8800af1
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
from pickle import load
from multiprocessing import process, util
from . import spawn
from . import reduction
from .context import get_spawning_popen, set_spawning_popen
if sys.platform == "win32":
    # Avoid import error by code introspection tools such as test runners
    # trying to import this module while running on non-Windows systems.
    import msvcrt
    from .compat_win32 import _winapi
    from .compat_win32 import Popen as _Popen
    from .reduction import duplicate
else:
    # Non-Windows placeholder so the Popen subclass below is still definable.
    _Popen = object

if sys.version_info[:2] < (3, 3):
    # Python 2 compatibility: open(fd, mode) on a raw file descriptor.
    from os import fdopen as open

__all__ = ['Popen']
#
#
#
# NOTE(review): mirrors multiprocessing's Windows popen constants — confirm
# TERMINATE is still the exit code used when force-killing a child.
TERMINATE = 0x10000
# True when running from a frozen Windows executable (e.g. PyInstaller).
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
# True when hosted by the pythonservice.exe Windows-service wrapper.
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(_Popen):
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'loky'

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(
            process_obj._name, getattr(process_obj, "init_main_module", True))

        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rfd, wfd = os.pipe()
        rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
        os.close(rfd)

        # NOTE(review): get_command_line's signature is (pipe_handle, **kwds),
        # so parent_pid is absorbed by **kwds and never used — confirm intent.
        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        try:
            with open(wfd, 'wb') as to_child:
                # start process
                try:
                    # This flag allows to pass inheritable handles from the
                    # parent to the child process in a python2-3 compatible way
                    # (see
                    # https://github.com/tomMoral/loky/pull/204#discussion_r290719629
                    # for more detail). When support for Python 2 is dropped,
                    # the cleaner multiprocessing.reduction.steal_handle should
                    # be used instead.
                    inherit = True
                    hp, ht, pid, tid = _winapi.CreateProcess(
                        spawn.get_executable(), cmd,
                        None, None, inherit, 0,
                        None, None, None)
                    _winapi.CloseHandle(ht)
                except BaseException as e:
                    # Creation failed: release the duplicated read handle
                    # before propagating the error.
                    _winapi.CloseHandle(rhandle)
                    raise

                # set attributes of self
                self.pid = pid
                self.returncode = None
                self._handle = hp
                self.sentinel = int(hp)
                # Ensure the process handle is closed when self is collected.
                util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))

                # send information to child
                set_spawning_popen(self)
                if sys.version_info[:2] < (3, 4):
                    Popen._tls.process_handle = int(hp)
                try:
                    reduction.dump(prep_data, to_child)
                    reduction.dump(process_obj, to_child)
                finally:
                    set_spawning_popen(None)
                    if sys.version_info[:2] < (3, 4):
                        del Popen._tls.process_handle
        except IOError as exc:
            # IOError 22 happens when the launched subprocess terminated before
            # wfd.close is called. Thus we can safely ignore it.
            if exc.errno != 22:
                raise
            util.debug("While starting {}, ignored a IOError 22"
                       .format(process_obj._name))

    def duplicate_for_child(self, handle):
        """Duplicate `handle` into the child identified by self.sentinel."""
        assert self is get_spawning_popen()
        return duplicate(handle, self.sentinel)
def get_command_line(pipe_handle, **kwds):
    '''
    Build the command-line prefix used to spawn a child process.

    Frozen executables re-invoke themselves directly; otherwise the child
    runs an inline program that imports this module and calls main().
    '''
    if getattr(sys, 'frozen', False):
        return [sys.executable, '--multiprocessing-fork', pipe_handle]
    bootstrap = 'from loky.backend.popen_loky_win32 import main; main()'
    interpreter_opts = util._args_from_interpreter_flags()
    return (
        [spawn.get_executable()]
        + interpreter_opts
        + ['-c', bootstrap, '--multiprocessing-fork', pipe_handle]
    )
def is_forking(argv):
    '''
    Return whether the command line indicates a multiprocessing fork.

    The spawning parent appends '--multiprocessing-fork <pipe_handle>' to
    the child's argv; any other shape is a normal invocation.
    '''
    if len(argv) < 2 or argv[1] != '--multiprocessing-fork':
        return False
    assert len(argv) == 3
    return True
def main():
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv)

    # The inherited pipe handle arrives as the last CLI argument.
    handle = int(sys.argv[-1])
    fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')

    # _inheriting guards against re-running parent-only setup while the
    # preparation data and process object are being unpickled.
    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    spawn.prepare(preparation_data)
    self = load(from_parent)
    process.current_process()._inheriting = False

    from_parent.close()

    # Run the process object's lifecycle and exit with its return code.
    exitcode = self._bootstrap()
    exit(exitcode)
| 32.681818
| 85
| 0.591695
|
adbd97703dce2956265dc424cb1905e4b9ed2524
| 773
|
py
|
Python
|
cpoll_cppsp/setup.py
|
lightyeare/FrameworkBenchmarks
|
40489856a0480c85227993d91de7d66e9224f8b4
|
[
"BSD-3-Clause"
] | 1
|
2015-01-28T07:11:03.000Z
|
2015-01-28T07:11:03.000Z
|
cpoll_cppsp/setup.py
|
lightyeare/FrameworkBenchmarks
|
40489856a0480c85227993d91de7d66e9224f8b4
|
[
"BSD-3-Clause"
] | null | null | null |
cpoll_cppsp/setup.py
|
lightyeare/FrameworkBenchmarks
|
40489856a0480c85227993d91de7d66e9224f8b4
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
    """Configure, build and launch the cpoll_cppsp benchmark application.

    Rewrites the hard-coded DB host in connectioninfo.H, runs `make`, and
    starts the server in the background. Always returns 0.
    """
    setup_util.replace_text("cpoll_cppsp/www/connectioninfo.H", "\\#define BENCHMARK_DB_HOST \".*\"", "#define BENCHMARK_DB_HOST \"" + args.database_host + "\"")
    subprocess.check_call("make", shell=True, cwd="cpoll_cppsp", stderr=errfile, stdout=logfile)
    subprocess.Popen("./run_application \"$(pwd)\"/www -g g++-4.8 -m /forcedynamic.cppsm", shell=True, cwd="cpoll_cppsp", stderr=errfile, stdout=logfile);
    return 0
def stop(logfile, errfile):
    """Kill any running cppsp_standalone benchmark processes.

    Scans `ps aux` output for 'cppsp_standalone' and sends SIGKILL to each
    matching PID. Always returns 0 (stop is best-effort for the harness).
    """
    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    # communicate() yields bytes on Python 3; decode once so the substring
    # test and split below work on both Python 2 and 3 (previously raised
    # TypeError on Python 3: str needle against bytes haystack).
    for line in out.decode('utf-8', 'replace').splitlines():
        if 'cppsp_standalone' in line:
            pid = int(line.split(None, 2)[1])
            os.kill(pid, 9)
    return 0
| 38.65
| 159
| 0.698577
|
7d4dad6a92e46dcc6ca30dfd6a7d13cbb2bcd55c
| 312
|
py
|
Python
|
project/manage.py
|
riotkit-org/docker-django-wiki
|
e65cdb07b14a06e2fc690260b6553376630783ed
|
[
"MIT"
] | 9
|
2019-08-23T04:32:21.000Z
|
2021-12-07T21:55:32.000Z
|
project/manage.py
|
riotkit-org/docker-django-wiki
|
e65cdb07b14a06e2fc690260b6553376630783ed
|
[
"MIT"
] | 1
|
2020-04-21T13:55:17.000Z
|
2020-04-22T17:51:20.000Z
|
project/manage.py
|
riotkit-org/docker-django-wiki
|
e65cdb07b14a06e2fc690260b6553376630783ed
|
[
"MIT"
] | 3
|
2020-03-18T00:19:37.000Z
|
2020-11-08T18:27:08.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wikiproject.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 24
| 75
| 0.794872
|
2324d425115bc85e1395f91e28c338cfe7221c7e
| 644
|
py
|
Python
|
acme/agents/jax/__init__.py
|
novatig/acme
|
7774e4a22519d8b05951320864e5308974eaad2a
|
[
"Apache-2.0"
] | 2,650
|
2020-06-01T16:31:25.000Z
|
2022-03-31T07:32:41.000Z
|
acme/agents/jax/__init__.py
|
novatig/acme
|
7774e4a22519d8b05951320864e5308974eaad2a
|
[
"Apache-2.0"
] | 199
|
2020-06-02T01:09:09.000Z
|
2022-03-31T17:11:20.000Z
|
acme/agents/jax/__init__.py
|
novatig/acme
|
7774e4a22519d8b05951320864e5308974eaad2a
|
[
"Apache-2.0"
] | 344
|
2020-06-01T16:45:21.000Z
|
2022-03-30T11:15:09.000Z
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX agents."""
| 37.882353
| 74
| 0.759317
|
000d3cd418c131da1a9a532589c58eb6634db3cf
| 3,461
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/microlunatussoli.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/microlunatussoli.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/microlunatussoli.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Microlunatus soli.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MicrolunatusSoli(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return a new instance of the Microlunatus soli graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the node vocabulary or treat nodes as a numeric
        range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Whether to download and preprocess only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        Version of the graph to retrieve. Available versions:
        - homology.v11.5
        - physical.links.v11.5
        - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs forwarded to the retriever.

    Returns
    -----------------------
    Instance of the Microlunatus soli graph.

    References
    ---------------------
    If you use this data, please cite STRING v11
    (Szklarczyk et al., Nucleic Acids Research 47(D1):D607-D613, 2019,
    Oxford University Press).
    """
    # Assemble the retriever configuration, then build and invoke it: the
    # retrieved object is a factory whose call returns the actual Graph.
    retrieval_kwargs = dict(
        graph_name="MicrolunatusSoli",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    retriever = AutomaticallyRetrievedGraph(**retrieval_kwargs)
    return retriever()
| 32.961905
| 223
| 0.676683
|
4661b58c6cbc7dbbf74da979b0580cb0412042ac
| 13,610
|
py
|
Python
|
tests/io/test_sql.py
|
ANA-POTJE/kedro
|
cba5f4577e17d51346c719da0fa12bb784acffc8
|
[
"Apache-2.0"
] | null | null | null |
tests/io/test_sql.py
|
ANA-POTJE/kedro
|
cba5f4577e17d51346c719da0fa12bb784acffc8
|
[
"Apache-2.0"
] | null | null | null |
tests/io/test_sql.py
|
ANA-POTJE/kedro
|
cba5f4577e17d51346c719da0fa12bb784acffc8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
from typing import Any
import pandas as pd
import pytest
import sqlalchemy
from kedro.io import DataSetError, SQLQueryDataSet, SQLTableDataSet
TABLE_NAME = "table_a"
CONNECTION = "sqlite:///kedro.db"
SQL_QUERY = "SELECT * FROM table_a"
FAKE_CONN_STR = "some_sql://scott:tiger@localhost/foo"
ERROR_PREFIX = (
r"A module\/driver is missing when connecting to your " r"SQL server\.(.|\n)*"
)
@pytest.fixture
def dummy_dataframe():
    """Two-row, three-column frame used as the save/load payload."""
    data = {"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]}
    return pd.DataFrame(data)
@pytest.fixture(params=[dict()])
def table_data_set(request):
    """SQLTableDataSet against the sqlite test DB; override via indirect params."""
    kwargs = {"table_name": TABLE_NAME, "credentials": {"con": CONNECTION}}
    kwargs.update(request.param)
    return SQLTableDataSet(**kwargs)
@pytest.fixture(params=[dict()])
def query_data_set(request):
    """SQLQueryDataSet against the sqlite test DB; override via indirect params."""
    kwargs = {"sql": SQL_QUERY, "credentials": {"con": CONNECTION}}
    kwargs.update(request.param)
    return SQLQueryDataSet(**kwargs)
class TestSQLTableDataSetLoad:
    """Load-side behaviour of ``SQLTableDataSet``: argument validation,
    pandas delegation and SQLAlchemy driver/dialect error translation."""

    @staticmethod
    def _assert_pd_called_once():
        # The data set must delegate straight to pandas with the configured
        # table name and connection string.
        pd.read_sql_table.assert_called_once_with(table_name=TABLE_NAME, con=CONNECTION)

    def test_empty_table_name(self):
        """Check the error when instantiating with an empty table"""
        pattern = r"`table\_name` argument cannot be empty\."
        with pytest.raises(DataSetError, match=pattern):
            SQLTableDataSet(table_name="", credentials=dict(con=CONNECTION))

    def test_empty_connection(self):
        """Check the error when instantiating with an empty
        connection string"""
        pattern = (
            r"`con` argument cannot be empty\. "
            r"Please provide a SQLAlchemy connection string\."
        )
        with pytest.raises(DataSetError, match=pattern):
            SQLTableDataSet(table_name=TABLE_NAME, credentials=dict(con=""))

    def test_load_sql_params(self, mocker, table_data_set):
        """Test `load` method invocation"""
        mocker.patch("pandas.read_sql_table")
        table_data_set.load()
        self._assert_pd_called_once()

    def test_load_driver_missing(self, mocker, table_data_set):
        """Check the error when the sql driver is missing"""
        mocker.patch(
            "pandas.read_sql_table",
            side_effect=ImportError("No module named 'mysqldb'"),
        )
        # The raised DataSetError should hint at the pip package to install.
        with pytest.raises(DataSetError, match=ERROR_PREFIX + "mysqlclient"):
            table_data_set.load()
        self._assert_pd_called_once()

    def test_invalid_module(self, mocker, table_data_set):
        """Test that if an invalid module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("Invalid module some_module")
        mocker.patch("pandas.read_sql_table", side_effect=_err)
        pattern = ERROR_PREFIX + r"Invalid module some\_module"
        with pytest.raises(DataSetError, match=pattern):
            table_data_set.load()
        self._assert_pd_called_once()

    def test_load_unknown_module(self, mocker, table_data_set):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        mocker.patch(
            "pandas.read_sql_table",
            side_effect=ImportError("No module named 'unknown_module'"),
        )
        pattern = ERROR_PREFIX + r"No module named \'unknown\_module\'"
        with pytest.raises(DataSetError, match=pattern):
            table_data_set.load()

    @pytest.mark.parametrize(
        "table_data_set", [{"credentials": dict(con=FAKE_CONN_STR)}], indirect=True
    )
    def test_load_unknown_sql(self, table_data_set):
        """Check the error when unknown sql dialect is provided"""
        pattern = (
            r"The SQL dialect in your connection is not supported " r"by SQLAlchemy"
        )
        with pytest.raises(DataSetError, match=pattern):
            table_data_set.load()
class TestSQLTableDataSetSave:
    """Save-side behaviour of ``SQLTableDataSet``: pandas ``to_sql``
    delegation, ``save_args`` handling and SQLAlchemy error translation."""

    _unknown_conn = "mysql+unknown_module://scott:tiger@localhost/foo"

    @staticmethod
    def _assert_to_sql_called_once(df: Any, index: bool = False):
        df.to_sql.assert_called_once_with(name=TABLE_NAME, con=CONNECTION, index=index)

    def test_save_default_index(self, mocker, table_data_set, dummy_dataframe):
        """Test `save` method invocation"""
        mocker.patch.object(dummy_dataframe, "to_sql")
        table_data_set.save(dummy_dataframe)
        # By default the DataFrame index must not be written to the table.
        self._assert_to_sql_called_once(dummy_dataframe)

    @pytest.mark.parametrize(
        "table_data_set", [{"save_args": dict(index=True)}], indirect=True
    )
    def test_save_overwrite_index(self, mocker, table_data_set, dummy_dataframe):
        """Test writing DataFrame index as a column"""
        mocker.patch.object(dummy_dataframe, "to_sql")
        table_data_set.save(dummy_dataframe)
        self._assert_to_sql_called_once(dummy_dataframe, True)

    def test_save_driver_missing(self, mocker, table_data_set, dummy_dataframe):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("No module named 'mysqldb'")
        mocker.patch.object(dummy_dataframe, "to_sql", side_effect=_err)
        with pytest.raises(DataSetError, match=ERROR_PREFIX + "mysqlclient"):
            table_data_set.save(dummy_dataframe)

    @pytest.mark.parametrize(
        "table_data_set", [{"credentials": dict(con=FAKE_CONN_STR)}], indirect=True
    )
    def test_save_unknown_sql(self, table_data_set, dummy_dataframe):
        """Check the error when unknown sql dialect is provided"""
        pattern = (
            r"The SQL dialect in your connection is not supported " r"by SQLAlchemy"
        )
        with pytest.raises(DataSetError, match=pattern):
            table_data_set.save(dummy_dataframe)

    @pytest.mark.parametrize(
        "table_data_set", [{"credentials": dict(con=_unknown_conn)}], indirect=True
    )
    def test_save_unknown_module(self, mocker, table_data_set, dummy_dataframe):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("No module named 'unknown_module'")
        mocker.patch.object(dummy_dataframe, "to_sql", side_effect=_err)
        pattern = r"No module named \'unknown_module\'"
        with pytest.raises(DataSetError, match=pattern):
            table_data_set.save(dummy_dataframe)

    @pytest.mark.parametrize(
        "table_data_set", [{"save_args": dict(name="TABLE_B")}], indirect=True
    )
    def test_save_ignore_table_name_override(
        self, mocker, table_data_set, dummy_dataframe
    ):
        """Test that putting the table name in `save_args` does not have any
        effect"""
        mocker.patch.object(dummy_dataframe, "to_sql")
        table_data_set.save(dummy_dataframe)
        self._assert_to_sql_called_once(dummy_dataframe)
class TestSQLTableDataSet:
    """Shared behaviour of ``SQLTableDataSet``: repr and ``exists`` checks."""

    @staticmethod
    def _assert_sqlalchemy_called_once(*args):
        _callable = sqlalchemy.engine.Engine.table_names
        if args:
            _callable.assert_called_once_with(*args)
        else:
            assert _callable.call_count == 1

    def test_str_representation_table(self, table_data_set):
        """Test the data set instance string representation"""
        str_repr = str(table_data_set)
        assert (
            "SQLTableDataSet(save_args={'index': False}, table_name=%s)" % TABLE_NAME
            in str_repr
        )
        # Credentials must never leak into the representation.
        # (Dropped a redundant ``str(str_repr)`` call: ``str_repr`` is a str.)
        assert CONNECTION not in str_repr

    def test_table_exists(self, mocker, table_data_set):
        """Test `exists` method invocation"""
        mocker.patch("sqlalchemy.engine.Engine.table_names")
        assert not table_data_set.exists()
        self._assert_sqlalchemy_called_once()

    @pytest.mark.parametrize(
        "table_data_set", [{"load_args": dict(schema="ingested")}], indirect=True
    )
    def test_table_exists_schema(self, mocker, table_data_set):
        """Test `exists` method invocation with DB schema provided

        Renamed from the typo'd ``test_able_exists_schema``.
        """
        mocker.patch("sqlalchemy.engine.Engine.table_names")
        assert not table_data_set.exists()
        self._assert_sqlalchemy_called_once("ingested")

    def test_table_exists_mocked(self, mocker, table_data_set):
        """Test `exists` method invocation with mocked list of tables"""
        mocker.patch("sqlalchemy.engine.Engine.table_names", return_value=[TABLE_NAME])
        assert table_data_set.exists()
        self._assert_sqlalchemy_called_once()
class TestSQLQueryDataSet:
    """Behaviour of ``SQLQueryDataSet``: load delegation, error
    translation, read-only semantics and repr."""

    @staticmethod
    def _assert_pd_called_once():
        _callable = pd.read_sql_query
        _callable.assert_called_once_with(sql=SQL_QUERY, con=CONNECTION)

    def test_empty_query_error(self):
        """Check the error when instantiating with empty query"""
        pattern = r"`sql` argument cannot be empty\. " r"Please provide a sql query"
        with pytest.raises(DataSetError, match=pattern):
            SQLQueryDataSet(sql="", credentials=dict(con=CONNECTION))

    def test_empty_con_error(self):
        """Check the error when instantiating with empty connection string"""
        pattern = (
            r"`con` argument cannot be empty\. Please provide "
            r"a SQLAlchemy connection string"
        )
        with pytest.raises(DataSetError, match=pattern):
            SQLQueryDataSet(sql=SQL_QUERY, credentials=dict(con=""))

    def test_load(self, mocker, query_data_set):
        """Test `load` method invocation"""
        mocker.patch("pandas.read_sql_query")
        query_data_set.load()
        self._assert_pd_called_once()

    def test_load_driver_missing(self, mocker, query_data_set):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("No module named 'mysqldb'")
        mocker.patch("pandas.read_sql_query", side_effect=_err)
        # The raised DataSetError should hint at the pip package to install.
        with pytest.raises(DataSetError, match=ERROR_PREFIX + "mysqlclient"):
            query_data_set.load()

    def test_invalid_module(self, mocker, query_data_set):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("Invalid module some_module")
        mocker.patch("pandas.read_sql_query", side_effect=_err)
        pattern = ERROR_PREFIX + r"Invalid module some\_module"
        with pytest.raises(DataSetError, match=pattern):
            query_data_set.load()

    def test_load_unknown_module(self, mocker, query_data_set):
        """Test that if an unknown module/driver is encountered by SQLAlchemy
        then the error should contain the original error message"""
        _err = ImportError("No module named 'unknown_module'")
        mocker.patch("pandas.read_sql_query", side_effect=_err)
        pattern = ERROR_PREFIX + r"No module named \'unknown\_module\'"
        with pytest.raises(DataSetError, match=pattern):
            query_data_set.load()

    @pytest.mark.parametrize(
        "query_data_set", [{"credentials": dict(con=FAKE_CONN_STR)}], indirect=True
    )
    def test_load_unknown_sql(self, query_data_set):
        """Check the error when unknown SQL dialect is provided
        in the connection string"""
        pattern = (
            r"The SQL dialect in your connection is not supported " r"by SQLAlchemy"
        )
        with pytest.raises(DataSetError, match=pattern):
            query_data_set.load()

    def test_save_error(self, query_data_set, dummy_dataframe):
        """Check the error when trying to save to the data set"""
        # SQLQueryDataSet is read-only by design.
        pattern = r"`save` is not supported on SQLQueryDataSet"
        with pytest.raises(DataSetError, match=pattern):
            query_data_set.save(dummy_dataframe)

    def test_str_representation_sql(self, query_data_set):
        """Test the data set instance string representation"""
        str_repr = str(query_data_set)
        assert "SQLQueryDataSet(sql={})".format(SQL_QUERY) in str_repr
        # Credentials must never leak into the representation.
        assert CONNECTION not in str_repr
| 42.664577
| 88
| 0.695151
|
d4e1aa888f4ef91750786901fa793520229ccba0
| 25,567
|
py
|
Python
|
pook/mock.py
|
qiao-meng-zefr/pook
|
0637f22ed90bc865f2793a49fa51d44b0689a4b5
|
[
"MIT"
] | null | null | null |
pook/mock.py
|
qiao-meng-zefr/pook
|
0637f22ed90bc865f2793a49fa51d44b0689a4b5
|
[
"MIT"
] | null | null | null |
pook/mock.py
|
qiao-meng-zefr/pook
|
0637f22ed90bc865f2793a49fa51d44b0689a4b5
|
[
"MIT"
] | null | null | null |
import re
import functools
from furl import furl
from inspect import isfunction, ismethod
from .decorators import fluent
from .response import Response
from .constants import TYPES
from .request import Request
from .matcher import MatcherEngine
from .helpers import trigger_methods
from .exceptions import PookExpiredMock
from .matchers import init as matcher
def _append_funcs(target, items):
"""
Helper function to append functions into a given list.
Arguments:
target (list): receptor list to append functions.
items (iterable): iterable that yields elements to append.
"""
[target.append(item) for item in items
if isfunction(item) or ismethod(item)]
def _trigger_request(instance, request):
    """
    Triggers request mock definition methods dynamically based on input
    keyword arguments passed to `pook.Mock` constructor.

    This is used to provide a more Pythonic interface vs chainable API
    approach.
    """
    if not isinstance(request, Request):
        raise TypeError('request must be instance of pook.Request')

    # For every field present on the request, invoke the mock method of
    # the same name with the field's value, if such a method exists.
    for field in request.keys:
        if not hasattr(instance, field):
            continue
        getattr(instance, field)(getattr(request, field))
class Mock(object):
    """
    Mock is used to declare and compose the HTTP request/response mock
    definition and matching expectations, which provides fluent API DSL.
    Arguments:
        url (str): URL to match.
            E.g: ``server.com/api?foo=bar``.
        method (str): HTTP method name to match.
            E.g: ``GET``.
        path (str): URL path to match.
            E.g: ``/api/users``.
        headers (dict): Header values to match.
            E.g: ``{'server': 'nginx'}``.
        header_present (str): Matches is a header is present.
        headers_present (list|tuple): Matches if multiple headers are present.
        type (str): Matches MIME ``Content-Type`` header.
            E.g: ``json``, ``xml``, ``html``, ``text/plain``
        content (str): Same as ``type`` argument.
        params (dict): Matches the given URL params.
        param_exists (str): Matches if a given URL param exists.
        params_exists (list|tuple): Matches if a given URL params exists.
        body (str|regex): Matches the payload body by regex or
            strict comparison.
        json (dict|list|str|regex): Matches the payload body against the given
            JSON or regular expression.
        jsonschema (dict|str): Matches the payload body against the given
            JSONSchema.
        xml (str|regex): matches the payload body against the given XML string
            or regular expression.
        file (str): Disk file path to load body from. Analog to ``body`` param.
        times (int): Mock TTL or maximum number of times that the mock can be
            matched.
        persist (bool): Enable persistent mode. Mock won't be flushed even if
            it matched one or multiple times.
        delay (int): Optional network delay simulation (only applicable when
            using ``aiohttp`` HTTP client).
        callback (function): optional callback function called every time the
            mock is matched.
        reply (int): Mock response status. Defaults to ``200``.
        response_status (int): Mock response status. Alias to ``reply`` param.
        response_headers (dict): Response headers to use.
        response_type (str): Response MIME type expression or alias.
            Analog to ``type`` param. E.g: ``json``, ``xml``, ``text/plain``.
        response_body (str): Response body to use.
        response_json (dict|list|str): Response JSON to use. If Python is
            passed, it will be serialized as JSON transparently.
        response_xml (str): XML body string to use.
        request (pook.Request): Optional. Request mock definition object.
        response (pook.Response): Optional. Response mock definition
            object.
    Returns:
        pook.Mock
    """
    def __init__(self, request=None, response=None, **kw):
        # Stores the number of times the mock should live
        self._times = 1
        # Stores the number of times the mock has been matched
        self._matches = 0
        # Stores the simulated error exception
        self._error = None
        # Stores the optional network delay in milliseconds
        self._delay = 0
        # Stores the mock persistence mode. `True` means it will live forever
        self._persist = False
        # Optional engine this mock is bound to
        self._engine = None
        # Store request-response mock matched calls
        self._calls = []
        # Stores the input request instance
        self._request = request or Request()
        # Stores the response mock instance
        self._response = response or Response()
        # Stores the mock matcher engine used for outgoing traffic matching
        self.matchers = MatcherEngine()
        # Stores filters used to filter outgoing HTTP requests.
        self.filters = []
        # Stores HTTP request mappers used by the mock.
        self.mappers = []
        # Stores callback functions that will be triggered if the mock
        # matches outgoing traffic.
        self.callbacks = []
        # Triggers instance methods based on argument names
        trigger_methods(self, kw)
        # Trigger matchers based on predefined request object, if needed
        if request:
            _trigger_request(self, request)
    @fluent
    def url(self, url):
        """
        Defines the mock URL to match.
        It can be a full URL with path and query params.
        Protocol schema is optional, defaults to ``http://``.
        Arguments:
            url (str): mock URL to match. E.g: ``server.com/api``.
        Returns:
            self: current Mock instance.
        """
        self._request.url = url
        self.add_matcher(matcher('URLMatcher', url))
    @fluent
    def method(self, method):
        """
        Defines the HTTP method to match.
        Use ``*`` to match any method.
        Arguments:
            method (str): method value to match. E.g: ``GET``.
        Returns:
            self: current Mock instance.
        """
        self._request.method = method
        self.add_matcher(matcher('MethodMatcher', method))
    @fluent
    def path(self, path):
        """
        Defines a URL path to match.
        Only call this method if the URL has no path already defined.
        Arguments:
            path (str): URL path value to match. E.g: ``/api/users``.
        Returns:
            self: current Mock instance.
        """
        # Rebuild the stored URL with the new path, keeping host/query intact.
        url = furl(self._request.rawurl)
        url.path = path
        self._request.url = url.url
        self.add_matcher(matcher('PathMatcher', path))
@fluent
def header(self, name, value):
"""
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
"""
headers = {name: value}
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers))
    @fluent
    def headers(self, headers=None, **kw):
        """
        Defines a dictionary of headers to match.
        Header keys are case insensitive.
        Arguments:
            headers (dict): headers to match.
            **headers (dict): headers to match as variadic keyword arguments.
        Returns:
            self: current Mock instance.
        """
        # Keyword arguments take precedence over the positional dict.
        headers = kw if kw else headers
        self._request.headers = headers
        self.add_matcher(matcher('HeadersMatcher', headers))
    @fluent
    def header_present(self, *names):
        """
        Defines a new header matcher expectation that must be present in the
        outgoing request in order to be satisfied, no matter what value it
        hosts.
        Header keys are case insensitive.
        Arguments:
            *names (str): header or headers names to match.
        Returns:
            self: current Mock instance.
        Example::
            (pook.get('server.com/api')
                .header_present('content-type'))
        """
        # A catch-all regex value means "any value, as long as it exists".
        for name in names:
            headers = {name: re.compile('(.*)')}
            self.add_matcher(matcher('HeadersMatcher', headers))
    @fluent
    def headers_present(self, headers):
        """
        Defines a list of headers that must be present in the
        outgoing request in order to satisfy the matcher, no matter what value
        the headers hosts.
        Header keys are case insensitive.
        Arguments:
            headers (list|tuple): header keys to match.
        Returns:
            self: current Mock instance.
        Example::
            (pook.get('server.com/api')
                .headers_present(['content-type', 'Authorization']))
        """
        headers = {name: re.compile('(.*)') for name in headers}
        self.add_matcher(matcher('HeadersMatcher', headers))
    @fluent
    def type(self, value):
        """
        Defines the request ``Content-Type`` header to match.
        You can pass one of the following aliases instead of the full
        MIME type representation:
        - ``json`` = ``application/json``
        - ``xml`` = ``application/xml``
        - ``html`` = ``text/html``
        - ``text`` = ``text/plain``
        - ``urlencoded`` = ``application/x-www-form-urlencoded``
        - ``form`` = ``application/x-www-form-urlencoded``
        - ``form-data`` = ``application/x-www-form-urlencoded``
        Arguments:
            value (str): type alias or header value to match.
        Returns:
            self: current Mock instance.
        """
        # Alias of ``content``.
        self.content(value)
    @fluent
    def content(self, value):
        """
        Defines the ``Content-Type`` outgoing header value to match.
        You can pass one of the following type aliases instead of the full
        MIME type representation:
        - ``json`` = ``application/json``
        - ``xml`` = ``application/xml``
        - ``html`` = ``text/html``
        - ``text`` = ``text/plain``
        - ``urlencoded`` = ``application/x-www-form-urlencoded``
        - ``form`` = ``application/x-www-form-urlencoded``
        - ``form-data`` = ``application/x-www-form-urlencoded``
        Arguments:
            value (str): type alias or header value to match.
        Returns:
            self: current Mock instance.
        """
        # Expand known aliases; unknown values are used verbatim.
        header = {'Content-Type': TYPES.get(value, value)}
        self._request.headers = header
        self.add_matcher(matcher('HeadersMatcher', header))
    @fluent
    def param(self, name, value):
        """
        Defines an URL param key and value to match.
        Arguments:
            name (str): param name to match.
            value (str): param value to match.
        Returns:
            self: current Mock instance.
        """
        self.params({name: value})
    @fluent
    def param_exists(self, name):
        """
        Checks if a given URL param name is present in the URL.
        Arguments:
            name (str): param name to check existence.
        Returns:
            self: current Mock instance.
        """
        self.params({name: re.compile('(.*)')})
    @fluent
    def params(self, params):
        """
        Defines a set of URL query params to match.
        Arguments:
            params (dict): set of params to match.
        Returns:
            self: current Mock instance.
        """
        # Merge the params into the stored URL so the raw URL stays in sync.
        url = furl(self._request.rawurl)
        url = url.add(params)
        self._request.url = url.url
        self.add_matcher(matcher('QueryMatcher', params))
    @fluent
    def body(self, body):
        """
        Defines the body data to match.
        ``body`` argument can be a ``str``, ``binary`` or a regular expression.
        Arguments:
            body (str|binary|regex): body data to match.
        Returns:
            self: current Mock instance.
        """
        self._request.body = body
        self.add_matcher(matcher('BodyMatcher', body))
    @fluent
    def json(self, json):
        """
        Defines the JSON body to match.
        ``json`` argument can be an JSON string, a JSON serializable
        Python structure, such as a ``dict`` or ``list`` or it can be
        a regular expression used to match the body.
        Arguments:
            json (str|dict|list|regex): body JSON to match.
        Returns:
            self: current Mock instance.
        """
        self._request.json = json
        self.add_matcher(matcher('JSONMatcher', json))
    @fluent
    def jsonschema(self, schema):
        """
        Defines a JSONSchema representation to be used for body matching.
        Arguments:
            schema (str|dict): dict or JSONSchema string to use.
        Returns:
            self: current Mock instance.
        """
        self.add_matcher(matcher('JSONSchemaMatcher', schema))
    @fluent
    def xml(self, xml):
        """
        Defines a XML body value to match.
        Arguments:
            xml (str|regex): body XML to match.
        Returns:
            self: current Mock instance.
        """
        self._request.xml = xml
        self.add_matcher(matcher('XMLMatcher', xml))
    @fluent
    def file(self, path):
        """
        Reads the body to match from a disk file.
        Arguments:
            path (str): relative or absolute path to file to read from.
        Returns:
            self: current Mock instance.
        """
        # NOTE(review): the file is read as text with the platform default
        # encoding — confirm binary bodies are not expected here.
        with open(path, 'r') as f:
            self.body(str(f.read()))
@fluent
def add_matcher(self, matcher):
"""
Adds one or multiple custom matchers instances.
Matchers must implement the following interface:
- ``.__init__(expectation)``
- ``.match(request)``
- ``.name = str``
Matchers can optionally inherit from ``pook.matchers.BaseMatcher``.
Arguments:
*matchers (pook.matchers.BaseMatcher): matchers to add.
Returns:
self: current Mock instance.
"""
self.matchers.add(matcher)
@fluent
def use(self, *matchers):
"""
Adds one or multiple custom matchers instances.
Matchers must implement the following interface:
- ``.__init__(expectation)``
- ``.match(request)``
- ``.name = str``
Matchers can optionally inherit from ``pook.matchers.BaseMatcher``.
Arguments:
*matchers (pook.matchers.BaseMatcher): matchers to add.
Returns:
self: current Mock instance.
"""
[self.add_matcher(matcher) for matcher in matchers]
    @fluent
    def times(self, times=1):
        """
        Defines the TTL limit for the current mock.
        The TTL number will determine the maximum number of times that the
        current mock can be matched and therefore consumed.
        Arguments:
            times (int): TTL number. Defaults to ``1``.
        Returns:
            self: current Mock instance.
        """
        self._times = times
    @fluent
    def persist(self, status=None):
        """
        Enables persistent mode for the current mock.
        Arguments:
            status (bool, optional): explicit persistence flag; any
                non-bool argument (including ``None``) enables persistence.
        Returns:
            self: current Mock instance.
        """
        self._persist = status if type(status) is bool else True
    @fluent
    def filter(self, *filters):
        """
        Registers one or multiple request filters used during the matching
        phase.
        Arguments:
            *filters (function): variadic filter functions.
        Returns:
            self: current Mock instance.
        """
        _append_funcs(self.filters, filters)
    @fluent
    def map(self, *mappers):
        """
        Registers one or multiple request mappers used during the mapping
        phase.
        Arguments:
            *mappers (function): variadic mapper functions.
        Returns:
            self: current Mock instance.
        """
        _append_funcs(self.mappers, mappers)
    @fluent
    def callback(self, *callbacks):
        """
        Registers one or multiple callback that will be called every time the
        current mock matches an outgoing HTTP request.
        Arguments:
            *callbacks (function): callback functions to call.
        Returns:
            self: current Mock instance.
        """
        _append_funcs(self.callbacks, callbacks)
    @fluent
    def delay(self, delay=1000):
        """
        Delay network response with certain milliseconds.
        Only supported by asynchronous HTTP clients, such as ``aiohttp``.
        Arguments:
            delay (int): milliseconds to delay response.
        Returns:
            self: current Mock instance.
        """
        self._delay = int(delay)
    @fluent
    def error(self, error):
        """
        Defines a simulated exception error that will be raised.
        Arguments:
            error (str|Exception): error to raise.
        Returns:
            self: current Mock instance.
        """
        # Strings are wrapped in a RuntimeError; exception instances are
        # raised as-is when the mock matches.
        self._error = RuntimeError(error) if isinstance(error, str) else error
    def reply(self, status=200, **kw):
        """
        Defines the mock response.
        Arguments:
            status (int, optional): response status code. Defaults to ``200``.
            **kw (dict): optional keyword arguments passed to ``pook.Response``
                constructor.
        Returns:
            pook.Response: mock response definition instance.
        """
        # Use the existing Response mock if one is set; only when absent is a
        # new Response created from ``kw``.
        # NOTE(review): ``self._response`` is always initialised in
        # ``__init__``, so ``kw`` appears to be silently ignored here —
        # confirm whether keyword arguments should be merged into the
        # existing response instead.
        res = self._response or Response(**kw)
        # Define HTTP mandatory response status
        res.status(status or res._status)
        # Expose current mock instance in response for self-reference
        res.mock = self
        # Define mock response
        self._response = res
        # Return response
        return res
    def status(self, code=200):
        """
        Defines the response status code.
        Equivalent to ``self.reply(code)``.
        Arguments:
            code (int): response status code. Defaults to ``200``.
        Returns:
            pook.Response: mock response definition instance.
        """
        return self.reply(status=code)
    def response(self, status=200, **kw):
        """
        Defines the mock response. Alias to ``.reply()``
        Arguments:
            status (int): response status code. Defaults to ``200``.
            **kw (dict): optional keyword arguments passed to ``pook.Response``
                constructor.
        Returns:
            pook.Response: mock response definition instance.
        """
        return self.reply(status=status, **kw)
    def isdone(self):
        """
        Returns ``True`` if the mock has been matched by outgoing HTTP traffic.
        Returns:
            bool: ``True`` if the mock was matched successfully.
        """
        # A persistent mock is "done" once matched at least once; a regular
        # mock is done when its TTL counter is exhausted.
        return (self._persist and self._matches > 0) or self._times <= 0
    def ismatched(self):
        """
        Returns ``True`` if the mock has been matched at least once time.
        Returns:
            bool
        """
        return self._matches > 0
    @property
    def done(self):
        """
        Attribute accessor that would be ``True`` if the current mock
        is done, and therefore have been matched multiple times.
        Returns:
            bool
        """
        return self.isdone()
    @property
    def matched(self):
        """
        Accessor property that would be ``True`` if the current mock
        have been matched at least once.
        See ``Mock.total_matches`` for more information.
        Returns:
            bool
        """
        return self._matches > 0
    @property
    def total_matches(self):
        """
        Accessor property to retrieve the total number of times that the
        current mock has been matched.
        Returns:
            int
        """
        return self._matches
    @property
    def matches(self):
        """
        Accessor to retrieve the mock match calls registry.
        Returns:
            list[MockCall]
        """
        # NOTE(review): naming is inverted vs ``calls`` below — ``matches``
        # returns the call registry while ``calls`` returns a count.
        return self._calls
    @property
    def calls(self):
        """
        Accessor to retrieve the amount of mock matched calls.
        Returns:
            int
        """
        return len(self.matches)
    def match(self, request):
        """
        Matches an outgoing HTTP request against the current mock matchers.
        This method acts like a delegator to `pook.MatcherEngine`.
        Arguments:
            request (pook.Request): request instance to match.
        Raises:
            Exception: if the mock has an exception defined.
        Returns:
            tuple(bool, list[Exception]): ``True`` if the mock matches
                the outgoing HTTP request, otherwise ``False``. Also returns
                an optional list of error exceptions.
        """
        # If mock already expired, fail it
        if self._times <= 0:
            raise PookExpiredMock('Mock expired')
        # Trigger mock filters
        for test in self.filters:
            if not test(request, self):
                return False, []
        # Trigger mock mappers
        for mapper in self.mappers:
            request = mapper(request, self)
            if not request:
                raise ValueError('map function must return a request object')
        # Match incoming request against registered mock matchers
        matches, errors = self.matchers.match(request)
        # If not matched, return False
        if not matches:
            return False, errors
        # Register matched request for further inspection and reference
        self._calls.append(request)
        # Increase mock call counter
        self._matches += 1
        # Decrease the TTL only for non-persistent mocks
        if not self._persist:
            self._times -= 1
        # Raise simulated error
        if self._error:
            raise self._error
        # Trigger callback when matched
        for callback in self.callbacks:
            callback(request, self)
        return True, []
    def __call__(self, fn):
        """
        Overload Mock instance as callable object in order to be used
        as decorator definition syntax.
        Arguments:
            fn (function): function to decorate.
        Returns:
            function or pook.Mock
        """
        # Support chain sequences of mock definitions
        if isinstance(fn, Response):
            return fn.mock
        if isinstance(fn, Mock):
            return fn
        # Force type assertion and raise an error if it is not a function
        if not isfunction(fn) and not ismethod(fn):
            raise TypeError('first argument must be a method or function')
        # Remove mock to prevent decorator definition scope collision
        self._engine.remove_mock(self)
        @functools.wraps(fn)
        def decorator(*args, **kw):
            # Re-register mock on decorator call
            self._engine.add_mock(self)
            # Force engine activation, if available
            # This prevents state issue while declaring mocks as decorators.
            # This might be removed in the future.
            engine_active = self._engine.active
            if not engine_active:
                self._engine.activate()
            # Call decorated target function
            try:
                return fn(*args, **kw)
            finally:
                # Finally remove mock after function execution
                # to prevent shared state
                self._engine.remove_mock(self)
                # If the engine was not previously active, disable it
                if not engine_active:
                    self._engine.disable()
        return decorator
    def __repr__(self):
        """
        Returns an human friendly readable instance data representation.
        Returns:
            str
        """
        keys = ('matches', 'times', 'persist', 'matchers', 'response')
        args = []
        for key in keys:
            if key == 'matchers':
                # Re-indent the nested matchers repr and close its list inline.
                value = repr(self.matchers).replace('\n ', '\n ')
                value = value[:-2] + ' ])'
            elif key == 'response':
                value = repr(self._response)
                value = value[:-1] + ' )'
            else:
                # Internal attributes share the name of the public key,
                # prefixed with an underscore.
                value = repr(getattr(self, '_' + key))
            args.append('{}={}'.format(key, value))
        args = '(\n {}\n)'.format(',\n '.join(args))
        return type(self).__name__ + args
    def __enter__(self):
        """
        Implements context manager enter interface.
        Returns:
            Mock: the current mock instance.
        """
        # Make mock persistent if using default times
        if self._times == 1:
            self._persist = True
        # Automatically enable the mock engine, if needed
        if not self._engine.active:
            self._engine.activate()
            # Remember we enabled it, so ``__exit__`` can disable it again.
            self._disable_engine = True
        return self
def __exit__(self, etype, value, traceback):
"""
Implements context manager exit interface.
"""
# Force disable mock
self._times = 0
# Automatically disable the mock engine, if needed
if getattr(self, '_disable_engine', False):
self._disable_engine = False
self._engine.disable()
if etype is not None:
raise value
| 29.798368
| 79
| 0.577385
|
2a129fa5630ad5e214432273c36f2e8d6d386c57
| 30,944
|
py
|
Python
|
tests/unit/compile/test_serialization.py
|
ethan-asapp/flambe
|
70257167058c7b82ee39f74167a6161bd264ad18
|
[
"MIT"
] | null | null | null |
tests/unit/compile/test_serialization.py
|
ethan-asapp/flambe
|
70257167058c7b82ee39f74167a6161bd264ad18
|
[
"MIT"
] | null | null | null |
tests/unit/compile/test_serialization.py
|
ethan-asapp/flambe
|
70257167058c7b82ee39f74167a6161bd264ad18
|
[
"MIT"
] | null | null | null |
import pytest
import tempfile
from collections import abc, OrderedDict
import os
import torch
import dill
import mock
from ruamel.yaml.compat import StringIO
from ruamel.yaml import YAML
from typing import Mapping, Any, Optional
# from flambe.compile import yaml
from flambe import Component, save_state_to_file, load_state_from_file, load, save
from flambe.compile import Registrable, yaml, make_component, Schema
from flambe.compile.serialization import _extract_prefix
FLAMBE_SOURCE_KEY = '_flambe_source'
FLAMBE_CLASS_KEY = '_flambe_class'
FLAMBE_CONFIG_KEY = '_flambe_config'
FLAMBE_DIRECTORIES_KEY = '_flambe_directories'
KEEP_VARS_KEY = 'keep_vars'
VERSION_KEY = '_flambe_version'
def list_files(startpath):
    """Print a tree view of *startpath*, indenting four spaces per level."""
    for root, _dirs, files in os.walk(startpath):
        depth = root.replace(startpath, '').count(os.sep)
        pad = ' ' * 4 * depth
        print('{}{}/'.format(pad, os.path.basename(root)))
        child_pad = ' ' * 4 * (depth + 1)
        for name in files:
            print('{}{}'.format(child_pad, name))
def check_mapping_equivalence(x, y, exclude_config=False):
    """
    Recursively assert that every entry of mapping ``x`` is present and
    equal in mapping ``y``.

    Keys ``keep_vars``/``version`` are always ignored, and the flambe
    config key is skipped when ``exclude_config`` is set. Tensor leaves
    are compared element-wise; other leaves with ``==``.

    Raises:
        AssertionError: on the first key or value mismatch.
    """
    for key in x.keys():
        if key == KEEP_VARS_KEY or key == 'version':
            continue
        if key == FLAMBE_CONFIG_KEY and exclude_config:
            continue
        assert key in y
        if isinstance(x[key], abc.Mapping):
            check_mapping_equivalence(x[key], y[key], exclude_config=exclude_config)
        elif isinstance(x[key], torch.Tensor):
            assert isinstance(y[key], torch.Tensor)
            # Bug fix: the result of ``torch.equal`` was previously
            # discarded, so differing tensors never failed the check.
            assert torch.equal(x[key], y[key])
        else:
            assert x[key] == y[key]
EXAMPLE_TRAINER_CONFIG = """
!Trainer
train_sampler: !BaseSampler
val_sampler: !BaseSampler
dataset: !TabularDataset
train: [['']]
model: !RNNEncoder
input_size: 300
rnn_type: lstm
n_layers: 2
hidden_size: 256
loss_fn: !torch.NLLLoss
metric_fn: !Accuracy
optimizer: !torch.Adam
params: []
max_steps: 2
iter_per_step: 2
"""
@pytest.fixture
def make_classes_2():
    """Build two fresh ``Component`` subclasses (A, B) for serialization tests."""
    class A(Component):
        def __init__(self, akw1=0, akw2=None):
            self.akw1 = akw1
            self.akw2 = akw2

    class B(Component):
        def __init__(self, bkw1=0, bkw2='', bkw3=99):
            self.bkw1 = bkw1
            self.bkw2 = bkw2
            self.bkw3 = bkw3

    # A third class ``C`` used to be defined here but was never returned or
    # referenced anywhere, so it has been removed as dead code.
    return A, B
class Basic(Component):
pass
class Composite(Component):
def __init__(self):
self.leaf = Basic()
# Component with one registered (serialized) attribute; its source is
# compared dynamically via dill.source.getsource in the tests.
class BasicStateful(Component):
    def __init__(self, x):
        self.x = x
        self.register_attrs('x')
        # self.b = Basic()
# Component that is also an nn.Module, with one registered attribute.
class BasicStatefulTwo(Component, torch.nn.Module):
    def __init__(self, y):
        super().__init__()
        self.y = y
        self.register_attrs('y')
# Torch-backed Component holding only a stateless Component child.
class IntermediateTorch(Component, torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.leaf = Basic()
# TODO fix usage for x
# Torch-backed Component with a stateful Component child plus a torch layer.
class IntermediateStatefulTorch(Component, torch.nn.Module):
    def __init__(self, x):
        super().__init__()
        self.leaf = BasicStateful(x=x)
        self.linear = torch.nn.Linear(2, 2)
class IntermediateTorchOnly(torch.nn.Module):
    """Plain torch module (NOT a flambe Component) that wraps a Component
    child; used to test state bridging across torch-only layers."""

    def __init__(self, component):
        super().__init__()
        self.child = component
        self.linear = torch.nn.Linear(in_features=2, out_features=2)
# Non-torch Component root owning a torch submodule plus an optimizer and
# scheduler (exercises mixed state collection across object kinds).
class RootTorch(Component):
    def __init__(self, x):
        super().__init__()
        self.model = IntermediateStatefulTorch(x=x)
        # self.linear = torch.nn.Linear(2, 2)
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()))
        # NOTE(review): 0.01 here is StepLR's step_size (an epoch count), not a
        # learning-rate factor — presumably harmless for these tests, but confirm.
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 0.01)
# Torch Component composing a child Component, a registered int attribute,
# and an externally supplied torch layer.
class ComposableTorchStateful(Component, torch.nn.Module):
    def __init__(self, a: Component, b: int, c: torch.nn.Module):
        super().__init__()
        self.child = a
        self.other_data = b
        self.linear = c
        self.register_attrs('other_data')
# Variant of ComposableTorchStateful that uses manual _state/_load_state
# hooks instead of register_attrs.
class ComposableTorchStatefulPrime(Component, torch.nn.Module):
    def __init__(self, a: Component, b: int, c: torch.nn.Linear):
        super().__init__()
        self.child = a
        self.other_data = b
        self.linear = c

    def _state(self, state_dict, prefix, local_metadata):
        # Manually serialize the custom attribute under this object's prefix.
        state_dict[prefix + 'other_data'] = self.other_data
        return state_dict

    def _load_state(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Restore the custom attribute; its prefixed key must be present.
        assert prefix + 'other_data' in state_dict
        self.other_data = state_dict[prefix + 'other_data']
# Component whose third child is reachable only through a torch-only
# (non-Component) wrapper module.
class ComposableTorchStatefulTorchOnlyChild(Component, torch.nn.Module):
    def __init__(self, a: Component, b: int, c: Component):
        super().__init__()
        self.child = a
        self.other_data = b
        self.torch_only = IntermediateTorchOnly(c)
        self.register_attrs('other_data')
# Minimal non-torch Component wrapping an arbitrary item.
class ComposableContainer(Component):
    def __init__(self, item: Any):
        self.item = item
# Non-torch Component root holding an item plus a torch-only extra object.
class Org(Component):
    def __init__(self, item: Any, extra):
        self.item = item
        self.torch_only_extra = extra
def create_factory(class_):
    """Return a factory building ``class_`` from YAML or by direct call.

    The YAML path produces the canonical empty config ``!<Name> {}`` so that
    config metadata assertions stay stable.
    """
    def _factory(from_config):
        if not from_config:
            return class_()
        schema = yaml.load(f"!{class_.__name__} {{}}\n")
        return schema()
    return _factory
@pytest.fixture
def basic_object():
    """Factory fixture producing Basic instances (from YAML or directly)."""
    return create_factory(Basic)
@pytest.fixture
def nested_object():
    """Factory fixture producing Composite instances (from YAML or directly)."""
    return create_factory(Composite)
@pytest.fixture
def basic_object_with_state():
    """Factory fixture for BasicStateful, built from YAML or directly."""
    def _factory(from_config, x=-1):
        if not from_config:
            return BasicStateful(x=x)
        schema = yaml.load(f"!BasicStateful\nx: {x}\n")
        return schema()
    return _factory
@pytest.fixture
def alternating_nn_module_with_state():
    """Factory fixture for RootTorch, built from YAML or directly."""
    def _factory(from_config, x=-1):
        if not from_config:
            return RootTorch(x=x)
        schema = yaml.load(f"!RootTorch\nx: {x}\n")
        return schema()
    return _factory
def schema_builder():
    """Return an uncompiled Schema for a bare !Basic component."""
    config = """
    !Basic
    """
    obj = yaml.load(config)
    return obj
def complex_builder(from_config, schema=False, x=-1):
    """Build a three-level ComposableTorchStateful tree.

    :from_config: build via YAML when True, direct construction otherwise
    :schema: when building from config, return the uncompiled Schema if True
    :x: value intended for the innermost BasicStateful (see NOTE below)
    """
    if from_config:
        config = """
!ComposableTorchStateful
a: !ComposableTorchStateful
  a: !ComposableTorchStateful
    a: !BasicStateful
      x: {}
    b: 2021
    c: !torch.Linear
      in_features: 2
      out_features: 2
  b: 2022
  c: !torch.Linear
    in_features: 2
    out_features: 2
b: 2023
c: !torch.Linear
  in_features: 2
  out_features: 2
"""
        # NOTE(review): the return value of format() is discarded, so the
        # literal '{}' stays in the YAML and is parsed as an empty mapping —
        # x never reaches the config-built object. Several roundtrip tests
        # compare config metadata across objects built with different x and
        # currently rely on the configs being identical; confirm before fixing.
        config.format(x)
        obj = yaml.load(config)
        if not schema:
            obj = obj()
        return obj
    else:
        a1 = BasicStateful(x=x)
        b1 = 2021
        c1 = torch.nn.Linear(2, 2)
        a2 = ComposableTorchStateful(a1, b1, c1)
        b2 = 2022
        c2 = torch.nn.Linear(2, 2)
        a3 = ComposableTorchStateful(a2, b2, c2)
        b3 = 2023
        c3 = torch.nn.Linear(2, 2)
        obj = ComposableTorchStateful(a3, b3, c3)
        return obj
def complex_builder_nontorch_root(from_config, schema=False, x=-1):
    """Like complex_builder, but wrap the torch tree in a non-torch
    ComposableContainer root.

    NOTE(review): the config path puts a ComposableTorchStatefulPrime at the
    top of the inner tree while the direct path uses ComposableTorchStateful —
    confirm this asymmetry is intentional.
    """
    if from_config:
        config = """
!ComposableContainer
item:
  !ComposableTorchStatefulPrime
  a: !ComposableTorchStateful
    a: !ComposableTorchStateful
      a: !BasicStateful
        x: {}
      b: 2021
      c: !torch.Linear
        in_features: 2
        out_features: 2
    b: 2022
    c: !torch.Linear
      in_features: 2
      out_features: 2
  b: 2023
  c: !torch.Linear
    in_features: 2
    out_features: 2
"""
        # NOTE(review): same latent bug as complex_builder — the formatted
        # string is discarded, so 'x: {}' parses as an empty mapping and x is
        # ignored. Metadata-equality tests currently depend on this; confirm
        # before fixing.
        config.format(x)
        obj = yaml.load(config)
        if not schema:
            obj = obj()
        return obj
    else:
        a1 = BasicStateful(x=x)
        b1 = 2021
        c1 = torch.nn.Linear(2, 2)
        a2 = ComposableTorchStateful(a1, b1, c1)
        b2 = 2022
        c2 = torch.nn.Linear(2, 2)
        a3 = ComposableTorchStateful(a2, b2, c2)
        b3 = 2023
        c3 = torch.nn.Linear(2, 2)
        item = ComposableTorchStateful(a3, b3, c3)
        obj = ComposableContainer(item)
        return obj
@pytest.fixture
def complex_multi_layered():
    """Builder fixture for the three-level ComposableTorchStateful tree."""
    return complex_builder
@pytest.fixture
def complex_multi_layered_nontorch_root():
    """Builder fixture for the container-wrapped (non-torch root) tree."""
    return complex_builder_nontorch_root
@pytest.fixture
def schema():
    """Fixture returning the schema_builder factory (uncompiled !Basic)."""
    return schema_builder
class TestHelpers:
    """Sanity checks for private serialization helpers."""

    def test_extract_prefix(self):
        # Bare reference only: raises NameError if the private helper's
        # import ever breaks.
        _extract_prefix
class TestState:
    """Tests for the structure, contents, and metadata of Component.get_state."""

    def test_state_returns_not_None(self, basic_object):
        obj = basic_object(from_config=True)
        assert obj.get_state() is not None

    def test_state_metadata(self, basic_object):
        # Metadata must record source code, version, and the set of
        # prefixes ("directories") that make up the object tree.
        state = basic_object(from_config=True).get_state()
        assert hasattr(state, '_metadata')
        assert '' in state._metadata
        assert FLAMBE_DIRECTORIES_KEY in state._metadata
        assert FLAMBE_SOURCE_KEY in state._metadata['']
        assert VERSION_KEY in state._metadata['']
        assert state._metadata[''][FLAMBE_SOURCE_KEY] == "class Basic(Component):\n    pass\n"
        # assert state[FLAMBE_CONFIG_KEY] == ''
        assert '' in state._metadata[FLAMBE_DIRECTORIES_KEY] and len(state._metadata[FLAMBE_DIRECTORIES_KEY]) == 1
        assert state._metadata[''][VERSION_KEY] == '0.0.0'

    def test_state_config(self, basic_object):
        # Config metadata is only recorded for objects built from YAML.
        assert FLAMBE_CONFIG_KEY not in basic_object(from_config=False).get_state()._metadata['']
        obj = basic_object(from_config=True)
        state = obj.get_state()
        assert FLAMBE_CONFIG_KEY in state._metadata['']
        assert state._metadata[''][FLAMBE_CONFIG_KEY] == "!Basic {}\n"

    def test_state_nested_but_empty(self, nested_object):
        # A stateless nested object yields an empty state but full metadata.
        expected_state = {}
        expected_metadata = {'': {FLAMBE_SOURCE_KEY: "class Composite(Component):\n    def __init__(self):\n        self.leaf = Basic()\n", VERSION_KEY: "0.0.0", FLAMBE_CLASS_KEY: 'Composite'}, 'leaf': {FLAMBE_SOURCE_KEY: 'class Basic(Component):\n    pass\n', VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'Basic'}, FLAMBE_DIRECTORIES_KEY: {'', 'leaf'}, KEEP_VARS_KEY: False}
        obj = nested_object(from_config=False)
        state = obj.get_state()
        assert state == expected_state
        check_mapping_equivalence(expected_metadata, state._metadata)
        check_mapping_equivalence(state._metadata, expected_metadata)

    def test_state_custom(self, basic_object_with_state):
        # Attributes declared via register_attrs appear in the state.
        obj = basic_object_with_state(from_config=True)
        x = obj.x
        expected_state = {'x': x}
        assert obj.get_state() == expected_state

    # def test_state_custom_nested(nested_object_with_state):
    #     obj = nested_object_with_state()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #
    # def test_state_pytorch_empty(nn_modules):
    #     cls, cls_torch_first = nn_modules
    #     obj, obj_torch_first = cls(), cls_torch_first()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #     assert obj_torch_first.get_state() == expected_state
    #
    # def test_state_pytorch_nested_no_modules_no_parameters(nested_nn_module):
    #     obj = nested_nn_module()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state
    #
    # def test_state_pytorch_alternating_nesting(alternating_nn_module):
    #     obj = alternating_nn_module()
    #     expected_state = {}
    #     assert obj.get_state() == expected_state

    def test_state_pytorch_alternating_nested_with_modules(self, alternating_nn_module_with_state):
        # State mixes registered attrs with torch parameters at each level.
        obj = alternating_nn_module_with_state(from_config=True, x=1)
        t1 = obj.model.linear.weight
        t2 = obj.model.linear.bias
        x = obj.model.leaf.x
        expected_state = {'model.leaf.x': x, 'model.linear.weight': t1, 'model.linear.bias': t2}
        # Expected source metadata is read dynamically from the live classes
        # so this test tracks edits to the fixtures automatically.
        root_source_code = dill.source.getsource(RootTorch)
        intermediate_source_code = dill.source.getsource(IntermediateStatefulTorch)
        leaf_source_code = dill.source.getsource(BasicStateful)
        expected_metadata = OrderedDict({FLAMBE_DIRECTORIES_KEY: set(['', 'model', 'model.leaf']), 'keep_vars': False, '': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'RootTorch', FLAMBE_SOURCE_KEY: root_source_code, FLAMBE_CONFIG_KEY: "!RootTorch\nx: 1\n"},
                                         'model': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'IntermediateStatefulTorch', FLAMBE_SOURCE_KEY: intermediate_source_code, 'version': 1},  # TODO add config back: FLAMBE_CONFIG_KEY: "!IntermediateStatefulTorch {}\n"
                                         'model.leaf': {VERSION_KEY: '0.0.0', FLAMBE_CLASS_KEY: 'BasicStateful', FLAMBE_SOURCE_KEY: leaf_source_code},  # TODO add config back: FLAMBE_CONFIG_KEY: "!BasicStateful {}\n"
                                         'model.linear': {'version': 1}})
        state = obj.get_state()
        check_mapping_equivalence(state._metadata, expected_metadata)
        check_mapping_equivalence(expected_metadata, state._metadata)
        check_mapping_equivalence(state, expected_state)
        check_mapping_equivalence(expected_state, state)
class TestLoadState:
    """Tests for Component.load_state (several remain unimplemented stubs)."""

    def test_load_state_empty(self):
        pass

    def test_load_state_nested_empty(self):
        pass

    def test_load_state_custom_nested(self):
        pass

    def test_load_state_pytorch(self):
        pass

    def test_load_state_pytorch_alternating_nested(self):
        pass

    def test_state_complex_multilayered_nontorch_root(self, complex_multi_layered_nontorch_root):
        # Register torch.nn modules as yaml-loadable components so the
        # config-based builder can construct !torch.Linear nodes.
        TORCH_TAG_PREFIX = "torch"
        exclude = ['torch.nn.quantized', 'torch.nn.qat']
        make_component(
            torch.nn.Module,
            TORCH_TAG_PREFIX,
            only_module='torch.nn',
            exclude=exclude
        )
        obj = complex_multi_layered_nontorch_root(from_config=True, x=1)
        t1 = obj.item.child.linear.weight.data
        state = obj.get_state()
        new_obj = complex_multi_layered_nontorch_root(from_config=True, x=2)
        new_obj.load_state(state)
        # Loaded weights must match the saved object's weights exactly.
        t2 = new_obj.item.child.linear.weight.data
        assert t1.equal(t2)
        check_mapping_equivalence(new_obj.get_state(), obj.get_state())
        check_mapping_equivalence(obj.get_state(), new_obj.get_state())

    def test_custom_attrs_load(self, complex_multi_layered):
        # state_dict() includes flambe's registered attrs, which plain torch
        # strict loading does not expect, so strict=True must fail.
        obj = complex_multi_layered(False)
        state = obj.state_dict()
        with pytest.raises(RuntimeError) as excinfo:
            obj.load_state_dict(state, strict=True)
        assert "Unexpected key(s)" in str(excinfo.value)
        obj.load_state_dict(state, strict=False)
class TestClassSave:
    """Placeholder for class-level save tests (not yet implemented)."""

    def test_class_save(self):
        pass
class TestClassLoad:
    """Placeholder for class-level load tests (not yet implemented)."""

    def test_class_load(self):
        pass
class TestModuleSave:
    """Placeholders for module-level save() tests (not yet implemented)."""

    def test_save_single_object(self, basic_object):
        pass

    def test_save_nested_object(self, nested_object):
        pass

    def test_save_pytorch_nested_alternating(self, alternating_nn_module_with_state):
        pass
class TestModuleLoad:
    """Placeholders for module-level load() tests (not yet implemented)."""

    def test_load_directory_single_file(self, basic_object):
        pass

    def test_load_nested_directory(self, nested_object):
        pass

    def test_load_pytorch_alternating(self, alternating_nn_module_with_state):
        pass
class TestSerializationIntegration:
    """End-to-end roundtrips: get_state/load_state and file-based save/load."""

    def test_state_and_load_roundtrip_single_object(self, basic_object):
        old_obj = basic_object(from_config=True)
        state = old_obj.get_state()
        new_obj = basic_object(from_config=False)
        new_obj.load_state(state, strict=False)
        assert old_obj.get_state() == new_obj.get_state()

    # def test_state_and_load_roundtrip_nested_object(self):
    #     pass

    def test_state_and_load_roundtrip_pytorch_alternating(self, alternating_nn_module_with_state):
        # exclude_config=True below: old was config-built, new was not, so
        # config metadata legitimately differs.
        old_obj = alternating_nn_module_with_state(from_config=True, x=1)
        state = old_obj.get_state()
        new_obj = alternating_nn_module_with_state(from_config=False, x=2)
        new_obj.load_state(state, strict=False)
        old_state = old_obj.get_state()
        new_state = new_obj.get_state()
        check_mapping_equivalence(new_state, old_state)
        check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=True)

    # def test_class_save_and_load_roundtrip():
    #     pass
    #
    # def test_class_save_and_load_roundtrip_nested():
    #     pass
    #
    # def test_class_save_and_load_roundtrip_pytorch():
    #     pass

    def test_save_to_file_and_load_from_file_roundtrip(self, basic_object):
        old_obj = basic_object(from_config=True)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            state = load_state_from_file(path)
            new_obj = basic_object(from_config=False)
            new_obj.load_state(state, strict=False)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=True)

    def test_save_to_file_and_load_from_file_roundtrip_pytorch(self, alternating_nn_module_with_state):
        old_obj = alternating_nn_module_with_state(from_config=False, x=1)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            state = load_state_from_file(path)
            new_obj = alternating_nn_module_with_state(from_config=False, x=2)
            new_obj.load_state(state, strict=False)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    def test_save_to_file_and_load_from_file_roundtrip_complex(self, complex_multi_layered):
        # Register torch.nn modules so configs can reference !torch.Linear.
        TORCH_TAG_PREFIX = "torch"
        exclude = ['torch.nn.quantized', 'torch.nn.qat']
        make_component(
            torch.nn.Module,
            TORCH_TAG_PREFIX,
            only_module='torch.nn',
            exclude=exclude
        )
        old_obj = complex_multi_layered(from_config=True, x=1)
        # Test that the current state is actually saved, for a
        # Component-only child of torch objects
        old_obj.child.child.child.x = 24
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as path:
            save_state_to_file(state, path)
            list_files(path)
            state_loaded = load_state_from_file(path)
            check_mapping_equivalence(state, state_loaded)
            # assert False
            new_obj = complex_multi_layered(from_config=True, x=2)
            new_obj.load_state(state_loaded, strict=False)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_save_to_file_and_load_from_file_roundtrip_complex_nontorch_root(self,
            complex_multi_layered_nontorch_root, pickle_only, compress_save_file):
        TORCH_TAG_PREFIX = "torch"
        exclude = ['torch.nn.quantized', 'torch.nn.qat']
        make_component(
            torch.nn.Module,
            TORCH_TAG_PREFIX,
            only_module='torch.nn',
            exclude=exclude
        )
        old_obj = complex_multi_layered_nontorch_root(from_config=True, x=1)
        state = old_obj.get_state()
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save_state_to_file(state, path, compress_save_file, pickle_only)
            list_files(path)
            # save_state_to_file appends these suffixes depending on mode.
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            state_loaded = load_state_from_file(path)
            check_mapping_equivalence(state, state_loaded)
            check_mapping_equivalence(state._metadata, state_loaded._metadata)
            new_obj = complex_multi_layered_nontorch_root(from_config=True, x=2)
            int_state = new_obj.get_state()
            new_obj.load_state(state_loaded, strict=False)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata)
            check_mapping_equivalence(int_state._metadata, state_loaded._metadata)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_module_save_and_load_roundtrip(self, basic_object, pickle_only, compress_save_file):
        old_obj = basic_object(from_config=True)
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save(old_obj, path, compress_save_file, pickle_only)
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            new_obj = load(path)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    def test_module_save_and_load_roundtrip_pytorch(self,
            alternating_nn_module_with_state,
            pickle_only,
            compress_save_file):
        old_obj = alternating_nn_module_with_state(from_config=True, x=1)
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save(old_obj, path, compress_save_file, pickle_only)
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            new_obj = load(path)
            old_state = old_obj.get_state()
            new_state = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    def test_module_save_and_load_roundtrip_pytorch_only_bridge(self):
        # Component child reachable only through a plain torch-only module.
        # NOTE(review): extra/extra2 are created but Org is compiled with
        # extra=None — presumably intentional so the torch-only Linear stays
        # out of the state; confirm.
        a = BasicStateful.compile(x=3)
        b = 100
        c = BasicStatefulTwo.compile(y=0)
        item = ComposableTorchStatefulTorchOnlyChild.compile(a=a, b=b, c=c)
        extra = torch.nn.Linear(2, 2)
        old_obj = Org.compile(item=item, extra=None)
        # x for a2 should be different from instance a
        a2 = BasicStateful.compile(x=4)
        b2 = 101
        # y for c2 should be different from instance c
        c2 = BasicStatefulTwo.compile(y=1)
        item2 = ComposableTorchStatefulTorchOnlyChild.compile(a=a2, b=b2, c=c2)
        extra2 = torch.nn.Linear(2, 2)
        new_obj = Org.compile(item=item2, extra=None)
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'asavefile2.flambe')
            old_state = old_obj.get_state()
            save_state_to_file(old_state, path)
            new_state = load_state_from_file(path)
            new_obj.load_state(new_state)
            # save(old_obj, path)
            # new_obj = load(path)
            old_state_get = old_obj.get_state()
            new_state_get = new_obj.get_state()
            check_mapping_equivalence(new_state, old_state)
            check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)
            check_mapping_equivalence(new_state_get, old_state_get)
            check_mapping_equivalence(old_state_get._metadata, new_state_get._metadata, exclude_config=True)

    # def test_module_save_and_load_example_encoder(self):
    #     TORCH_TAG_PREFIX = "torch"
    #     make_component(torch.nn.Module, TORCH_TAG_PREFIX, only_module='torch.nn')
    #     make_component(torch.optim.Optimizer, TORCH_TAG_PREFIX, only_module='torch.optim')
    #     trainer = yaml.load(EXAMPLE_TRAINER_CONFIG)()
    #     with tempfile.TemporaryDirectory() as path:
    #         save(trainer, path)
    #         loaded_trainer = load(path)
    #         old_state = trainer.get_state()
    #         new_state = loaded_trainer.get_state()
    #         check_mapping_equivalence(new_state, old_state)
    #         check_mapping_equivalence(old_state._metadata, new_state._metadata, exclude_config=False)

    def test_module_save_and_load_single_instance_appears_twice(self, make_classes_2):
        # The &theb anchor makes one B instance shared by both A objects;
        # identity must survive a save/load roundtrip.
        txt = """
!C
one: !A
  akw2: &theb !B
    bkw2: test
    bkw1: 1
  akw1: 8
two: !A
  akw1: 8
  # Comment Here
  akw2: *theb
"""
        c = yaml.load(txt)()
        c.one.akw2.bkw1 = 6
        assert c.one.akw2 is c.two.akw2
        assert c.one.akw2.bkw1 == c.two.akw2.bkw1
        with tempfile.TemporaryDirectory() as path:
            save(c, path)
            state = load_state_from_file(path)
            loaded_c = load(path)
            assert loaded_c.one.akw2 is loaded_c.two.akw2
            assert loaded_c.one.akw2.bkw1 == loaded_c.two.akw2.bkw1
class TestSerializationExtensions:
    """Tests covering how pip-extension metadata travels through save/load."""

    # Candidate extensions; only ones actually used by a config should stick.
    EXTENSIONS = {
        "ext1": "my_extension_1",
        "ext2": "my_extension_2",
        "ext3": "my_extension_3",
    }

    @pytest.mark.parametrize("pickle_only", [True, False])
    @pytest.mark.parametrize("compress_save_file", [True, False])
    @mock.patch('flambe.compile.serialization.is_installed_module')
    @mock.patch('flambe.compile.serialization.import_modules')
    @mock.patch('flambe.compile.component.Schema.add_extensions_metadata')
    def test_save_to_file_and_load_from_file_with_extensions(
            self, mock_add_extensions,
            mock_import_module, mock_installed_module,
            compress_save_file, pickle_only, schema):
        """Test that extensions are saved to the output config.yaml
        and they are also added when loading back the object."""
        mock_installed_module.return_value = True
        schema_obj = schema()
        # Add extensions manually because if we use add_extensions_metadata
        # then no extensions will be added, as the schema doesn't contain
        # any matching prefix.
        schema_obj._extensions = TestSerializationExtensions.EXTENSIONS
        obj = schema_obj()
        state = obj.get_state()
        with tempfile.TemporaryDirectory() as root_path:
            path = os.path.join(root_path, 'savefile.flambe')
            save_state_to_file(state, path, compress_save_file, pickle_only)
            list_files(path)
            if pickle_only:
                path += '.pkl'
            if compress_save_file:
                path += '.tar.gz'
            state_loaded = load_state_from_file(path)
            check_mapping_equivalence(state, state_loaded)
            check_mapping_equivalence(state._metadata, state_loaded._metadata)
            _ = Basic.load_from_path(path)
            # Loading must re-attach the saved extensions to the schema.
            mock_add_extensions.assert_called_once_with(TestSerializationExtensions.EXTENSIONS)

    def test_add_extensions_metadata(self, schema):
        """Test that add_extensions_metadata doesn't add extensions that are not used"""
        schema_obj = schema()
        assert schema_obj._extensions == {}
        schema_obj.add_extensions_metadata(TestSerializationExtensions.EXTENSIONS)
        assert schema_obj._extensions == {}

    def test_add_extensions_metadata_2(self):
        """Test that add_extensions_metadata doesn't add extensions that are not used.

        In this case we will use a config containing torch, but we will make_component
        on torch so that it can be compiled. After that, we add_extensions_metadata with
        torch, which is a valid extensions for the config (redundant, but valid).
        """
        TORCH_TAG_PREFIX = "torch"
        exclude = ['torch.nn.quantized', 'torch.nn.qat']
        make_component(
            torch.nn.Module,
            TORCH_TAG_PREFIX,
            only_module='torch.nn',
            exclude=exclude
        )
        config = """
!torch.Linear
in_features: 2
out_features: 2
"""
        schema = yaml.load(config)
        schema.add_extensions_metadata({"torch": "torch"})
        assert schema._extensions == {"torch": "torch"}
        # Unused extensions mixed in must still be filtered out.
        mixed_ext = TestSerializationExtensions.EXTENSIONS.copy()
        mixed_ext.update({"torch": "torch"})
        schema.add_extensions_metadata(mixed_ext)
        assert schema._extensions == {"torch": "torch"}

    def test_add_extensions_metadata_3(self, complex_multi_layered_nontorch_root):
        """Test that add_extensions_metadata doesn't add extensions that are not used
        In this case we will use a config containing torch, but we will make_component
        on torch so that it can be compiled. After that, we add_extensions_metadata with
        torch, which is a valid extensions for the config (redundant, but valid).
        """
        TORCH_TAG_PREFIX = "torch"
        exclude = ['torch.nn.quantized', 'torch.nn.qat']
        make_component(
            torch.nn.Module,
            TORCH_TAG_PREFIX,
            only_module='torch.nn',
            exclude=exclude
        )
        schema = complex_multi_layered_nontorch_root(from_config=True, schema=True)
        schema.add_extensions_metadata({"torch": "torch"})

        # This method asserts recursively that torch is added to extensions when
        # there is a subcomponent that uses torch.
        # It returns if at least one component with torch was found, that should
        # always happen based on the complex_multi_layered_nontorch_root.
        def helper(data):
            found = False
            if isinstance(data, Schema):
                if data.component_subclass.__module__.startswith("torch."):
                    found = True
                    assert data._extensions == {"torch": "torch"}
                for val in data.keywords.values():
                    f = helper(val)
                    if f:
                        found = f
            elif isinstance(data, Mapping):
                for val in data.values():
                    f = helper(val)
                    if f:
                        found = f
            return found

        assert helper(schema)
| 35.163636
| 370
| 0.650885
|
ffc6f683fcf9781b23e01bc925af097eac266215
| 546
|
py
|
Python
|
dataset.py
|
aurelienserre/samestats
|
91a4471e8e85cff198b206ec9a36883811215596
|
[
"BSD-3-Clause"
] | null | null | null |
dataset.py
|
aurelienserre/samestats
|
91a4471e8e85cff198b206ec9a36883811215596
|
[
"BSD-3-Clause"
] | null | null | null |
dataset.py
|
aurelienserre/samestats
|
91a4471e8e85cff198b206ec9a36883811215596
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Dataset(nn.Module):
    """Learnable container for the data-set points.

    The coordinates are held in a single ``Parameter`` so an optimizer can
    move the points directly.
    """

    def __init__(self, points):
        """Build the module from the given points.

        :points: (np.array or torch.Tensor) points of the dataset,
            dimensions: (n_samples, n_coords_of_points)
        """
        super().__init__()
        self.points = Parameter(torch.Tensor(points))

    def forward(self):
        """Return the current (learnable) points tensor."""
        return self.points
| 23.73913
| 65
| 0.663004
|
6b94dc083177a8a7307dfad8df32da9d04c2150d
| 4,610
|
py
|
Python
|
02_model_training.py
|
tum-fml/dofos
|
85556764353c15c5b7ae3088a135de8e96d1021e
|
[
"CC0-1.0"
] | null | null | null |
02_model_training.py
|
tum-fml/dofos
|
85556764353c15c5b7ae3088a135de8e96d1021e
|
[
"CC0-1.0"
] | null | null | null |
02_model_training.py
|
tum-fml/dofos
|
85556764353c15c5b7ae3088a135de8e96d1021e
|
[
"CC0-1.0"
] | null | null | null |
# ######################################################################################################################
#
# Skript zum antrainieren von Mustererkennungsmodellen.
#
# Datenbank mit Mustern zu den einzelnen Betriebsvorgängen muss vorhanden sein.
#
# ######################################################################################################################
import os
from collections import Counter
import classes as c
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import joblib
# ######################################################################################################################
# Mapping from numeric class id to operating-condition name (German labels).
# NOTE(review): this dict is not referenced later in this script — kept as
# documentation of the label encoding.
condition = {1: 'aufschlagen_zinke',
             2: 'schleifen',
             3: 'heben',
             4: 'senken',
             5: 'aufschlagen_hubgeruest',
             6: 'fahren',
             7: 'stehen',
             0: 'andere'}
# ######################################################################################################################
# # Set up Data
# # Get all Data together and attache label
# # Filter (median) and clean data from NaN and Inf
# # Scale (min-max) for later training of knn etc.
# Get folder names in database
folder = [name for name in os.listdir("Datenbank_Betriebszustände")]
dataframe_list = []  # All pattern in one list. Every list object is a pandas dataframe.
labels = []  # List of labels for every pattern. Predefined.
# Set up given data with labels
for i in range(len(folder)):
    label = folder[i][0:2]  # two-character folder-name prefix
    operating_condition = folder[i][3:]  # remainder after "NN_"
    path = "Datenbank_Betriebszustände/" + folder[i]
    list_dataframes = c.data(path=path, label=label, operating_condition=operating_condition)
    list_dataframes.list(dataframe_list=dataframe_list, labels=labels)
# median_window_size = 9  # Window size for median filter.
# Median filter and cleaning data (Nan, Inf, etc.)
for i in range(len(dataframe_list)):
    # dataframe_list[i] = c.med_filter(dataframe_list[i], median_window_size)
    dataframe_list[i] = c.clean_data(dataframe_list[i])
    # print(dataframe_list[i][~dataframe_list[i].applymap(np.isreal).all(1)])
    # print(i)
# Min-max scaling for later training of knn etc.
for i in range(len(dataframe_list)):
    dataframe_list[i] = c.normalize(dataframe_list[i])
print()
print("Dataframes in List: " + str(len(dataframe_list)))
print("Labels in List: " + str(len(labels)))
# Comment out if models to differentiate all states is desired
# NOTE(review): if labels are still two-character strings (folder prefixes),
# x == 1 / x == 2 are always False and every label collapses to 0.0 —
# confirm c.data/list convert labels to numbers before relying on this.
labels = [x if x == 1 or x == 2 else 0.0 for x in labels ]
# ######################################################################################################################
# # Features Extraktion for each pattern
# 0: Peak-to-Peak, 1: euclidean, 2: FFT, 3: Min, 4: Max, 5: Varianz, 6: Standard Deviation, 7: Skewness, 8: Kurtosis
# Select the desired Features from the top selection and put the numbers into the array.
features = [0, 4, 5]
feature_matrix = np.zeros((len(dataframe_list), 3 * (len(features))))
extraction = c.features(len(features), feature_matrix, 2)

# Feature id -> name of the extraction method on the features helper.
# Each selected feature fills three consecutive columns (one per axis);
# getattr is resolved lazily so only selected extractors must exist.
extractor_names = {
    0: 'peak_to_peak',
    1: 'euclidean_Dist',
    2: 'fft',
    3: 'min',
    4: 'max',
    5: 'var',
    6: 'std',
    7: 'skewness',
    8: 'kurtosis',
}
l = 0
for feature_id in range(9):
    if feature_id not in features:
        continue
    extract = getattr(extraction, extractor_names[feature_id])
    for n in range(len(dataframe_list)):
        extract(dataframe_list[n], n, l)
    l += 3
# ######################################################################################################################
# Model Training
# Train four classifier types on the extracted feature matrix.
model = c.training(features=feature_matrix, labels=labels)
knn = model.knn(10)  # k-nearest-neighbours with k = 10
svm = model.svm()
dt = model.dt()
rf = model.rf()
# ######################################################################################################################
# # Model Saving
# # Uncomment to save models
# save_model = c.save_model(knn, svm, dt, rf)
# save_model.save()
| 33.405797
| 120
| 0.55141
|
5d535200e7549ca3e8a85332ceb9b4a4f83e874e
| 2,777
|
py
|
Python
|
projects/PartialReID/partialreid/dsr_evaluation.py
|
lb-y/fast-reid
|
ec500abc94bf69774f3f6c2e4848edacb0d6761b
|
[
"Apache-2.0"
] | 1
|
2021-05-31T01:34:36.000Z
|
2021-05-31T01:34:36.000Z
|
projects/PartialReID/partialreid/dsr_evaluation.py
|
lb-y/fast-reid
|
ec500abc94bf69774f3f6c2e4848edacb0d6761b
|
[
"Apache-2.0"
] | null | null | null |
projects/PartialReID/partialreid/dsr_evaluation.py
|
lb-y/fast-reid
|
ec500abc94bf69774f3f6c2e4848edacb0d6761b
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import copy
import logging
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
from fastreid.evaluation.evaluator import DatasetEvaluator
from fastreid.evaluation.rank import evaluate_rank
from fastreid.evaluation.roc import evaluate_roc
from .dsr_distance import compute_dsr_dist
# Module-level logger for this evaluator.
logger = logging.getLogger('fastreid.partialreid.dsr_evaluation')
class DsrEvaluator(DatasetEvaluator):
    """ReID evaluator with optional Deep Spatial feature Reconstruction (DSR)
    distance refinement for partial person re-identification."""

    def __init__(self, cfg, num_query, output_dir=None):
        self.cfg = cfg
        # The first `num_query` processed samples are queries; the rest gallery.
        self._num_query = num_query
        self._output_dir = output_dir
        self.features = []          # per-batch L2-normalized global features
        self.spatial_features = []  # per-batch spatial feature maps (numpy)
        self.scores = []            # per-batch DSR scores
        self.pids = []              # person ids, one per sample
        self.camids = []            # camera ids, one per sample

    def reset(self):
        # Drop everything accumulated by process() before a new evaluation.
        self.features = []
        self.spatial_features = []
        self.scores = []
        self.pids = []
        self.camids = []

    def process(self, outputs):
        # outputs appears to be ((global_feat, spatial_feat, score), pids,
        # camids) — TODO confirm against the inference loop.
        self.features.append(F.normalize(outputs[0][0]).cpu())
        outputs1 = F.normalize(outputs[0][1].data).cpu().numpy()
        self.spatial_features.append(outputs1)
        self.scores.append(outputs[0][2])
        self.pids.extend(outputs[1].cpu().numpy())
        self.camids.extend(outputs[2].cpu().numpy())

    def evaluate(self):
        """Compute CMC rank-1, mAP and mINP over all accumulated outputs."""
        features = torch.cat(self.features, dim=0)
        spatial_features = np.vstack(self.spatial_features)
        scores = torch.cat(self.scores, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(self.pids[:self._num_query])
        query_camids = np.asarray(self.camids[:self._num_query])
        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(self.pids[self._num_query:])
        gallery_camids = np.asarray(self.camids[self._num_query:])
        # Cosine distance (features were L2-normalized in process()).
        dist = 1 - torch.mm(query_features, gallery_features.t()).numpy()
        logger.info("Testing without DSR setting")
        self._results = OrderedDict()
        if self.cfg.TEST.DSR.ENABLED:
            # Refine the distance matrix with spatial reconstruction distance.
            topk = self.cfg.TEST.DSR.TOPK
            dist = compute_dsr_dist(spatial_features[:self._num_query], spatial_features[self._num_query:], dist,
                                    scores[:self._num_query], topk)
            logger.info("Testing with DSR setting")
        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        self._results['R-1'] = cmc[0]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP
        return copy.deepcopy(self._results)
| 33.865854
| 113
| 0.651422
|
1e63db39ad3c237fb1d06e7c50e04023a8e528a3
| 2,386
|
py
|
Python
|
contracts/tests/test_fee_bank_contract.py
|
agatsoh/shutter
|
d8b99f8c3e9863838eeef935ffdd90dc8e9b1d5b
|
[
"MIT"
] | 13
|
2021-07-23T21:03:57.000Z
|
2022-03-29T19:38:07.000Z
|
contracts/tests/test_fee_bank_contract.py
|
derekbar90/shutter
|
27ff92ca3e7d80213d01821886f6bc8a10f81239
|
[
"MIT"
] | 22
|
2021-04-12T09:02:12.000Z
|
2021-06-16T06:26:37.000Z
|
contracts/tests/test_fee_bank_contract.py
|
derekbar90/shutter
|
27ff92ca3e7d80213d01821886f6bc8a10f81239
|
[
"MIT"
] | 3
|
2021-08-06T20:17:59.000Z
|
2021-12-08T20:04:13.000Z
|
from typing import Any
from typing import Sequence
import brownie
import pytest
from brownie.network.account import Account
@pytest.fixture
def depositor(accounts: Sequence[Account]) -> Account:
    """Pre-funded test account that sends deposits to the fee bank."""
    return accounts[1]


@pytest.fixture
def receiver(accounts: Sequence[Account]) -> Account:
    """Test account credited with the deposited fees."""
    return accounts[2]
def test_deposit(fee_bank_contract: Any, depositor: Account, receiver: Account) -> None:
    """Deposits accumulate per receiver and each emits exactly one event."""
    assert fee_bank_contract.deposits(receiver) == 0
    amounts = [100, 200, 1, 500]
    for i, amount in enumerate(amounts):
        tx = fee_bank_contract.deposit(receiver, {"from": depositor, "value": amount})
        # Contract balance for the receiver is the running total so far.
        assert fee_bank_contract.deposits(receiver) == sum(amounts[: i + 1])
        assert len(tx.events) == 1
        assert tx.events[0] == {
            "depositor": depositor,
            "receiver": receiver,
            "amount": amount,
            "totalAmount": sum(amounts[: i + 1]),
        }
def test_withdraw(
    fee_bank_contract: Any, depositor: Account, receiver: Account, accounts: Sequence[Account]
) -> None:
    """Withdrawals: only the credited receiver may withdraw, never more than
    deposited; partial withdrawals may target a different payout address."""
    fee_bank_contract.deposit(receiver, {"from": depositor, "value": 1000})
    # The depositor has no balance of their own to withdraw.
    with brownie.reverts():
        fee_bank_contract.withdraw({"from": depositor})
    # Over-withdrawal (1001 > 1000) must revert.
    with brownie.reverts():
        fee_bank_contract.withdraw(receiver, 1001, {"from": receiver})
    different_receiver = accounts[-1]
    assert different_receiver != receiver
    receiver_pre_balance = receiver.balance()
    different_receiver_pre_balance = different_receiver.balance()
    # Partial withdrawal paid out to a third-party address.
    tx = fee_bank_contract.withdraw(different_receiver, 100, {"from": receiver})
    assert fee_bank_contract.deposits(receiver) == 900
    assert different_receiver.balance() == different_receiver_pre_balance + 100
    assert receiver.balance() == receiver_pre_balance
    assert len(tx.events) == 1
    assert tx.events[0] == {
        "sender": receiver,
        "receiver": different_receiver,
        "amount": 100,
        "totalAmount": 900,
    }
    # No-argument withdraw drains the remaining balance to the sender.
    tx = fee_bank_contract.withdraw({"from": receiver})
    assert fee_bank_contract.deposits(receiver) == 0
    assert receiver.balance() == receiver_pre_balance + 900
    assert different_receiver.balance() == different_receiver_pre_balance + 100
    assert len(tx.events) == 1
    assert tx.events[0] == {
        "sender": receiver,
        "receiver": receiver,
        "amount": 900,
        "totalAmount": 0,
    }
| 32.684932
| 94
| 0.672674
|
611517a6170b22927e5da06a3c9453942c17621b
| 545
|
py
|
Python
|
filekeep/xml.py
|
goncalomb/filekeep
|
653de1b8c859cb3ec2e7203259125989f3618f56
|
[
"MIT"
] | null | null | null |
filekeep/xml.py
|
goncalomb/filekeep
|
653de1b8c859cb3ec2e7203259125989f3618f56
|
[
"MIT"
] | null | null | null |
filekeep/xml.py
|
goncalomb/filekeep
|
653de1b8c859cb3ec2e7203259125989f3618f56
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
def read(path):
    """Parse the XML file at *path* and return its root element.

    Whitespace-only ``text``/``tail`` nodes (typically pretty-printing
    artifacts) are normalized to ``None`` so that re-serializing through
    :func:`write` does not accumulate indentation whitespace.
    """
    root = ET.parse(path).getroot()
    for elem in root.iter('*'):
        # PEP 8 (E711): compare against None with `is not`, never `!=`.
        if elem.text is not None and elem.text.strip() == "":
            elem.text = None
        if elem.tail is not None and elem.tail.strip() == "":
            elem.tail = None
    return root
def write(path, root):
    """Serialize *root* to *path* as pretty-printed, UTF-8 encoded XML."""
    raw = ET.tostring(root, encoding="UTF-8")
    # Round-trip through minidom purely for indentation.
    pretty = minidom.parseString(raw).toprettyxml(indent=" ", encoding="UTF-8")
    with open(path, "wb") as fp:
        fp.write(pretty)
| 30.277778
| 77
| 0.59633
|
f3d9a9ad120b2fd74a5172a6279ac0c9ac56bea9
| 5,230
|
py
|
Python
|
python/100daysofpython/r1d8_data_structures.py
|
juancarlosqr/datascience
|
2e4d78365b059a3e501e988bee53970ac0d718fc
|
[
"MIT"
] | null | null | null |
python/100daysofpython/r1d8_data_structures.py
|
juancarlosqr/datascience
|
2e4d78365b059a3e501e988bee53970ac0d718fc
|
[
"MIT"
] | null | null | null |
python/100daysofpython/r1d8_data_structures.py
|
juancarlosqr/datascience
|
2e4d78365b059a3e501e988bee53970ac0d718fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def run_list():
    """Demonstrate list basics: len, iteration, append, sort, del, slicing."""
    print('\nlists')
    # shopping list
    shop = ['apple', 'mango', 'carrot', 'banana']
    print(1, 'i have', len(shop), 'items')
    print(2, 'the items are', end=' ')
    for item in shop:
        print(item, end=' ')
    print()
    print(3, 'adding rice')
    shop.append('rice')
    print(4, 'shop list', shop)
    shop.sort()  # in-place, returns None
    print(5, 'sort', shop)
    print(6, 'first item is', shop[0])
    olditem = shop[0]
    del shop[0]
    print(7, 'i bought', olditem)
    print(8, 'shop list now', shop)
    print(9, shop[::2])  # third argument on slicing is the step
def run_tuple():
    """Demonstrate tuples: immutability, nesting, and 1-element syntax."""
    zoo = ('python', 'elephant', 'penguin')
    print('\ntuples')
    print(1, 'numbers of animals', len(zoo))
    print(2, 'zoo', zoo)
    # parentheses not required but are a good idea
    new_zoo = 'monkey', 'camel', zoo
    print(3, 'cages in the new zoo', len(new_zoo))
    print(4, 'new zoo', new_zoo)
    print(5, 'animals from old zoo', new_zoo[2])
    print(6, 'last animal from old zoo is', new_zoo[2][2])
    print(7, 'number of animals in new zoo', len(new_zoo)-1+len(new_zoo[2]))
    # (2) is just a parenthesized int; the trailing comma makes a tuple.
    not_a_tuple = (2)
    print(8, type(not_a_tuple))
    a_tuple = (2,)
    print(9, type(a_tuple), len(a_tuple))
def run_dict():
    """Demonstrate dicts: lookup, del, len, items() iteration, membership."""
    print('\ndictionaries')
    # ab is short for address book
    ab = {
        'swaroop': 'swaroop@swaroop.com',
        'larry': 'larry@swaroop.com',
        'matsumoto': 'matsumoto@swaroop.com',
        'spammer': 'spammer@swaroop.com',
    }
    print(1, 'address book', ab)
    print(2, 'swaroop email is', ab['swaroop'])
    del ab['spammer']
    print(3, 'address book', ab)
    print(4, f'there are {len(ab)} contacts in the address book')
    for name, email in ab.items():
        print(f'{name}\'s email is {email}')
    ab['guido'] = 'guido@python.org'
    if 'guido' in ab:
        print(5, 'we have guido!')
def run_set():
    """Demonstrate sets: membership, copy, superset/subset, intersection."""
    print('\nsets')
    bri = set(['brazil', 'russia', 'india'])
    print(1, 'bri', bri)
    print(2, 'us in bri:', 'us' in bri)
    print(3, 'india in bri:', 'india' in bri)
    bric = bri.copy()  # shallow copy; adding to bric leaves bri unchanged
    bric.add('china')
    print(4, 'bric', bric)
    print(5, 'bric is superset of bri?', bric.issuperset(bri))
    print(6, 'bri is subset of bric', bri.issubset(bric))
    bri.remove('russia')
    print(7, bri & bric)  # or bri.intersect
def run_reference():
    """Demonstrate aliasing vs. copying: assignment shares one object,
    a full slice creates an independent copy (observable via id())."""
    print('\nreferences')
    print(1, 'assignment')
    shop1 = ['apple', 'mango', 'carrot']
    # shop2 is just another name pointing to the same object
    shop2 = shop1
    print(2, 'shop1', id(shop1), shop1)
    print(3, 'shop2', id(shop2), shop2)
    del shop1[0]
    print(4, 'first item removed')
    # Both names see the mutation because they alias one list.
    print(5, 'shop1', id(shop1), shop1)
    print(6, 'shop2', id(shop2), shop2)
    # make a copy by doing a full slice
    shop3 = shop1[:]
    print(7, 'shop1', id(shop1), shop1)
    print(8, 'shop3', id(shop3), shop3)
    del shop1[0]
    print(9, 'first item removed')
    # The copy is unaffected by mutations of the original.
    print(10, 'shop1', id(shop1), shop1)
    print(11, 'shop3', id(shop3), shop3)
def run_string():
    """Demonstrate string methods: find() and join()."""
    print('\nstrings')
    name = 'swaroop'
    print(1, 'find', name.find('oop'))  # index 4
    delimiter = '_*_'
    bric = ['brazil', 'russia', 'india', 'china']
    print(2, delimiter.join(bric))
if __name__ == '__main__':
    # Run every demo in order; expected output is in the docstring below.
    run_list()
    run_tuple()
    run_dict()
    run_set()
    run_reference()
    run_string()
'''
output:
lists
1 i have 4 items
2 the items are apple mango carrot banana
3 adding rice
4 shop list ['apple', 'mango', 'carrot', 'banana', 'rice']
5 sort ['apple', 'banana', 'carrot', 'mango', 'rice']
6 first item is apple
7 i bought apple
8 shop list now ['banana', 'carrot', 'mango', 'rice']
9 ['banana', 'mango']
tuples
1 numbers of animals 3
2 zoo ('python', 'elephant', 'penguin')
3 cages in the new zoo 3
4 new zoo ('monkey', 'camel', ('python', 'elephant', 'penguin'))
5 animals from old zoo ('python', 'elephant', 'penguin')
6 last animal from old zoo is penguin
7 number of animals in new zoo 5
8 <class 'int'>
9 <class 'tuple'> 1
dictionaries
1 address book {'swaroop': 'swaroop@swaroop.com', 'larry': 'larry@swaroop.com', 'matsumoto': 'matsumoto@swaroop.com', 'spammer': 'spammer@swaroop.com'}
2 swaroop email is swaroop@swaroop.com
3 address book {'swaroop': 'swaroop@swaroop.com', 'larry': 'larry@swaroop.com', 'matsumoto': 'matsumoto@swaroop.com'}
4 there are 3 contacts in the address book
swaroop's email is swaroop@swaroop.com
larry's email is larry@swaroop.com
matsumoto's email is matsumoto@swaroop.com
5 we have guido!
sets
1 bri {'india', 'russia', 'brazil'}
2 us in bri: False
3 india in bri: True
4 bric {'china', 'india', 'russia', 'brazil'}
5 bric is superset of bri? True
6 bri is subset of bric True
7 {'india', 'brazil'}
references
1 assignment
2 shop1 4392613192 ['apple', 'mango', 'carrot']
3 shop2 4392613192 ['apple', 'mango', 'carrot']
4 first item removed
5 shop1 4392613192 ['mango', 'carrot']
6 shop2 4392613192 ['mango', 'carrot']
7 shop1 4392613192 ['mango', 'carrot']
8 shop3 4392837192 ['mango', 'carrot']
9 first item removed
10 shop1 4392613192 ['carrot']
11 shop3 4392837192 ['mango', 'carrot']
strings
1 find 4
2 brazil_*_russia_*_india_*_china
'''
| 29.217877
| 151
| 0.616635
|
d9c59d46731da16d7c7e03fb2e55c5a23c3cc848
| 1,929
|
py
|
Python
|
test/test_cnnf_host_audit.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
test/test_cnnf_host_audit.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
test/test_cnnf_host_audit.py
|
hi-artem/twistlock-py
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.cnnf_host_audit import CnnfHostAudit # noqa: E501
from openapi_client.rest import ApiException
class TestCnnfHostAudit(unittest.TestCase):
    """CnnfHostAudit unit test stubs (OpenAPI-generator scaffolding)."""

    def setUp(self):
        # No shared fixtures needed for these construction-only tests.
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test CnnfHostAudit
        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = openapi_client.models.cnnf_host_audit.CnnfHostAudit()  # noqa: E501
        if include_optional :
            # Populate every optional field with a representative value.
            return CnnfHostAudit(
                account_id = '',
                block = True,
                cluster = '',
                count = 56,
                dst_hostname = '',
                dst_port = 56,
                dst_subnet = '',
                msg = '',
                rule_id = 56,
                src_hash = 56,
                src_hostname = '',
                src_subnet = '',
                time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                type = '[\"unexpectedConnection\"]'
            )
        else :
            # The model has no required fields, so an empty call is valid.
            return CnnfHostAudit(
        )

    def testCnnfHostAudit(self):
        """Test CnnfHostAudit"""
        # Smoke test: construction must not raise in either mode.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)


if __name__ == '__main__':
    unittest.main()
| 29.676923
| 124
| 0.589943
|
24e942bfac7339e544024cdb7a182f18b91ff4b0
| 4,133
|
py
|
Python
|
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/crowdpose/res101_crowdpose_320x256.py
|
nightfuryyy/mmpose
|
910d9e31dd9d46e3329be1b7567e6309d70ab64c
|
[
"Apache-2.0"
] | 1,775
|
2020-07-10T01:20:01.000Z
|
2022-03-31T16:31:50.000Z
|
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/crowdpose/res101_crowdpose_320x256.py
|
KHB1698/mmpose
|
93c3a742c540dfb4ca515ad545cef705a07d90b4
|
[
"Apache-2.0"
] | 1,021
|
2020-07-11T11:40:24.000Z
|
2022-03-31T14:32:26.000Z
|
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/crowdpose/res101_crowdpose_320x256.py
|
KHB1698/mmpose
|
93c3a742c540dfb4ca515ad545cef705a07d90b4
|
[
"Apache-2.0"
] | 477
|
2020-07-11T11:27:51.000Z
|
2022-03-31T09:42:25.000Z
|
# MMPose config: top-down heatmap ResNet-101 on CrowdPose at 320x256 input.
# NOTE: this file is executed by mmcv's Config loader; the
# {{_base_.dataset_info}} placeholders below are mmcv base-variable
# substitutions, not plain Python expressions.
_base_ = ['../../../../_base_/datasets/crowdpose.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP')

optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])

# CrowdPose has 14 keypoints; all are used for both training and inference.
channel_cfg = dict(
    num_output_channels=14,
    dataset_joints=14,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
    ],
    inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])

# model settings
model = dict(
    type='TopDown',
    pretrained='torchvision://resnet101',
    backbone=dict(type='ResNet', depth=101),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))

# image_size/heatmap_size are [w, h]; heatmap is input / 4.
data_cfg = dict(
    image_size=[256, 320],
    heatmap_size=[64, 80],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    crowd_matching=False,
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/crowdpose/annotations/'
    'det_for_crowd_test_0.1_0.5.json',
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=6,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]

val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]

# Testing reuses the validation pipeline unchanged.
test_pipeline = val_pipeline

data_root = 'data/crowdpose'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCrowdPoseDataset',
        ann_file=f'{data_root}/annotations/mmpose_crowdpose_trainval.json',
        img_prefix=f'{data_root}/images/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='TopDownCrowdPoseDataset',
        ann_file=f'{data_root}/annotations/mmpose_crowdpose_test.json',
        img_prefix=f'{data_root}/images/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='TopDownCrowdPoseDataset',
        ann_file=f'{data_root}/annotations/mmpose_crowdpose_test.json',
        img_prefix=f'{data_root}/images/',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}))
| 28.701389
| 79
| 0.633922
|
19860ba37fc7699d145a2fc92d3a627b018c790e
| 397
|
py
|
Python
|
disposable_emails/contrib/django/__init__.py
|
maxmalysh/disposable-emails
|
f4772ec3badb8d6df2d64d265fa9c3b2d982bb09
|
[
"MIT"
] | 3
|
2017-05-05T02:00:13.000Z
|
2019-12-06T15:22:58.000Z
|
disposable_emails/contrib/django/__init__.py
|
maxmalysh/disposable-emails
|
f4772ec3badb8d6df2d64d265fa9c3b2d982bb09
|
[
"MIT"
] | null | null | null |
disposable_emails/contrib/django/__init__.py
|
maxmalysh/disposable-emails
|
f4772ec3badb8d6df2d64d265fa9c3b2d982bb09
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from disposable_emails import is_disposable_email, extract_domain
def disposable_validator(value):
    """Django field validator.

    Raises ValidationError when *value* is an email address whose domain is a
    known disposable-email provider; otherwise returns None.
    """
    if is_disposable_email(value):
        raise ValidationError(
            # Only the domain (not the full address) is echoed in the message.
            _('%(value)s is a blacklisted email provider'),
            params={'value': extract_domain(value)},
        )
| 33.083333
| 65
| 0.732997
|
23c9f09cdde78e48b1c8bd19b4d107ee1574c800
| 31
|
py
|
Python
|
src/__init__.py
|
mancaf/planetarium
|
507fe252778d3c5143b522193cf25c7fdb2412aa
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
mancaf/planetarium
|
507fe252778d3c5143b522193cf25c7fdb2412aa
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
mancaf/planetarium
|
507fe252778d3c5143b522193cf25c7fdb2412aa
|
[
"MIT"
] | null | null | null |
from . import core, gui, parse
| 15.5
| 30
| 0.709677
|
b22a10d1d99899bbb8ec94d5103377a392c7849a
| 1,824
|
py
|
Python
|
core/gui/elements.py
|
ranjian0/triggered_python
|
bcd95da3e82b384f9e1274c90af000986d2bad18
|
[
"MIT"
] | null | null | null |
core/gui/elements.py
|
ranjian0/triggered_python
|
bcd95da3e82b384f9e1274c90af000986d2bad18
|
[
"MIT"
] | null | null | null |
core/gui/elements.py
|
ranjian0/triggered_python
|
bcd95da3e82b384f9e1274c90af000986d2bad18
|
[
"MIT"
] | null | null | null |
import pyglet as pg
class LabelElement(pg.text.Label):
    """pyglet text label anchored at its top-left corner, with support for
    re-homing its vertices into an external batch/group."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Anchor at top-left so (x, y) positions the label's upper-left corner.
        self.anchor_y = "top"
        self.anchor_x = "left"

    def update_batch(self, batch, group):
        """Move this label's rendering into *batch* under *group*.

        Rebuilds the layout's internal group hierarchy (background,
        foreground, decoration) parented to *group*, then forces a vertex
        rebuild via ``_update``.  Mirrors pyglet's internal TextLayout group
        setup -- presumably tied to a specific pyglet version; verify on
        upgrade.
        """
        self.batch = batch
        layout = pg.text.layout
        self.top_group = layout.TextLayoutGroup(group)
        self.background_group = pg.graphics.OrderedGroup(0, self.top_group)
        self.foreground_group = layout.TextLayoutForegroundGroup(
            1, self.top_group
        )
        self.foreground_decoration_group = layout.TextLayoutForegroundDecorationGroup(
            2, self.top_group
        )
        self._update()
class InputElement(object):
    """Single-line editable text field built from pyglet's incremental text
    layout plus a caret."""

    def __init__(self, text):
        self.document = pg.text.document.UnformattedDocument(text)
        # Dummy 1x1 size; the real size is applied when update_batch rebuilds
        # the layout.
        self.layout = pg.text.layout.IncrementalTextLayout(
            self.document, 1, 1, multiline=False
        )
        self.caret = pg.text.caret.Caret(self.layout)

    def _get_text(self):
        return self.document.text

    def _set_text(self, text):
        self.document.text = text

    # Expose the document's text as a plain attribute.
    text = property(_get_text, _set_text)

    def update_batch(self, batch, group):
        """Recreate the layout/caret inside *batch* under *group*.

        IncrementalTextLayout cannot simply be re-batched, so the old layout
        and caret are deleted and rebuilt with the same document and size.
        """
        self.caret.delete()
        self.layout.delete()
        # workaround for pyglet issue 408
        self.layout.batch = None
        if self.layout._document:
            self.layout._document.remove_handlers(self.layout)
        self.layout._document = None
        # end workaround
        self.layout = pg.text.layout.IncrementalTextLayout(
            self.document,
            self.layout.width,
            self.layout.height,
            multiline=False,
            batch=batch,
            group=group,
        )
        # Fresh caret for the new layout; hidden until focus is given.
        self.caret = pg.text.caret.Caret(self.layout)
        self.caret.visible = False
| 29.419355
| 86
| 0.618969
|
4af8331f007e6be5a1475039f2fc2f615a90df90
| 5,527
|
py
|
Python
|
tempest/services/compute/json/images_client.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/json/images_client.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/json/images_client.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from tempest.api_schema.response.compute.v2 import images as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
CONF = config.CONF
class ImagesClientJSON(rest_client.RestClient):
    """Tempest JSON REST client for the Nova (compute) images API.

    Every call validates the response body against the corresponding JSON
    schema before returning ``(resp, body)``.
    """

    def __init__(self, auth_provider):
        super(ImagesClientJSON, self).__init__(auth_provider)
        self.service = CONF.compute.catalog_type
        # Polling cadence/limit used by wait_for_image_status.
        self.build_interval = CONF.compute.build_interval
        self.build_timeout = CONF.compute.build_timeout

    def create_image(self, server_id, name, meta=None):
        """Creates an image of the original server."""
        post_body = {
            'createImage': {
                'name': name,
            }
        }
        if meta is not None:
            post_body['createImage']['metadata'] = meta
        post_body = json.dumps(post_body)
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        self.validate_response(schema.create_image, resp, body)
        return resp, body

    def list_images(self, params=None):
        """Returns a list of all images filtered by any parameters."""
        url = 'images'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images, resp, body)
        return resp, body['images']

    def list_images_with_detail(self, params=None):
        """Returns a detailed list of images filtered by any parameters."""
        url = 'images/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_images_details, resp, body)
        return resp, body['images']

    def get_image(self, image_id):
        """Returns the details of a single image."""
        resp, body = self.get("images/%s" % str(image_id))
        self.expected_success(200, resp.status)
        body = json.loads(body)
        self.validate_response(schema.get_image, resp, body)
        return resp, body['image']

    def delete_image(self, image_id):
        """Deletes the provided image."""
        resp, body = self.delete("images/%s" % str(image_id))
        self.validate_response(schema.delete, resp, body)
        return resp, body

    def wait_for_image_status(self, image_id, status):
        """Waits for an image to reach a given status."""
        waiters.wait_for_image_status(self, image_id, status)

    def list_image_metadata(self, image_id):
        """Lists all metadata items for an image."""
        resp, body = self.get("images/%s/metadata" % str(image_id))
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return resp, body['metadata']

    def set_image_metadata(self, image_id, meta):
        """Sets the metadata for an image (PUT replaces all metadata)."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.put('images/%s/metadata' % str(image_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return resp, body['metadata']

    def update_image_metadata(self, image_id, meta):
        """Updates the metadata for an image (POST merges with existing)."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.post('images/%s/metadata' % str(image_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.image_metadata, resp, body)
        return resp, body['metadata']

    def get_image_metadata_item(self, image_id, key):
        """Returns the value for a specific image metadata key."""
        resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return resp, body['meta']

    def set_image_metadata_item(self, image_id, key, meta):
        """Sets the value for a specific image metadata key."""
        post_body = json.dumps({'meta': meta})
        resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
                              post_body)
        body = json.loads(body)
        self.validate_response(schema.image_meta_item, resp, body)
        return resp, body['meta']

    def delete_image_metadata_item(self, image_id, key):
        """Deletes a single image metadata key/value pair."""
        resp, body = self.delete("images/%s/metadata/%s" %
                                 (str(image_id), key))
        self.validate_response(schema.delete, resp, body)
        return resp, body

    def is_resource_deleted(self, id):
        # Used by tempest's cleanup waiters: deletion is confirmed once the
        # GET raises NotFound.
        try:
            self.get_image(id)
        except exceptions.NotFound:
            return True
        return False
| 37.856164
| 79
| 0.637778
|
7a017ef2a6640c77f846412cef08139ee5f1db44
| 672
|
py
|
Python
|
examples/cursor_v1_1.py
|
haleelsada/tweepy
|
22d47d9ae0e06a0c7d34a3871c2ea89ef71ac45c
|
[
"MIT"
] | null | null | null |
examples/cursor_v1_1.py
|
haleelsada/tweepy
|
22d47d9ae0e06a0c7d34a3871c2ea89ef71ac45c
|
[
"MIT"
] | null | null | null |
examples/cursor_v1_1.py
|
haleelsada/tweepy
|
22d47d9ae0e06a0c7d34a3871c2ea89ef71ac45c
|
[
"MIT"
] | null | null | null |
import tweepy
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# The app can only retrieve fixed number of objects per
# request, with pagination it could be solved and more than
# that amount can be gathered
for status in tweepy.Cursor(api.search_tweets, "search query here",
count=100).items(200):
print(status.text)
for page in tweepy.Cursor(api.get_followers, screen_name="user name here",
count=100).pages(3):
print(len(page))
| 28
| 74
| 0.700893
|
dca200ab30789415540e9a485f0749fcce23cfbe
| 1,353
|
py
|
Python
|
briskGenerator/brisk.py
|
itssmutnuri/AugPy
|
2bf5639700ae60efbf375017a70400fa486f862f
|
[
"MIT"
] | 6
|
2019-10-12T06:58:32.000Z
|
2021-04-01T17:38:17.000Z
|
briskGenerator/brisk.py
|
muralikrishnarar/AugPy
|
2bf5639700ae60efbf375017a70400fa486f862f
|
[
"MIT"
] | 1
|
2020-10-27T15:09:08.000Z
|
2020-10-27T18:49:09.000Z
|
briskGenerator/brisk.py
|
muralikrishnarar/AugPy
|
2bf5639700ae60efbf375017a70400fa486f862f
|
[
"MIT"
] | 3
|
2019-10-12T07:00:21.000Z
|
2021-02-14T01:25:23.000Z
|
#source: https://www.andreasjakl.com/basics-of-ar-anchors-keypoints-feature-detection/
import cv2
# Demo: detect BRISK keypoints and descriptors in a reference image.

# 1. Load the original image
img = cv2.imread('reference/cover.jpg')

# 2. Create BRISK algorithm
# OpenCV default threshold = 30, octaves = 3
# Using 4 octaves as cited as typical value by the original paper by Leutenegger et al.
# Using 70 as detection threshold similar to real-world example of this paper
brisk = cv2.BRISK_create(70,4)

# 3. Combined call to let the BRISK implementation detect keypoints
# as well as calculate the descriptors, based on the image.
# These are returned in two arrays.
(kps, descs) = brisk.detectAndCompute(img, None)

# 4. Print the number of keypoints and descriptors found
print("# keypoints: {}, descriptors: {}".format(len(kps), descs.shape))
# To verify: how many bits are contained in a feature descriptor?
# Should be 64 * 8 = 512 bits according to the algorithm paper.
print(len(descs[1]) * 8)

# 5. Use the generic drawKeypoints method from OpenCV to draw the
# calculated keypoints into the original image.
# The flag for rich keypoints also draws circles to indicate
# direction and scale of the keypoints.
imgBrisk = cv2.drawKeypoints(img, kps, img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# 6. Finally, write the resulting image to the disk
cv2.imwrite('brisk_keypoints.jpg', imgBrisk)
| 41
| 93
| 0.761271
|
04b4a8c2449b6d0b5abd8fb47fd50c4c396aeebf
| 7,409
|
py
|
Python
|
libcloud/dns/drivers/softlayer.py
|
carletes/libcloud
|
6be31b9d57b4a94d91320d4c33e94071759fa876
|
[
"Apache-2.0"
] | 3
|
2015-09-11T15:42:16.000Z
|
2021-05-12T01:10:05.000Z
|
libcloud/dns/drivers/softlayer.py
|
carletes/libcloud
|
6be31b9d57b4a94d91320d4c33e94071759fa876
|
[
"Apache-2.0"
] | null | null | null |
libcloud/dns/drivers/softlayer.py
|
carletes/libcloud
|
6be31b9d57b4a94d91320d4c33e94071759fa876
|
[
"Apache-2.0"
] | 3
|
2016-02-08T23:38:18.000Z
|
2019-11-05T00:31:34.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'SoftLayerDNSDriver'
]
from libcloud.common.softlayer import SoftLayerConnection
from libcloud.common.softlayer import SoftLayerObjectDoesntExist
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
VALID_RECORD_EXTRA_PARAMS = ['priority', 'ttl']
class SoftLayerDNSDriver(DNSDriver):
    """libcloud DNS driver for the SoftLayer DNS service.

    Talks to the SoftLayer XML-RPC-style API via ``SoftLayerConnection``;
    zones map to ``SoftLayer_Dns_Domain`` objects and records to
    ``SoftLayer_Dns_Domain_ResourceRecord`` objects.
    """
    type = Provider.SOFTLAYER
    name = 'Softlayer DNS'
    website = 'https://www.softlayer.com'
    connectionCls = SoftLayerConnection

    # libcloud RecordType -> SoftLayer record-type string.
    RECORD_TYPE_MAP = {
        RecordType.A: 'a',
        RecordType.AAAA: 'aaaa',
        RecordType.CNAME: 'cname',
        RecordType.MX: 'mx',
        RecordType.NS: 'ns',
        RecordType.PTR: 'ptr',
        RecordType.SOA: 'soa',
        RecordType.SPF: 'spf',
        RecordType.SRV: 'srv',
        RecordType.TXT: 'txt',
    }

    def create_zone(self, domain, ttl=None, extra=None):
        """Create a new (master) zone for *domain*.

        NOTE(review): the ``ttl`` and ``extra`` arguments are accepted for
        interface compatibility but not forwarded to the API; the returned
        Zone always reports ttl=3600 -- confirm whether this is intentional.
        """
        self.connection.set_context({'resource': 'zone', 'id': domain})
        data = {
            'name': domain,
            'resourceRecords': []
        }
        response = self.connection.request(
            'SoftLayer_Dns_Domain', 'createObject', data
        ).object
        zone = Zone(id=response['id'], domain=domain,
                    type='master', ttl=3600, driver=self)
        return zone

    def get_zone(self, zone_id):
        """Return the Zone with *zone_id*; raise ZoneDoesNotExistError if absent."""
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        try:
            response = self.connection.request(
                'SoftLayer_Dns_Domain', 'getObject', id=zone_id
            ).object
        except SoftLayerObjectDoesntExist:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone_id)
        return self._to_zone(response)

    def delete_zone(self, zone):
        """Delete *zone*; return True on success, raise ZoneDoesNotExistError otherwise."""
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        try:
            self.connection.request(
                'SoftLayer_Dns_Domain', 'deleteObject', id=zone.id
            ).object
        except SoftLayerObjectDoesntExist:
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone.id)
        else:
            return True

    def iterate_zones(self):
        """Yield every Zone in the account ('.' matches all domain names)."""
        zones_list = self.connection.request(
            'SoftLayer_Dns_Domain', 'getByDomainName', '.'
        ).object
        for item in zones_list:
            yield self._to_zone(item)

    def iterate_records(self, zone):
        """Yield every Record belonging to *zone*."""
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        records_list = self.connection.request(
            'SoftLayer_Dns_Domain', 'getResourceRecords', id=zone.id
        ).object
        for item in records_list:
            yield self._to_record(item, zone=zone)

    def get_record(self, zone_id, record_id):
        """Return a single Record; raise RecordDoesNotExistError if absent."""
        try:
            record = self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord',
                'getObject',
                id=record_id
            ).object
            return self._to_record(record, zone=self.get_zone(zone_id))
        except SoftLayerObjectDoesntExist:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

    def delete_record(self, record):
        """Delete *record*; return True on success."""
        try:
            self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord',
                'deleteObject',
                id=record.id
            ).object
        except SoftLayerObjectDoesntExist:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record.id)
        else:
            return True

    def create_record(self, name, zone, type, data, extra=None):
        """Create a record in *zone*.

        Supported ``extra`` keys: ttl, refresh, retry, expire, priority
        (priority is sent as SoftLayer's ``mxPriority``).
        """
        params = {
            'domainId': zone.id,
            'type': self.RECORD_TYPE_MAP[type],
            'host': name,
            'data': data
        }
        if extra:
            if extra.get('ttl'):
                params['ttl'] = extra['ttl']
            if extra.get('refresh'):
                params['refresh'] = extra['refresh']
            if extra.get('retry'):
                params['retry'] = extra['retry']
            if extra.get('expire'):
                params['expire'] = extra['expire']
            if extra.get('priority'):
                params['mxPriority'] = extra['priority']
        response = self.connection.request(
            'SoftLayer_Dns_Domain_ResourceRecord',
            'createObject',
            params
        ).object
        return self._to_record(response, zone=zone)

    def update_record(
            self, record, name=None, type=None, data=None, extra=None):
        """Update fields of *record*; only the provided arguments are changed.

        Returns the refreshed Record on success, False if the edit call
        reported failure.
        """
        params = {}
        if type:
            params['type'] = self.RECORD_TYPE_MAP[type]
        if name:
            params['host'] = name
        if data:
            params['data'] = data
        if extra:
            if extra.get('ttl'):
                params['ttl'] = extra['ttl']
            if extra.get('refresh'):
                params['refresh'] = extra['refresh']
            if extra.get('retry'):
                params['retry'] = extra['retry']
            if extra.get('expire'):
                params['expire'] = extra['expire']
            if extra.get('priority'):
                params['mxPriority'] = extra['priority']
        response = self.connection.request(
            'SoftLayer_Dns_Domain_ResourceRecord',
            'editObject',
            params,
            id=record.id,
        ).object
        if response:
            # editObject returns a boolean, so re-fetch to get fresh fields.
            changed_record = self.connection.request(
                'SoftLayer_Dns_Domain_ResourceRecord',
                'getObject',
                id=record.id,
            ).object
            return self._to_record(changed_record, zone=record.zone)
        else:
            return False

    def _to_zone(self, item):
        """Convert a SoftLayer_Dns_Domain dict into a libcloud Zone."""
        ttl = item.get('ttl', 3600)
        zone = Zone(id=item['id'], domain=item['name'],
                    type='master', ttl=ttl, driver=self)
        return zone

    def _to_record(self, item, zone=None):
        """Convert a SoftLayer_Dns_Domain_ResourceRecord dict into a Record."""
        extra = {
            'ttl': item['ttl'],
            'expire': item['expire'],
            'mxPriority': item['mxPriority'],
            'refresh': item['refresh'],
            'retry': item['retry'],
        }
        record = Record(
            id=item['id'],
            name=item['host'],
            type=self._string_to_record_type(item['type']),
            data=item['data'],
            zone=zone,
            driver=self,
            extra=extra
        )
        return record
| 34.621495
| 77
| 0.565933
|
f36288f4f4fb0c31d4e9725f56bf1190a29aa98f
| 19,050
|
py
|
Python
|
kivy/clock.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | 1
|
2020-02-24T19:03:54.000Z
|
2020-02-24T19:03:54.000Z
|
kivy/clock.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | null | null | null |
kivy/clock.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | null | null | null |
'''
Clock object
============
The :class:`Clock` object allows you to schedule a function call in the
future; once or repeatedly at specified intervals::
def my_callback(dt):
pass
# call my_callback every 0.5 seconds
Clock.schedule_interval(my_callback, 0.5)
# call my_callback in 5 seconds
Clock.schedule_once(my_callback, 5)
# call my_callback as soon as possible (usually next frame.)
Clock.schedule_once(my_callback)
.. note::
If the callback returns False, the schedule will be removed.
If you want to schedule a function to call with default arguments, you can use
the `functools.partial
<http://docs.python.org/library/functools.html#functools.partial>`_ python
module::
from functools import partial
def my_callback(value, key, *largs):
pass
Clock.schedule_interval(partial(my_callback, 'my value', 'my key'), 0.5)
Conversely, if you want to schedule a function that doesn't accept the dt
argument, you can use a `lambda
<http://docs.python.org/2/reference/expressions.html#lambda>`_ expression
to write a short function that does accept dt. For Example::
def no_args_func():
print("I accept no arguments, so don't schedule me in the clock")
Clock.schedule_once(lambda dt: no_args_func(), 0.5)
.. note::
You cannot unschedule an anonymous function unless you keep a
reference to it. It's better to add \*args to your function
definition so that it can be called with an arbitrary number of
parameters.
.. important::
The callback is weak-referenced: you are responsible for keeping a
reference to your original object/callback. If you don't keep a
reference, the ClockBase will never execute your callback. For
example::
class Foo(object):
def start(self):
Clock.schedule_interval(self.callback, 0.5)
def callback(self, dt):
print('In callback')
# A Foo object is created and the method start is called.
# Because no reference is kept to the instance returned from Foo(),
# the object will be collected by the Python Garbage Collector and
# your callback will be never called.
Foo().start()
# So you should do the following and keep a reference to the instance
# of foo until you don't need it anymore!
foo = Foo()
foo.start()
.. _schedule-before-frame:
Schedule before frame
---------------------
.. versionadded:: 1.0.5
Sometimes you need to schedule a callback BEFORE the next frame. Starting
from 1.0.5, you can use a timeout of -1::
Clock.schedule_once(my_callback, 0) # call after the next frame
Clock.schedule_once(my_callback, -1) # call before the next frame
The Clock will execute all the callbacks with a timeout of -1 before the
next frame even if you add a new callback with -1 from a running
callback. However, :class:`Clock` has an iteration limit for these
callbacks: it defaults to 10.
If you schedule a callback that schedules a callback that schedules a .. etc
more than 10 times, it will leave the loop and send a warning to the console,
then continue after the next frame. This is implemented to prevent bugs from
hanging or crashing the application.
If you need to increase the limit, set the :attr:`max_iteration` property::
from kivy.clock import Clock
Clock.max_iteration = 20
.. _triggered-events:
Triggered Events
----------------
.. versionadded:: 1.0.5
A triggered event is a way to defer a callback exactly like schedule_once(),
but with some added convenience. The callback will only be scheduled once per
frame even if you call the trigger twice (or more). This is not the case
with :meth:`Clock.schedule_once`::
# will run the callback twice before the next frame
Clock.schedule_once(my_callback)
Clock.schedule_once(my_callback)
# will run the callback once before the next frame
t = Clock.create_trigger(my_callback)
t()
t()
Before triggered events, you may have used this approach in a widget::
def trigger_callback(self, *largs):
Clock.unschedule(self.callback)
Clock.schedule_once(self.callback)
As soon as you call `trigger_callback()`, it will correctly schedule the
callback once in the next frame. It is more convenient to create and bind to
the triggered event than using :meth:`Clock.schedule_once` in a function::
from kivy.clock import Clock
from kivy.uix.widget import Widget
class Sample(Widget):
def __init__(self, **kwargs):
self._trigger = Clock.create_trigger(self.cb)
super(Sample, self).__init__(**kwargs)
self.bind(x=self._trigger, y=self._trigger)
def cb(self, *largs):
pass
Even if x and y changes within one frame, the callback is only run once.
.. note::
:meth:`ClockBase.create_trigger` also has a timeout parameter that
behaves exactly like :meth:`ClockBase.schedule_once`.
'''
__all__ = ('Clock', 'ClockBase', 'ClockEvent', 'mainthread')
from sys import platform
from os import environ
from kivy.context import register_context
from kivy.weakmethod import WeakMethod
from kivy.config import Config
from kivy.logger import Logger
import time
# Pick a platform specific high resolution sleep (usleep) implementation and
# time source.  Three variants:
#   * Windows/Cygwin: a waitable timer object (finer than the 10 ms Sleep()).
#   * Other POSIX: libc's usleep() through ctypes.
#   * Fallback (no ctypes, unreadable libc): plain time.sleep().
try:
    import ctypes
    if platform in ('win32', 'cygwin'):
        # Win32 Sleep function is only 10-millisecond resolution, so
        # instead use a waitable timer object, which has up to
        # 100-nanosecond resolution (hardware and implementation
        # dependent, of course).
        _kernel32 = ctypes.windll.kernel32

        class _ClockBase(object):
            def __init__(self):
                self._timer = _kernel32.CreateWaitableTimerA(None, True, None)

            def usleep(self, microseconds):
                # Negative due time = relative delay, in 100 ns units.
                delay = ctypes.c_longlong(int(-microseconds * 10))
                _kernel32.SetWaitableTimer(
                    self._timer, ctypes.byref(delay), 0,
                    ctypes.c_void_p(), ctypes.c_void_p(), False)
                _kernel32.WaitForSingleObject(self._timer, 0xffffffff)

        # NOTE(review): time.clock is wall-clock on Windows but is
        # deprecated/removed in Python 3.8+ — confirm target Python version.
        _default_time = time.clock
    else:
        if platform == 'darwin':
            _libc = ctypes.CDLL('libc.dylib')
        else:
            _libc = ctypes.CDLL('libc.so')
        _libc.usleep.argtypes = [ctypes.c_ulong]
        _libc_usleep = _libc.usleep

        class _ClockBase(object):
            def usleep(self, microseconds):
                _libc_usleep(int(microseconds))

        _default_time = time.time
except (OSError, ImportError):
    # ImportError: ctypes is not available on python-for-android.
    # OSError: the libc could not be read (e.g. on buildbot: invalid ELF
    # header).
    _default_time = time.time
    _default_sleep = time.sleep

    class _ClockBase(object):
        def usleep(self, microseconds):
            _default_sleep(microseconds / 1000000.)
def _hash(cb):
try:
return cb.__name__
except:
# if a callback with partial is used... use func
try:
return cb.func.__name__
except:
# nothing work, use default hash.
return 'default'
class ClockEvent(object):
    # A single scheduled (or triggerable) callback owned by a ClockBase.
    # The callback is held as a strong reference initially and converted to
    # a weakref by release() (done once per Clock.tick() for all events).

    def __init__(self, clock, loop, callback, timeout, starttime, cid):
        # clock: owning ClockBase
        # loop: True for repeating events (schedule_interval)
        # callback: user callable, invoked with the elapsed dt
        # timeout: seconds between scheduling and firing
        # starttime: reference time used for the first timeout
        # cid: bucket key inside clock._events (see _hash())
        self.clock = clock
        self.cid = cid
        self.loop = loop
        self.weak_callback = None
        self.callback = callback
        self.timeout = timeout
        self._is_triggered = False
        self._last_dt = starttime
        self._dt = 0.

    def __call__(self, *largs):
        """Trigger (schedule) this event; no-op if already scheduled.

        Returns True when the event was newly scheduled, None otherwise.
        """
        # if the event is not yet triggered, do it !
        if self._is_triggered is False:
            self._is_triggered = True
            events = self.clock._events
            cid = self.cid
            if cid not in events:
                events[cid] = []
            events[cid].append(self)
            # update starttime so the timeout counts from "now"
            self._last_dt = self.clock._last_tick
            return True

    def get_callback(self):
        # Return the live callback, or None when only a weakref remains and
        # the referent has been garbage collected.
        callback = self.callback
        if callback is not None:
            return callback
        callback = self.weak_callback
        if callback.is_dead():
            return None
        return callback()

    @property
    def is_triggered(self):
        # True while the event sits in the clock's event table.
        return self._is_triggered

    def cancel(self):
        """Remove the event from the clock's event table, if scheduled."""
        if self._is_triggered:
            events = self.clock._events
            cid = self.cid
            if cid in events and self in events[cid]:
                events[cid].remove(self)
            self._is_triggered = False

    def do(self, dt):
        # Invoke the callback immediately; returns False if it was
        # garbage collected (the caller should then drop the event).
        callback = self.get_callback()
        if callback is None:
            return False
        callback(dt)

    def release(self):
        # Downgrade the strong callback reference to a weakref so the
        # event does not keep its owner alive.
        self.weak_callback = WeakMethod(self.callback)
        self.callback = None

    def tick(self, curtime):
        """Fire the callback if its timeout elapsed; return False when the
        event should be removed from the event table."""
        # timeout happened ? (also fire up to 5 ms early: this improves
        # the timing accuracy of animations, for example).
        if curtime - self._last_dt < self.timeout - 0.005:
            return True

        # calculate current timediff for this event
        self._dt = curtime - self._last_dt
        self._last_dt = curtime

        # get the callback
        callback = self.get_callback()
        if callback is None:
            # referent died: drop the event
            self._is_triggered = False
            return False

        # if it's a trigger, allow to retrigger inside the callback
        if not self.loop:
            self._is_triggered = False

        # call the callback
        ret = callback(self._dt)

        # if it's a once event, don't care about the result,
        # just remove the event
        if not self.loop:
            return False

        # if the user returns False explicitly, remove the event
        if ret is False:
            return False

        return True

    def __repr__(self):
        return '<ClockEvent callback=%r>' % self.get_callback()
class ClockBase(_ClockBase):
    '''A clock object with event support.
    '''
    __slots__ = ('_dt', '_last_fps_tick', '_last_tick', '_fps', '_rfps',
                 '_start_tick', '_fps_counter', '_rfps_counter', '_events',
                 '_frames', '_frames_displayed',
                 '_max_fps', 'max_iteration')

    # Minimum duration worth sleeping for, and the amount we deliberately
    # under-sleep to compensate for sleep() overshoot.
    MIN_SLEEP = 0.005
    SLEEP_UNDERSHOOT = MIN_SLEEP - 0.001

    def __init__(self):
        super(ClockBase, self).__init__()
        self._dt = 0.0001
        self._start_tick = self._last_tick = _default_time()
        self._fps = 0
        self._rfps = 0
        self._fps_counter = 0
        self._rfps_counter = 0
        self._last_fps_tick = None
        self._frames = 0
        self._frames_displayed = 0
        # Event table: bucket key (see _hash()) -> list of ClockEvent.
        self._events = {}
        self._max_fps = float(Config.getint('graphics', 'maxfps'))

        #: .. versionadded:: 1.0.5
        #: When a schedule_once is used with -1, you can add a limit on
        #: how iteration will be allowed. That is here to prevent too much
        #: relayout.
        self.max_iteration = 10

    @property
    def frametime(self):
        '''Time spent between the last frame and the current frame
        (in seconds).

        .. versionadded:: 1.8.0
        '''
        return self._dt

    @property
    def frames(self):
        '''Number of internal frames (not necessarily drawn) from the start
        of the clock.

        .. versionadded:: 1.8.0
        '''
        return self._frames

    @property
    def frames_displayed(self):
        '''Number of displayed frames from the start of the clock.
        '''
        return self._frames_displayed

    def tick(self):
        '''Advance the clock to the next step. Must be called every frame.
        The default clock has a tick() function called by the core Kivy
        framework.'''

        self._release_references()
        # Cleaning the empty event buckets is amortized over 100 frames.
        if self._fps_counter % 100 == 0:
            self._remove_empty()

        # do we need to sleep to honor the maxfps setting?
        if self._max_fps > 0:
            min_sleep = self.MIN_SLEEP
            sleep_undershoot = self.SLEEP_UNDERSHOOT
            fps = self._max_fps
            usleep = self.usleep
            sleeptime = 1 / fps - (_default_time() - self._last_tick)
            while sleeptime - sleep_undershoot > min_sleep:
                usleep(1000000 * (sleeptime - sleep_undershoot))
                sleeptime = 1 / fps - (_default_time() - self._last_tick)

        # tick the current time
        current = _default_time()
        self._dt = current - self._last_tick
        self._frames += 1
        self._fps_counter += 1
        self._last_tick = current

        # refresh the fps/rfps averages once per second
        if self._last_fps_tick is None:
            self._last_fps_tick = current
        elif current - self._last_fps_tick > 1:
            d = float(current - self._last_fps_tick)
            self._fps = self._fps_counter / d
            self._rfps = self._rfps_counter
            self._last_fps_tick = current
            self._fps_counter = 0
            self._rfps_counter = 0

        # process events
        self._process_events()

        return self._dt

    def tick_draw(self):
        '''Tick the drawing counter.
        '''
        self._process_events_before_frame()
        self._rfps_counter += 1
        self._frames_displayed += 1

    def get_fps(self):
        '''Get the current average FPS calculated by the clock.
        '''
        return self._fps

    def get_rfps(self):
        '''Get the current "real" FPS calculated by the clock.
        This counter reflects the real framerate displayed on the screen.

        In contrast to get_fps(), this function returns a counter of the
        number of frames, not the average of frames per second.
        '''
        return self._rfps

    def get_time(self):
        '''Get the last tick made by the clock.'''
        return self._last_tick

    def get_boottime(self):
        '''Get the time in seconds from the application start.'''
        return self._last_tick - self._start_tick

    def create_trigger(self, callback, timeout=0):
        '''Create a Trigger event. Check module documentation for more
        information.

        .. versionadded:: 1.0.5
        '''
        cid = _hash(callback)
        ev = ClockEvent(self, False, callback, timeout, 0, cid)
        # Triggers start out weak-referenced; the caller keeps the event.
        ev.release()
        return ev

    def schedule_once(self, callback, timeout=0):
        '''Schedule an event in <timeout> seconds. If <timeout> is
        unspecified or 0, the callback will be called after the next frame
        is rendered.

        :raises ValueError: if *callback* is not callable.

        .. versionchanged:: 1.0.5
            If the timeout is -1, the callback will be called before the
            next frame (at :meth:`tick_draw`).
        '''
        if not callable(callback):
            raise ValueError('callback must be a callable, got %s' % callback)
        cid = _hash(callback)
        event = ClockEvent(
            self, False, callback, timeout, self._last_tick, cid)
        events = self._events
        if cid not in events:
            events[cid] = []
        events[cid].append(event)
        return event

    def schedule_interval(self, callback, timeout):
        '''Schedule an event to be called every <timeout> seconds.

        :raises ValueError: if *callback* is not callable.
        '''
        if not callable(callback):
            raise ValueError('callback must be a callable, got %s' % callback)
        cid = _hash(callback)
        event = ClockEvent(
            self, True, callback, timeout, self._last_tick, cid)
        events = self._events
        if cid not in events:
            events[cid] = []
        events[cid].append(event)
        return event

    def unschedule(self, callback):
        '''Remove a previously scheduled event.

        *callback* may be either the :class:`ClockEvent` returned by the
        schedule methods or the original callback.
        '''
        events = self._events
        if isinstance(callback, ClockEvent):
            # Bug fix: the previous code read a non-existent ``is_done``
            # attribute, raising AttributeError for any event argument.
            # An event that is not triggered is not in the event table,
            # so there is nothing to remove.
            if not callback.is_triggered:
                return
            cid = callback.cid
            if cid in events:
                for event in events[cid][:]:
                    if event is callback:
                        events[cid].remove(event)
        else:
            cid = _hash(callback)
            if cid in events:
                for event in events[cid][:]:
                    if event.get_callback() == callback:
                        events[cid].remove(event)

    def _release_references(self):
        # Downgrade every remaining strong callback reference to a weakref
        # so scheduled events do not keep their owners alive.
        events = self._events
        for cid in list(events.keys()):
            for event in events[cid]:
                if event.callback is not None:
                    event.release()

    def _remove_empty(self):
        # Drop buckets whose event list became empty.
        events = self._events
        for cid in list(events.keys()):
            if not events[cid]:
                del events[cid]

    def _process_events(self):
        events = self._events
        for cid in list(events.keys()):
            # Iterate over a copy: callbacks may (un)schedule events.
            for event in events[cid][:]:
                if event.tick(self._last_tick) is False:
                    # event may be already removed by the callback
                    if event in events[cid]:
                        events[cid].remove(event)

    def _process_events_before_frame(self):
        # Run all timeout == -1 events, repeating while callbacks keep
        # scheduling new ones, up to max_iteration passes.
        found = True
        count = self.max_iteration
        events = self._events
        while found:
            count -= 1
            if count == -1:
                Logger.critical(
                    'Clock: Warning, too much iteration done before'
                    ' the next frame. Check your code, or increase'
                    ' the Clock.max_iteration attribute')
                break

            # search events that have timeout = -1
            found = False
            for cid in list(events.keys()):
                for event in events[cid][:]:
                    if event.timeout != -1:
                        continue
                    found = True
                    if event.tick(self._last_tick) is False:
                        # event may be already removed by the callback
                        if event in events[cid]:
                            events[cid].remove(event)
def mainthread(func):
    '''Decorator that will schedule the call of the function in the
    mainthread. It can be useful when you use
    :class:`~kivy.network.urlrequest.UrlRequest` or when you do Thread
    programming: you cannot do any OpenGL-related work in a thread.

    Please note that the wrapper will return directly and no result can be
    returned::

        @mainthread
        def callback(self, *args):
            print('The request succeeded!'
                  'This callback is called in the main thread')

        self.req = UrlRequest(url='http://...', on_success=callback)

    .. versionadded:: 1.8.0
    '''
    # Local import keeps the module-level dependency set unchanged.
    from functools import wraps

    # Bug fix: without functools.wraps the decorated function lost its
    # __name__/__doc__, which breaks introspection and debugging.
    @wraps(func)
    def delayed_func(*args, **kwargs):
        def callback_func(dt):
            func(*args, **kwargs)
        Clock.schedule_once(callback_func, 0)
    return delayed_func
# When building the documentation, expose a None placeholder instead of
# creating a real clock (which would read the kivy Config).
if 'KIVY_DOC_INCLUDE' in environ:
    #: Instance of :class:`ClockBase`.
    Clock = None
else:
    # Register the singleton in the kivy context so it can be swapped out
    # (e.g. in tests); this is the module-level ``Clock`` used everywhere.
    Clock = register_context('Clock', ClockBase)
| 31.909548
| 79
| 0.610184
|
ed0b5145ce3b1b1c4056e823032565f1afe8b0a7
| 6,151
|
py
|
Python
|
ocellaris/solver_parts/boundary_conditions/neumann.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 1
|
2017-11-07T12:19:44.000Z
|
2017-11-07T12:19:44.000Z
|
ocellaris/solver_parts/boundary_conditions/neumann.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | null | null | null |
ocellaris/solver_parts/boundary_conditions/neumann.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 2
|
2018-05-02T17:17:01.000Z
|
2019-03-11T13:09:40.000Z
|
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import CodedExpression, OcellarisCppExpression
DEFAULT_ENFORCE_ZERO_FLUX = False
class OcellarisNeumannBC(object):
    """Container for a single Neumann (prescribed gradient) boundary
    condition on one boundary subdomain."""

    def __init__(
        self, simulation, value, subdomain_id, enforce_zero_flux=DEFAULT_ENFORCE_ZERO_FLUX
    ):
        """Store the gradient *value* applied on boundary *subdomain_id*."""
        self.simulation = simulation
        self._value = value
        self.subdomain_id = subdomain_id
        self.enforce_zero_flux = enforce_zero_flux

    def func(self):
        """Return the prescribed boundary gradient value."""
        return self._value

    def ds(self):
        """Return the boundary integral measure restricted to this
        subdomain."""
        return self.simulation.data['ds'](self.subdomain_id)

    def __repr__(self):
        return '<OcellarisNeumannBC on subdomain %d>' % self.subdomain_id
@register_boundary_condition('ConstantGradient')
class NeumannBoundary(BoundaryConditionCreator):
    description = 'A prescribed constant value Neumann condition'

    def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
        """
        Constant-gradient Neumann condition.

        The ``value`` input may be a scalar, or a list with one entry per
        spatial dimension (one condition is then registered per component).
        """
        self.simulation = simulation
        value = inp_dict.get_value('value', required_type='any')
        enforce_zero_flux = inp_dict.get_value(
            'enforce_zero_flux', DEFAULT_ENFORCE_ZERO_FLUX, 'bool'
        )

        if isinstance(value, list):
            assert len(value) == simulation.ndim
            for d in range(simulation.ndim):
                name = '%s%d' % (var_name, d)
                # Bug fix: enforce_zero_flux was previously not forwarded
                # here, which raised TypeError (register_neumann_condition
                # takes four arguments) for any vector-valued input.
                self.register_neumann_condition(
                    name, value[d], subdomain_id, enforce_zero_flux
                )
        else:
            self.register_neumann_condition(var_name, value, subdomain_id, enforce_zero_flux)

    def register_neumann_condition(self, var_name, value, subdomain_id, enforce_zero_flux):
        """
        Register one constant-gradient Neumann condition for *var_name*.
        """
        assert isinstance(value, (float, int))
        df_value = dolfin.Constant(value)

        # Store the boundary condition for use in the solver
        bc = OcellarisNeumannBC(self.simulation, df_value, subdomain_id, enforce_zero_flux)
        bcs = self.simulation.data['neumann_bcs']
        bcs.setdefault(var_name, []).append(bc)
        self.simulation.log.info('    ConstantGradient %r for %s' % (value, var_name))
@register_boundary_condition('CodedGradient')
class CodedNeumannBoundary(BoundaryConditionCreator):
    description = 'A coded Neumann condition'

    def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
        """
        Neumann condition whose value is computed by user supplied Python
        code (scalar or one code string per vector component).
        """
        self.simulation = simulation

        code = inp_dict.get_value('code', required_type='any')
        enforce_zero_flux = inp_dict.get_value(
            'enforce_zero_flux', DEFAULT_ENFORCE_ZERO_FLUX, 'bool'
        )

        if not isinstance(code, list):
            # Scalar field: a single coded expression
            description = 'coded gradient boundary condition for %s' % var_name
            expr = CodedExpression(simulation, code, description)
            self.register_neumann_condition(
                var_name, expr, subdomains, subdomain_id, enforce_zero_flux
            )
            return

        # Vector field: one coded expression per component
        assert len(code) == simulation.ndim
        for d in range(simulation.ndim):
            name = '%s%d' % (var_name, d)
            description = 'coded gradient boundary condition for %s' % name
            sub_code = inp_dict.get_value('code/%d' % d, required_type='string')
            expr = CodedExpression(simulation, sub_code, description)
            self.register_neumann_condition(
                name, expr, subdomains, subdomain_id, enforce_zero_flux
            )

    def register_neumann_condition(
        self, var_name, expr, subdomains, subdomain_id, enforce_zero_flux
    ):
        """
        Store the coded boundary condition so the solver can pick it up
        """
        bc = OcellarisNeumannBC(self.simulation, expr, subdomain_id, enforce_zero_flux)
        bcs = self.simulation.data['neumann_bcs']
        bcs.setdefault(var_name, []).append(bc)
        self.simulation.log.info('    Coded gradient for %s' % var_name)
@register_boundary_condition('CppCodedGradient')
class CppCodedNeumannBoundary(BoundaryConditionCreator):
    description = 'A C++ coded Neumann boundary condition'

    def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
        """
        C++ coded Neumann condition (scalar, or one code string per
        vector component).
        """
        self.simulation = simulation
        self.func_space = simulation.data['V%s' % var_name]

        cpp_code = inp_dict.get_value('cpp_code', required_type='any')
        enforce_zero_flux = inp_dict.get_value(
            'enforce_zero_flux', DEFAULT_ENFORCE_ZERO_FLUX, 'bool'
        )

        if isinstance(cpp_code, list):
            assert len(cpp_code) == simulation.ndim
            for d in range(simulation.ndim):
                name = '%s%d' % (var_name, d)
                sub_code = inp_dict.get_value('cpp_code/%d' % d, required_type='string')
                self.register_neumann_condition(name, sub_code, subdomain_id, enforce_zero_flux)
        else:
            self.register_neumann_condition(var_name, cpp_code, subdomain_id, enforce_zero_flux)

    def register_neumann_condition(self, var_name, cpp_code, subdomain_id, enforce_zero_flux):
        """
        Compile the C++ expression and register the Neumann condition for
        *var_name*.
        """
        # Fixed a typo in the human readable description ("condititon").
        description = 'boundary condition for %s' % var_name
        # Interpolate the expression with the same polynomial degree as
        # the variable's function space.
        P = self.func_space.ufl_element().degree()
        expr = OcellarisCppExpression(self.simulation, cpp_code, description, P, update=True)
        bc = OcellarisNeumannBC(self.simulation, expr, subdomain_id, enforce_zero_flux)
        bcs = self.simulation.data['neumann_bcs']
        bcs.setdefault(var_name, []).append(bc)
        self.simulation.log.info('    C++ coded gradient for %s' % var_name)
| 39.178344
| 96
| 0.654203
|
cb6faaf5785cc042e204cb50723f7b675fc18e21
| 4,785
|
py
|
Python
|
tools/j2objc/j2objc_header_map.py
|
Dyryamo/bazel
|
c60eb5d324da4d81f6be93f442ac6d7576741e8e
|
[
"Apache-2.0"
] | null | null | null |
tools/j2objc/j2objc_header_map.py
|
Dyryamo/bazel
|
c60eb5d324da4d81f6be93f442ac6d7576741e8e
|
[
"Apache-2.0"
] | 3
|
2017-07-10T13:18:04.000Z
|
2018-08-30T19:29:46.000Z
|
tools/j2objc/j2objc_header_map.py
|
Dyryamo/bazel
|
c60eb5d324da4d81f6be93f442ac6d7576741e8e
|
[
"Apache-2.0"
] | 1
|
2022-01-12T18:08:14.000Z
|
2022-01-12T18:08:14.000Z
|
#!/usr/bin/python2.7
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to generate Java class to ObjC header mapping for J2ObjC.
This script generates a text file containing mapping between top-level Java
classes to associated ObjC headers, generated by J2ObjC.
The mapping file is used by dependent J2ObjC transpilation actions to locate
the correct header import paths for dependent Java classes.
Inside the script, we read the Java source files and source jars of a single
Java rule, and parse out the package names from the package statements, using
regular expression matching.
Note that we cannot guarantee 100% correctness by using just regular expression,
but it should be good enough. This allows us to avoid doing any further complex
parsing of the source files and keep the script light-weight without other
dependencies. In the future, we can consider implementing a simple Java lexer
here that correctly parses the package statements out of Java files.
"""
import argparse
import os
import re
import zipfile
_PACKAGE_RE = re.compile(r'(package)\s+([\w\.]+);')
def _get_file_map_entry(java_file_path, java_file):
  """Returns the top-level Java class and header file path tuple.

  Args:
    java_file_path: The file path of the source Java file.
    java_file: The actual file of the source java file.

  Returns:
    A tuple containing top-level Java class and associated header file path.
    Or None if no package statement exists in the source file.
  """
  for raw_line in java_file:
    line = raw_line.strip()
    match = _PACKAGE_RE.search(line)
    if not match:
      continue
    # Characters on the line before the 'package' keyword.
    prefix = line[:match.start(1)]
    if prefix:
      # A line comment hides the whole statement.
      if prefix.startswith('//'):
        continue
      # Otherwise the keyword must be preceded by whitespace, the end of a
      # statement, or the end of a block comment; anything else means the
      # match is part of another token.
      if not (prefix[-1].isspace() or prefix.endswith(';')
              or prefix.endswith('*/')):
        continue
    package_name = match.group(2)
    class_name = os.path.splitext(os.path.basename(java_file_path))[0]
    header_file = os.path.splitext(java_file_path)[0] + '.h'
    return (package_name + '.' + class_name, header_file)
  return None
def main():
  """Parse arguments and write the Java-class -> ObjC-header mapping."""
  parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
  parser.add_argument(
      '--source_files',
      required=False,
      help='The source files')
  parser.add_argument(
      '--source_jars',
      required=False,
      help='The source jars.')
  parser.add_argument(
      '--output_mapping_file',
      required=False,
      help='The output mapping file')
  args, _ = parser.parse_known_args()

  class_to_header_map = dict()

  # Plain .java files listed on the command line.
  if args.source_files:
    for source_file in args.source_files.split(','):
      with open(source_file, 'r') as f:
        entry = _get_file_map_entry(source_file, f)
        if entry:
          class_to_header_map[entry[0]] = entry[1]

  # .java files packed inside source jars.
  if args.source_jars:
    for source_jar in args.source_jars.split(','):
      with zipfile.ZipFile(source_jar, 'r') as jar:
        for jar_entry in jar.namelist():
          if not jar_entry.endswith('.java'):
            continue
          with jar.open(jar_entry) as jar_entry_file:
            entry = _get_file_map_entry(jar_entry, jar_entry_file)
            if entry:
              class_to_header_map[entry[0]] = entry[1]

  # Emit the mapping sorted by class name for deterministic output.
  if args.output_mapping_file:
    with open(args.output_mapping_file, 'w') as output_mapping_file:
      for class_name in sorted(class_to_header_map):
        output_mapping_file.write(
            class_name + '=' + class_to_header_map[class_name] + '\n')


if __name__ == '__main__':
  main()
| 36.807692
| 80
| 0.712226
|
066b46d94e4cf92724f0a51ad876ed84f6d0cbe2
| 176
|
py
|
Python
|
Exercicio_12.py
|
Matheus2037/Exercicio_LP_1B
|
7fab8246e999d7dde0ddb96f2a4ca45216c229d0
|
[
"MIT"
] | null | null | null |
Exercicio_12.py
|
Matheus2037/Exercicio_LP_1B
|
7fab8246e999d7dde0ddb96f2a4ca45216c229d0
|
[
"MIT"
] | null | null | null |
Exercicio_12.py
|
Matheus2037/Exercicio_LP_1B
|
7fab8246e999d7dde0ddb96f2a4ca45216c229d0
|
[
"MIT"
] | null | null | null |
# Read the current salary, apply a 15% raise, and report both values.
salario = float(input("Digite o seu salário: "))
# 1.15 = original salary plus the 15% raise
nsalario = (salario * 1.15)
print("O seu salario antigo era de {}, após o aumento será de {:.2f}" .format(salario, nsalario))
| 35.2
| 97
| 0.681818
|
fbb68af7bb9122ad2f7f51ff0b09e6093590353d
| 36
|
py
|
Python
|
venv/Lib/site-packages/PyQt4Enhanced/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4Enhanced/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4Enhanced/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
from .main import QEMatplotlibWidget
| 36
| 36
| 0.888889
|
682fe23c493bdf681d92d2b130c8c9057915bd85
| 379
|
py
|
Python
|
hatter_orm/config.py
|
ndrwpvlv/hatter_orm
|
b358c8858a93eaf60b41e8b81940b6348d78bf68
|
[
"MIT"
] | 2
|
2020-01-08T13:03:26.000Z
|
2021-07-15T12:33:45.000Z
|
hatter_orm/config.py
|
ndrwpvlv/hatter_orm
|
b358c8858a93eaf60b41e8b81940b6348d78bf68
|
[
"MIT"
] | null | null | null |
hatter_orm/config.py
|
ndrwpvlv/hatter_orm
|
b358c8858a93eaf60b41e8b81940b6348d78bf68
|
[
"MIT"
] | null | null | null |
import sys
class Config:
    """Static configuration values shared across the package."""

    # Interpreter version components, for feature checks elsewhere.
    PYTHON_MAJOR_VERSION = sys.version_info[0]
    PYTHON_MINOR_VERSION = sys.version_info[1]

    # Very large default LIMIT for SQL responses (presumably acts as
    # "no limit" in practice — confirm against query builder usage).
    SQL_RESPONSE_LIMIT = 1844674407370955161

    # PRAGMA statements, keyed by feature name.
    PRAGMA_SCRIPT = {
        'foreign_key': 'PRAGMA foreign_keys = "1";\n',
        'auto_vacuum': 'PRAGMA auto_vacuum = 0;',
    }

    # Supported SQL JOIN variants and the one used when none is given.
    JOIN_TYPES = ['LEFT', 'INNER', 'RIGHT', 'FULL', ]
    DEFAULT_JOIN = 'LEFT'
| 27.071429
| 54
| 0.649077
|
f8376dc891d30308ea62637ba33cdbb54ce84f2e
| 297
|
py
|
Python
|
Interview Based/Leader_in_array.py
|
nane121/HacktoberFest2020
|
29eb99754ee93f643d4b0bd7e18570079e718d59
|
[
"MIT"
] | 25
|
2020-10-01T05:44:04.000Z
|
2020-10-30T17:30:26.000Z
|
Interview Based/Leader_in_array.py
|
nane121/HacktoberFest2020
|
29eb99754ee93f643d4b0bd7e18570079e718d59
|
[
"MIT"
] | 14
|
2020-10-01T09:32:47.000Z
|
2020-11-05T16:17:12.000Z
|
Interview Based/Leader_in_array.py
|
nane121/HacktoberFest2020
|
29eb99754ee93f643d4b0bd7e18570079e718d59
|
[
"MIT"
] | 143
|
2020-10-01T05:47:04.000Z
|
2021-10-03T04:25:42.000Z
|
# -*- coding: utf-8 -*-
# Print the "leaders" of an array: elements strictly greater than every
# element to their right (the last element is always a leader).
#
# Sample input:          7
#                        63 70 80 33 33 47 20  (one value per line)
# Expected output:       80 47 20


def find_leaders(values):
    """Return the leaders of *values* in their original order.

    A leader is strictly greater than all elements to its right; the last
    element is always a leader.  Returns [] for an empty list.
    """
    leaders = []
    max_from_right = None
    # Right-to-left scan with a running maximum: O(n) instead of comparing
    # each element against everything to its right.
    for value in reversed(values):
        if max_from_right is None or value > max_from_right:
            leaders.append(value)
            max_from_right = value
    leaders.reverse()
    return leaders


if __name__ == '__main__':
    # Bug fix: the original code kept the raw input strings, so values were
    # compared lexicographically ('63' < '70' < '80' but also '33' < '47'),
    # and it only compared adjacent pairs — both produced wrong leaders
    # (e.g. the '70 80 47 20' output recorded in the header comment).
    N = int(input())
    numbers = [int(input()) for _ in range(N)]
    print(' '.join(str(x) for x in find_leaders(numbers)))
| 11
| 27
| 0.538721
|
095bdc779f385acbc4c0731cfd0a357c6253a003
| 6,351
|
py
|
Python
|
trio/_path.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
trio/_path.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
trio/_path.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from functools import wraps, partial
import os
import types
import pathlib
import trio
from trio._util import async_wraps, SubclassingDeprecatedIn_v0_15_0
# re-wrap return value from methods that return new instances of pathlib.Path
def rewrap_path(value):
    """Wrap a plain :class:`pathlib.Path` result in an async ``Path``;
    return every other value unchanged."""
    return Path(value) if isinstance(value, pathlib.Path) else value
def _forward_factory(cls, attr_name, attr):
    """Build a synchronous wrapper that forwards *attr_name* to the
    instance's ``_wrapped`` pathlib object, re-wrapping Path results."""
    @wraps(attr)
    def wrapper(self, *args, **kwargs):
        bound = getattr(self._wrapped, attr_name)
        return rewrap_path(bound(*args, **kwargs))

    return wrapper
def _forward_magic(cls, attr):
    """Build a dunder-method wrapper that unwraps ``cls`` operands before
    delegating to the pathlib implementation."""
    sentinel = object()

    @wraps(attr)
    def wrapper(self, other=sentinel):
        # Unary dunder: no second operand was supplied.
        if other is sentinel:
            return attr(self._wrapped)
        # Binary dunder: unwrap our own type so pathlib sees a plain Path.
        if isinstance(other, cls):
            other = other._wrapped
        return rewrap_path(attr(self._wrapped, other))

    return wrapper
def iter_wrapper_factory(cls, meth_name):
    """Build an async wrapper for a pathlib method that returns an
    iterator (glob/rglob/iterdir)."""
    @async_wraps(cls, cls._wraps, meth_name)
    async def wrapper(self, *args, **kwargs):
        bound = getattr(self._wrapped, meth_name)
        call = partial(bound, *args, **kwargs)
        # pathlib returns a lazy generator; materialize it with list()
        # inside the worker thread so all the blocking work happens there.
        items = await trio.to_thread.run_sync(lambda: list(call()))
        return (rewrap_path(item) for item in items)

    return wrapper
def thread_wrapper_factory(cls, meth_name):
    """Build an async wrapper that runs a blocking pathlib method in a
    worker thread and re-wraps Path results."""
    @async_wraps(cls, cls._wraps, meth_name)
    async def wrapper(self, *args, **kwargs):
        bound = getattr(self._wrapped, meth_name)
        result = await trio.to_thread.run_sync(partial(bound, *args, **kwargs))
        return rewrap_path(result)

    return wrapper
def classmethod_wrapper_factory(cls, meth_name):
    """Build an async classmethod wrapper (e.g. ``Path.cwd``/``home``)
    that runs the blocking pathlib classmethod in a worker thread."""
    @classmethod
    @async_wraps(cls, cls._wraps, meth_name)
    async def wrapper(cls, *args, **kwargs):
        bound = getattr(cls._wraps, meth_name)
        result = await trio.to_thread.run_sync(partial(bound, *args, **kwargs))
        return rewrap_path(result)

    return wrapper
class AsyncAutoWrapperType(SubclassingDeprecatedIn_v0_15_0):
    # Metaclass that auto-populates a wrapper class from two templates:
    # cls._forwards (synchronous pure-path methods, forwarded directly) and
    # cls._wraps (blocking filesystem methods, wrapped to run in a thread).
    # Attributes explicitly defined on the class body are never overridden.

    def __init__(cls, name, bases, attrs):
        super().__init__(name, bases, attrs)

        cls._forward = []
        type(cls).generate_forwards(cls, attrs)
        type(cls).generate_wraps(cls, attrs)
        type(cls).generate_magic(cls, attrs)
        type(cls).generate_iter(cls, attrs)

    def generate_forwards(cls, attrs):
        # forward functions of _forwards
        for attr_name, attr in cls._forwards.__dict__.items():
            # Skip private names and anything the class body defines itself.
            if attr_name.startswith("_") or attr_name in attrs:
                continue

            if isinstance(attr, property):
                # Properties are recorded by name only; presumably consumed
                # elsewhere (cls._forward) — not wrapped here.
                cls._forward.append(attr_name)
            elif isinstance(attr, types.FunctionType):
                wrapper = _forward_factory(cls, attr_name, attr)
                setattr(cls, attr_name, wrapper)
            else:
                # Unexpected attribute kind in the template class.
                raise TypeError(attr_name, type(attr))

    def generate_wraps(cls, attrs):
        # generate thread-dispatching wrappers for functions of _wraps
        for attr_name, attr in cls._wraps.__dict__.items():
            # .z. exclude cls._wrap_iter
            if attr_name.startswith("_") or attr_name in attrs:
                continue
            if isinstance(attr, classmethod):
                wrapper = classmethod_wrapper_factory(cls, attr_name)
                setattr(cls, attr_name, wrapper)
            elif isinstance(attr, types.FunctionType):
                wrapper = thread_wrapper_factory(cls, attr_name)
                setattr(cls, attr_name, wrapper)
            else:
                raise TypeError(attr_name, type(attr))

    def generate_magic(cls, attrs):
        # generate wrappers for the dunder methods listed in _forward_magic
        for attr_name in cls._forward_magic:
            attr = getattr(cls._forwards, attr_name)
            wrapper = _forward_magic(cls, attr)
            setattr(cls, attr_name, wrapper)

    def generate_iter(cls, attrs):
        # generate wrappers for methods that return iterators (_wrap_iter);
        # note this runs after generate_wraps and overwrites its wrappers.
        for attr_name, attr in cls._wraps.__dict__.items():
            if attr_name in cls._wrap_iter:
                wrapper = iter_wrapper_factory(cls, attr_name)
                setattr(cls, attr_name, wrapper)
class Path(metaclass=AsyncAutoWrapperType):
    """A :class:`pathlib.Path` wrapper that executes blocking methods in
    :meth:`trio.to_thread.run_sync`.

    Most of the class body is generated by :class:`AsyncAutoWrapperType`
    from the four declarative tables below.
    """

    # Blocking pathlib.Path API: each method is wrapped to run in a thread.
    _wraps = pathlib.Path
    # Pure computational PurePath API: forwarded synchronously.
    _forwards = pathlib.PurePath
    # Dunder methods forwarded synchronously to the wrapped object.
    _forward_magic = [
        "__str__",
        "__bytes__",
        "__truediv__",
        "__rtruediv__",
        "__eq__",
        "__lt__",
        "__le__",
        "__gt__",
        "__ge__",
        "__hash__",
    ]
    # Methods returning iterators; consumed fully inside the worker thread.
    _wrap_iter = ["glob", "rglob", "iterdir"]

    def __init__(self, *args):
        self._wrapped = pathlib.Path(*args)

    def __getattr__(self, name):
        # Lazily forward the PurePath properties collected by the metaclass.
        if name in self._forward:
            value = getattr(self._wrapped, name)
            return rewrap_path(value)
        raise AttributeError(name)

    def __dir__(self):
        return super().__dir__() + self._forward

    def __repr__(self):
        return "trio.Path({})".format(repr(str(self)))

    def __fspath__(self):
        return os.fspath(self._wrapped)

    @wraps(pathlib.Path.open)
    async def open(self, *args, **kwargs):
        """Open the file pointed to by the path, like the :func:`trio.open_file`
        function does.
        """
        func = partial(self._wrapped.open, *args, **kwargs)
        value = await trio.to_thread.run_sync(func)
        return trio.wrap_file(value)
# Replace the docstring generated for iterdir: the wrapper changes the
# calling convention (an async call that returns a synchronous iterator).
Path.iterdir.__doc__ = """
Like :meth:`pathlib.Path.iterdir`, but async.
This is an async method that returns a synchronous iterator, so you
use it like::
for subpath in await mypath.iterdir():
...
Note that it actually loads the whole directory list into memory
immediately, during the initial call. (See `issue #501
<https://github.com/python-trio/trio/issues/501>`__ for discussion.)
"""
# The value of Path.absolute.__doc__ makes a reference to
# :meth:~pathlib.Path.absolute, which does not exist. Removing this makes more
# sense than inventing our own special docstring for this.
del Path.absolute.__doc__
# Register trio's Path as os.PathLike (it implements __fspath__ above),
# so os.fspath() and isinstance checks accept it.
os.PathLike.register(Path)
| 30.980488
| 80
| 0.638167
|
3e8caec344f79e03010c4a44621e89341ecbeb97
| 3,750
|
py
|
Python
|
src/model/MIM/MIM.py
|
dreaming-coder/DeepLab
|
3020544e2f9e139dde7bd04f6ff59e6f44d49c6e
|
[
"Apache-2.0"
] | 3
|
2021-05-31T09:25:59.000Z
|
2022-03-10T08:09:51.000Z
|
src/model/MIM/MIM.py
|
dreaming-coder/DeepLab
|
3020544e2f9e139dde7bd04f6ff59e6f44d49c6e
|
[
"Apache-2.0"
] | 1
|
2021-09-26T16:37:39.000Z
|
2021-09-28T00:43:05.000Z
|
src/model/MIM/MIM.py
|
dreaming-coder/DeepLab
|
3020544e2f9e139dde7bd04f6ff59e6f44d49c6e
|
[
"Apache-2.0"
] | null | null | null |
r"""
Memory In Memory 的实现,出自论文《Memory In Memory: A Predictive Neural Network for Learning Higher-Order
Non-Stationarity from Spatiotemporal Dynamics》
"""
from typing import List
from torch import nn, Tensor
import torch
from MIMBlock import MIMBlock
from SpatiotemporalLSTM import SpatiotemporalLSTM
__all__ = ["MIM"]
class MIM(nn.Module):
    """Memory In Memory (MIM) predictive network.

    The first stacked layer is a SpatiotemporalLSTM; every further layer
    is a MIMBlock.  ``forward`` rolls the stack over the input sequence
    and then feeds its own predictions back in for ``out_len`` steps.
    """

    def __init__(self, in_channels: int, hidden_channels_list: List[int], kernel_size_list: List[int],
                 forget_bias: float = 0.01):
        r"""
        :param in_channels: number of input channels
        :param hidden_channels_list: hidden channel count of each stacked layer
        :param kernel_size_list: convolution kernel size of each stacked layer
        :param forget_bias: forget-gate bias
        """
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels_list = hidden_channels_list
        self.layers = len(hidden_channels_list)
        cell_list = nn.ModuleList([])
        cell_list.append(
            SpatiotemporalLSTM(
                in_channels=in_channels, hidden_channels=hidden_channels_list[0],
                kernel_size=kernel_size_list[0], forget_bias=forget_bias
            )
        )
        for i in range(1, self.layers):
            cell_list.append(
                MIMBlock(
                    in_channels=hidden_channels_list[i - 1], hidden_channels=hidden_channels_list[i],
                    kernel_size=kernel_size_list[i], forget_bias=forget_bias
                )
            )
        self.cell_list = cell_list
        # 1x1 convolution mapping the top layer's hidden state back to frames.
        self.conv_last = nn.Conv2d(
            in_channels=hidden_channels_list[-1], out_channels=in_channels,
            kernel_size=1, stride=1, padding=0, bias=False
        )

    # noinspection PyUnboundLocalVariable
    def forward(self, inputs: Tensor, out_len: int = 10) -> Tensor:
        """Roll the cell stack over ``inputs`` and predict ``out_len`` frames.

        :param inputs: tensor shaped (batch, sequence, channel, height, width)
        :param out_len: number of future frames to predict
        :return: predictions shaped (batch, out_len, channel, height, width)
        """
        device = inputs.device
        batch, sequence, _, height, width = inputs.shape
        h = []
        h_ = []
        c = []
        # n and s only exist from the second layer on, so index 0 stays None
        n = []
        s = []
        pred = []
        # initialize the hidden states with zeros
        for i in range(self.layers):
            zero_tensor_h = torch.zeros(batch, self.hidden_channels_list[i], height, width).to(device)
            zero_tensor_c = torch.zeros(batch, self.hidden_channels_list[i], height, width).to(device)
            zero_tensor_h_ = torch.zeros(batch, self.hidden_channels_list[i], height, width).to(device)
            h.append(zero_tensor_h)
            c.append(zero_tensor_c)
            h_.append(zero_tensor_h_)
            if i > 0:
                zero_tensor_n = torch.zeros(batch, self.hidden_channels_list[i], height, width).to(device)
                zero_tensor_s = torch.zeros(batch, self.hidden_channels_list[i], height, width).to(device)
                n.append(zero_tensor_n)
                s.append(zero_tensor_s)
            else:
                n.append(None)
                s.append(None)
        m = torch.zeros(batch, self.hidden_channels_list[0], height, width).to(device)
        for seq in range(sequence + out_len):
            # Use ground-truth frames first, then feed predictions back in.
            if seq < sequence:
                x = inputs[:, seq]
            else:
                x = x_pred
            h[0], c[0], m = self.cell_list[0](x, h[0], c[0], m)
            h_[0] = h[0]
            for i in range(1, self.layers):
                # NOTE(review): by this point h_[i-1] and h[i-1] refer to the
                # same (already updated) tensor; MIM's difference input usually
                # needs the previous timestep's hidden state -- confirm against
                # the MIMBlock implementation. Also note the (c, h, ...) return
                # order differs from SpatiotemporalLSTM's (h, c, m).
                c[i], h[i], m, n[i], s[i] = self.cell_list[i](h_[i - 1], h[i - 1], c[i], h[i], m, n[i], s[i])
                h_[i] = h[i]
            x_pred = self.conv_last(h[self.layers - 1])
            if seq >= sequence:
                pred.append(x_pred)
        # [length, batch, channel, height, width] -> [batch, length, channel, height, width]
        prediction = torch.stack(pred, dim=0).permute(1, 0, 2, 3, 4)
        return prediction
| 34.40367
| 109
| 0.5712
|
7ca64f8bd25f6eee29ceb284774bae2e59c2abd2
| 1,225
|
py
|
Python
|
ciphey/basemods/Resources/cipheydists.py
|
blackcat-917/Ciphey
|
d24deea87cec2dea2e04ec3859b9e77e121d192a
|
[
"MIT"
] | 1
|
2021-11-28T17:55:04.000Z
|
2021-11-28T17:55:04.000Z
|
ciphey/basemods/Resources/cipheydists.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 10
|
2021-06-14T08:22:45.000Z
|
2022-03-18T04:17:05.000Z
|
ciphey/basemods/Resources/cipheydists.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 1
|
2020-10-05T17:35:00.000Z
|
2020-10-05T17:35:00.000Z
|
from functools import lru_cache
from typing import Any, Dict, Optional, Set
import cipheydists
import loguru
from ciphey.iface import (
Config,
Distribution,
ParamSpec,
ResourceLoader,
Translation,
WordList,
registry,
)
@registry.register_multi(WordList, Distribution, Translation)
class CipheyDists(ResourceLoader):
    """Loads wordlists, distributions and translation tables from the
    ``cipheydists`` package.

    Resource names have the form ``"<prefix>::<name>"``, where ``prefix``
    is one of the keys of ``_getters``.
    """

    # Maps resource-name prefixes to the cipheydists accessor functions.
    _getters = {
        "list": cipheydists.get_list,
        "dist": cipheydists.get_dist,
        "brandon": cipheydists.get_brandon,
        "translate": cipheydists.get_translate,
    }

    def whatResources(self) -> Optional[Set[str]]:
        # No static listing of available resources is provided.
        pass

    def getResource(self, name: str) -> Any:
        """Fetch and memoize the resource ``name`` (format ``prefix::name``).

        Caching uses a per-instance dict rather than ``functools.lru_cache``:
        ``lru_cache`` on an instance method keys on ``self`` and keeps every
        instance alive for the cache's lifetime (flake8-bugbear B019).
        """
        try:
            return self._cache[name]
        except KeyError:
            pass
        loguru.logger.trace(f"Loading cipheydists resource {name}")
        prefix, resource_name = name.split("::", 1)
        value = self._getters[prefix](resource_name)
        self._cache[name] = value
        return value

    def __init__(self, config: Config):
        super().__init__(config)
        # Memoization store for getResource, keyed by the full resource name.
        self._cache: Dict[str, Any] = {}

    @staticmethod
    def getParams() -> Optional[Dict[str, ParamSpec]]:
        return None
| 26.630435
| 86
| 0.653878
|
e53c69c4c869faf44d91c6dda380d2c56fcd4237
| 11,367
|
py
|
Python
|
familydoctor/DUrlConsumer.py
|
i2it/XYWYCrawler
|
01d848f77690bfafff1fdc28efb09b6c0e9e11bc
|
[
"MIT"
] | 1
|
2020-04-11T04:20:33.000Z
|
2020-04-11T04:20:33.000Z
|
familydoctor/DUrlConsumer.py
|
i2it/XYWYCrawler
|
01d848f77690bfafff1fdc28efb09b6c0e9e11bc
|
[
"MIT"
] | null | null | null |
familydoctor/DUrlConsumer.py
|
i2it/XYWYCrawler
|
01d848f77690bfafff1fdc28efb09b6c0e9e11bc
|
[
"MIT"
] | 1
|
2020-04-11T03:55:01.000Z
|
2020-04-11T03:55:01.000Z
|
#! /usr/bin/python
import traceback
from datetime import datetime
import gevent
from gevent import monkey
from common.DBHandler import MySQL
from common.DBHandler import Redis
from common.IOHandler import FileIO
from common.IOHandler import NetworkIO
monkey.patch_all()
def getAllInfo(dbkey):
    """Drain doctor-profile urls from the redis list ``dbkey``.

    For every url, scrape the profile page plus the three sub-pages
    (evaluation summary, evaluation texts, purchase records), then fetch
    the next batch until the list is exhausted.
    """
    urlPool = Redis().listUrls(dbkey, 1)
    while urlPool:
        for url in urlPool:
            getInfo(url)
            getInfo2(url + '?info=1&page=1#name2')
            getInfo3(url + '?info=1&page=1#name2')
            getInfo4(url + '?info=2&page=1#name3')
        urlPool = Redis().listUrls(dbkey, 1)
def getInfo(url):
    # Scrape the doctor profile page, e.g.
    # http://club.xywy.com/familyDoctor/pay/43983196
    try:
        html = NetworkIO().requestHtml(url)
        if html is not None:
            # Doctor name (a fixed 6-char label suffix is stripped).
            doctorName = html.findtext('.//i[@class="fwei fl"]')
            doctorName = doctorName[:-6] if doctorName is not None and len(doctorName) > 6 else None
            # Doctor title plus hospital/department.
            doctorRankAndHosp = html.find('.//div[@class=" lh200 pt10 f14"]')
            doctorRank = doctorRankAndHosp.text
            doctorHosp = doctorRankAndHosp.find('./br')
            # Doctor medals, joined into one comma-terminated string.
            medalsBlock = html.findall('.//div[@class="HomePth"]/span')
            medals = ''
            for medal in medalsBlock:
                medals += medal.get('data-th') + ','
            # Doctor's message to patients.
            sendWord = html.find('.//div[@class="f12 graydeep club_home_icon HomePj"]/span').tail
            # Service types offered; the page has two alternative layouts.
            serviceTypeBlock = {0: html.find('.//div[@class="fl pr"]'), 1: None}
            if serviceTypeBlock[0] is None:
                serviceTypeBlock[1] = html.find('.//div[@class="fl f14"]')
            serviceTypes = {0: '', 1: ''}
            oldServiceTypes = {0: '', 1: ''}
            if serviceTypeBlock[0] is not None:
                serviceTypeBlock2 = serviceTypeBlock[0].findall('.//a[@cate]')
                for index, item in enumerate(serviceTypeBlock2):
                    for text in item.itertext():
                        serviceTypes[index] += text.strip()
            elif serviceTypeBlock[1] is not None:
                # Alternative layout also exposes the original (pre-discount)
                # price of each service.
                serviceTypeBlock2 = serviceTypeBlock[1].findall('.//a[@cate]')
                for index, item in enumerate(serviceTypeBlock2):
                    for text in item.itertext():
                        serviceTypes[index] += text.strip()
                serviceTypeBlock2 = serviceTypeBlock[1].findall('.//span[@class="f14 col99 ml10"]')
                for index, item in enumerate(serviceTypeBlock2):
                    for text in item.itertext():
                        oldServiceTypes[index] += text.strip()
            # User rating (scraped on the evaluation page instead).
            # evaluateScore = html.findtext('.//span[@class="fl colClass01 fwei"]')
            # Families signed up and users helped.
            helpedInfo = {0: None, 1: None}
            helpedInfoBlock = html.findall('.//span[@class="fb f16 ml5"]')
            for index, item in enumerate(helpedInfoBlock):
                helpedInfo[index] = item.text
            # Specialty, biography and honors sections, dispatched by the
            # Chinese section heading text.
            infos = {0: '', 1: '', 2: ''}
            infoBlock = html.findall('.//div[@class="HomeJie f14 fwei pt20"]')
            for item in infoBlock:
                tmp = item.findtext('./h4')
                textblock = item.find('./div')
                tmptext = ''
                for text in textblock.itertext():
                    tmptext += text.strip()
                if '擅长' in tmp:
                    infos[0] = tmptext
                elif '简介' in tmp:
                    infos[1] = tmptext
                elif '荣誉' in tmp:
                    infos[2] = tmptext
            dbInfo = (url, doctorName, doctorRank, doctorHosp.tail, medals, sendWord, serviceTypes[0], serviceTypes[1],
                      oldServiceTypes[0], oldServiceTypes[1], helpedInfo[0], helpedInfo[1], infos[0], infos[1],
                      infos[2])
            MySQL().saveDoctorInfo(dbInfo)
    except:
        # On any failure: requeue the url in redis and log the traceback.
        doExpt('url1', url, 'url1')
def getInfo2(url):
    # Scrape the aggregate user-evaluation statistics from pages like
    # http://club.xywy.com/familyDoctor/pay/43983196?info=1&page=2#name2
    try:
        html = NetworkIO().requestHtml(url)
        if html is not None:
            # Overall score shown at the top of the evaluation tab.
            evaluateScore = html.findtext('.//h4[@class="f30 colClass01 fWei tc"]').strip()
            evaluateStat = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0}
            evaluateStatBlock = html.findall('.//div[@class="HomSptop_Ri fWei f14 mt20 fl"]/span')
            for index, item in enumerate(evaluateStatBlock):
                tmptext = item.text
                # Pull the count out of a label shaped like "xxx(12)".
                evaluateStat[index] = 0 if len(tmptext) == 0 else int(tmptext[tmptext.find('(') + 1:tmptext.find(')')])
            dbInfo = (url, evaluateScore, evaluateStat[0], evaluateStat[1], evaluateStat[2], evaluateStat[3],
                      evaluateStat[4], evaluateStat[5], evaluateStat[6], evaluateStat[7])
            MySQL().saveDoctorEvaluation(dbInfo)
    except:
        # On any failure: requeue the url in redis and log the traceback.
        doExpt('url2', url, 'url2')
def getInfo3(url):
    # Scrape the individual evaluation entries from pages like
    # http://club.xywy.com/familyDoctor/pay/43983196?info=1&page=2#name2
    try:
        # The first request yields both the evaluations on this page and
        # the total number of evaluation pages.
        html = NetworkIO().requestHtml(url)
        if html is not None:
            evaluateBlock = html.findall('.//div[@class="User_eval lh180 btn-a f14 fwei mt10"]')
            for index, block in enumerate(evaluateBlock):
                uName = block.findtext('.//span[@class="mr10 fl"]').strip()
                evalAtti = block.findtext('.//span[@class="fl colbd mr10"]').strip()
                evalScore = block.findtext('.//span[@class="colClass01 fl"]').strip()
                evalText = block.findtext('.//div[@class="pt5"]').strip()
                evalTime = block.findtext('.//span[@class="colbd f12 db pt10"]').strip()
                dbInfo = (url + '#' + str(index), uName, evalAtti, evalScore, evalText,
                          datetime.strptime(evalTime, '%Y-%m-%d %H:%M:%S'))
                MySQL().saveDoctorEvaluationText(dbInfo)
            # Total number of evaluation pages (1 when no pager is present).
            totalPageInfo = html.find('.//div[@class="mt20 HomeFen f14"]/span[@class="mr5"]')
            totalPageInfo = 1 if totalPageInfo is None else totalPageInfo.text.strip()[1:-3]
            # Index of the current page, parsed out of the url's page= query.
            tmpIndex = url.find('page=') + 5
            currentPageIndex = url[tmpIndex:-6]
            # Scrape every remaining evaluation page after the current one.
            if int(currentPageIndex) < int(totalPageInfo):
                for pageIndex in range(int(currentPageIndex) + 1, int(totalPageInfo) + 1):
                    url = url[:int(tmpIndex)] + str(pageIndex) + '#name2'
                    html = NetworkIO().requestHtml(url)
                    if html is not None:
                        evaluateBlock = html.findall('.//div[@class="User_eval lh180 btn-a f14 fwei mt10"]')
                        for index, block in enumerate(evaluateBlock):
                            uName = block.findtext('.//span[@class="mr10 fl"]').strip()
                            evalAtti = block.findtext('.//span[@class="fl colbd mr10"]').strip()
                            evalScore = block.findtext('.//span[@class="colClass01 fl"]').strip()
                            evalText = block.findtext('.//div[@class="pt5"]').strip()
                            evalTime = block.findtext('.//span[@class="colbd f12 db pt10"]').strip()
                            dbInfo = (url + '#' + str(index), uName, evalAtti, evalScore, evalText,
                                      datetime.strptime(evalTime, '%Y-%m-%d %H:%M:%S'))
                            MySQL().saveDoctorEvaluationText(dbInfo)
    except:
        # On any failure: requeue the url in redis and log the traceback.
        doExpt('url3', url, 'url3')
def getInfo4(url):
    # Scrape the service purchase records from pages like
    # http://club.xywy.com/familyDoctor/pay/43983196?info=2&page=2#name3
    try:
        html = NetworkIO().requestHtml(url)
        if html is not None:
            serviceBuyBlock = html.findall('.//div[@class="HomBone fwei f14"]')
            for index, block in enumerate(serviceBuyBlock):
                uName = block.findtext('.//span[@class="w100"]').strip()
                # 1 = monthly subscription (contains the Chinese marker), 0 = one-off.
                serviceType = 1 if '包月' in block.findtext('.//span[@class="w200 tl"]').strip() else 0
                serviceCount = block.findtext('.//span[@class="w60 tc"]').strip()
                servicePrice = block.findtext('.//span[@class="colClass01 fb w80 tc"]').strip()
                serviceStatus = block.findtext('.//span[@class="club_home_icon HomBsuc"]').strip()
                serviceTime = block.findtext('.//span[@class="col99 ml20 tc"]').strip()
                dbInfo = (url + '#' + str(index), uName, serviceType, serviceCount, servicePrice, serviceStatus,
                          serviceTime)
                MySQL().saveServiceInfo(dbInfo)
            # Total number of record pages (1 when no pager is present).
            totalPageInfo = html.find('.//div[@class="mt20 HomeFen f14"]/span[@class="mr5"]')
            totalPageInfo = 1 if totalPageInfo is None else totalPageInfo.text.strip()[1:-3]
            # Index of the current page, parsed out of the url's page= query.
            tmpIndex = url.find('page=') + 5
            currentPageIndex = url[tmpIndex:-6]
            # Scrape every remaining record page after the current one.
            if int(currentPageIndex) < int(totalPageInfo):
                for pageIndex in range(int(currentPageIndex) + 1, int(totalPageInfo) + 1):
                    url = url[:int(tmpIndex)] + str(pageIndex) + '#name3'
                    html = NetworkIO().requestHtml(url)
                    if html is not None:
                        serviceBuyBlock = html.findall('.//div[@class="HomBone fwei f14"]')
                        for index, block in enumerate(serviceBuyBlock):
                            uName = block.findtext('.//span[@class="w100"]').strip()
                            serviceType = 1 if '包月' in block.findtext('.//span[@class="w200 tl"]').strip() else 0
                            serviceCount = block.findtext('.//span[@class="w60 tc"]').strip()
                            servicePrice = block.findtext('.//span[@class="colClass01 fb w80 tc"]').strip()
                            serviceStatus = block.findtext('.//span[@class="club_home_icon HomBsuc"]').strip()
                            serviceTime = block.findtext('.//span[@class="col99 ml20 tc"]').strip()
                            dbInfo = (url + '#' + str(index), uName, serviceType, serviceCount, servicePrice,
                                      serviceStatus, serviceTime)
                            MySQL().saveServiceInfo(dbInfo)
    except:
        # On any failure: requeue the url in redis and log the traceback.
        doExpt('url4', url, 'url4')
def doExpt(key, url, logIdentifier):
    """Record a scraping failure: stash the url back in redis and log the
    current exception's traceback under ``logIdentifier``."""
    trace = traceback.format_exc()
    Redis().saveUrl(key, url)
    FileIO.handleExpt(trace, url, logIdentifier)
if __name__ == '__main__':
    # Name of the redis list holding the doctor profile urls to crawl.
    dbkey = input('请输入医生url列表名称:')
    # Earlier thread-based version, kept for reference:
    # threadList = []
    # for i in range(5):
    #     tmpThread = threading.Thread(target=getQPageInfo, args=(tmpYear, None if tmpPwd == '' else tmpPwd))
    #     threadList.append(tmpThread)
    # for tmpThread in threadList:
    #     tmpThread.start()
    # for tmpThread in threadList:
    #     tmpThread.join()
    # Spawn five greenlets that all drain the same url list concurrently.
    jobs = []
    for i in range(5):
        jobs.append(gevent.spawn(getAllInfo, dbkey.strip()))
    gevent.joinall(jobs)
| 50.52
| 119
| 0.544295
|
5960088035b5df4aefdc1abf2b6dd9894a0c53be
| 5,978
|
py
|
Python
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | 20
|
2021-12-03T13:20:17.000Z
|
2022-03-20T18:58:06.000Z
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
# Estimators are partially based on the "estimators.py" from the following repositories:
# https://github.com/agadetsky/pytorch-pl-variance-reduction
# https://github.com/sdrobert/pydrobert-pytorch
import torch
def uniform_to_exp(logits, uniform=None, enable_grad=False):
    '''
    Converts independent uniform samples into independent exponential samples.

    ``logits`` holds the log-means of the exponentials, i.e. the rate is
    lambda = exp(-logit) (the expected value equals 1 / lambda).  When
    ``uniform`` is omitted, fresh uniforms are drawn and clamped away from
    0 and 1 for numerical stability.
    '''
    if uniform is None:
        uniform = torch.distributions.utils.clamp_probs(torch.rand_like(logits))
    else:
        assert uniform.size() == logits.size()
    # Inverse-CDF transform: E = -log(U) / lambda = exp(logit) * (-log U).
    samples = (logits + torch.log(-torch.log(uniform))).exp()
    if enable_grad:
        samples.requires_grad_(True)
    return samples
def reattach_exp_to_new_logits(logits, exp):
    '''
    Creates a new tensor with the same values as ``exp`` whose autograd
    dependence on ``logits`` matches what 'uniform_to_exp' would have
    produced.  Used in 'relax' to take gradients w.r.t. a detached copy
    of the logits.
    '''
    detached = exp.detach()
    # logits - logits.detach() is identically zero in value but carries grad.
    return torch.exp(torch.log(detached) + logits - logits.detach())
def E_reinforce(loss_value, logits, exp, plus_samples=1, mask_unused_values=None, **kwargs):
    '''
    REINFORCE [williams1992] estimate taken w.r.t. the exponential score:

        grad = loss(X) * (d / d logits) log p(E ; logits)

    With plus_samples > 1 the estimate becomes E-REINFORCE+ / E-REINFORCE
    with a leave-one-out baseline [kool2019buy, richter2020vargrad].
    '''
    batch_size = logits.shape[0] // plus_samples
    loss_value = loss_value.detach()
    exp = exp.detach()
    # Log-density of the exponential samples under rate lambda = exp(-logits).
    log_prob = -logits - torch.exp(torch.log(exp) - logits)
    if mask_unused_values is not None:
        log_prob = mask_unused_values(log_prob, **kwargs)
    # Reduce over every dimension except the leading batch dimension.
    log_prob = log_prob.sum(dim=tuple(range(-1, -logits.ndimension(), -1)))
    score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
    # Singleton dims appended to the loss so it broadcasts over the logits.
    trailing = (1,) * (logits.ndimension() - 1)
    if plus_samples > 1:
        score = score.reshape((batch_size, plus_samples) + logits.shape[1:])
        loss_value = loss_value.reshape(batch_size, plus_samples)
        # Leave-one-out baseline: center each group of plus_samples losses.
        loss_value = loss_value - loss_value.mean(dim=-1, keepdim=True)
        loss_value = loss_value.reshape(loss_value.shape + trailing)
        grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
    else:
        loss_value = loss_value.reshape(loss_value.shape + trailing)
        grad = loss_value * score
    return grad
def T_reinforce(loss_value, struct_var, logits, f_log_prob, plus_samples=1, **kwargs):
    '''
    REINFORCE [williams1992] estimate taken w.r.t. the score function of
    the execution trace:

        grad = loss(X) * (d / d logits) log p(T ; logits)

    With plus_samples > 1 the estimate becomes T-REINFORCE+ / T-REINFORCE
    with a leave-one-out baseline [kool2019buy, richter2020vargrad].
    '''
    batch_size = logits.shape[0] // plus_samples
    loss_value = loss_value.detach()
    struct_var = struct_var.detach()
    log_prob = f_log_prob(struct_var, logits, **kwargs)
    score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
    # Singleton dims appended to the loss so it broadcasts over the logits.
    trailing = (1,) * (logits.ndimension() - 1)
    if plus_samples > 1:
        score = score.reshape((batch_size, plus_samples) + logits.shape[1:])
        loss_value = loss_value.reshape(batch_size, plus_samples)
        # Leave-one-out baseline: center each group of plus_samples losses.
        loss_value = loss_value - loss_value.mean(dim=-1, keepdim=True)
        loss_value = loss_value.reshape(loss_value.shape + trailing)
        grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
    else:
        loss_value = loss_value.reshape(loss_value.shape + trailing)
        grad = loss_value * score
    return grad
def relax(loss_value, struct_var, logits, exp, critic, f_log_prob, f_cond, uniform=None, **kwargs):
    '''
    Returns the RELAX [grathwohl2017backpropagation] gradient estimate
    grad = (loss(X(T)) - c(e_2)) * (d / d logits) log p(T ; logits) - (d / d logits) c(e_2) + (d / d logits) c(e_1)
    e_1 ~ p(E ; logits) - exponential sample
    T = T(e_1) - execution trace of the algorithm
    X = X(T) - structured variable, obtained as the output of the algorithm
    e_2 ~ p(E | T ; logits) - conditional exponential sample
    c(.) - critic (typically, a neural network)
    e_1 and e_2 are sampled using the reparameterization trick
    (d / d logits) c(e_1) and (d / d logits) c(e_2) are the reparameterization gradients
    In code, exp := e_1, cond_exp := e_2
    '''
    loss_value = loss_value.detach()
    struct_var = struct_var.detach()
    # Take gradients w.r.t. a detached copy of the logits.
    logits = logits.detach().requires_grad_(True)
    # Rebuild the dependence of e_1 on the new logits tensor.
    exp = reattach_exp_to_new_logits(logits, exp)
    cond_exp = f_cond(struct_var, logits, uniform, **kwargs)
    baseline_exp = critic(exp)
    baseline_cond = critic(cond_exp).squeeze()
    # Advantage term: loss minus the conditional critic baseline.
    diff = loss_value - baseline_cond
    log_prob = f_log_prob(struct_var, logits, **kwargs)
    score, = torch.autograd.grad(
        [log_prob],
        [logits],
        grad_outputs = torch.ones_like(log_prob)
    )
    # Reparameterization gradients of the two critic terms; create_graph=True
    # keeps them differentiable.
    d_baseline_exp, = torch.autograd.grad(
        [baseline_exp],
        [logits],
        create_graph=True,
        retain_graph=True,
        grad_outputs=torch.ones_like(baseline_exp)
    )
    d_baseline_cond, = torch.autograd.grad(
        [baseline_cond],
        [logits],
        create_graph=True,
        retain_graph=True,
        grad_outputs=torch.ones_like(baseline_cond)
    )
    # Broadcast the per-sample advantage over the non-batch logits dims.
    for i in range(logits.ndimension() - 1):
        diff = diff.unsqueeze(-1)
    grad = diff * score + d_baseline_exp - d_baseline_cond
    assert grad.size() == logits.size()
    return grad
| 36.674847
| 119
| 0.666109
|
876ea23897293976ed6d4c89dc0ac3f3913fd93a
| 1,155
|
py
|
Python
|
medium/python/c0090_173_binary-search-tree-iterator/00_leetcode_0090.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python/c0090_173_binary-search-tree-iterator/00_leetcode_0090.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python/c0090_173_binary-search-tree-iterator/00_leetcode_0090.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | 3
|
2018-02-09T02:46:48.000Z
|
2021-02-20T08:32:03.000Z
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#173. Binary Search Tree Iterator
#Implement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.
#Calling next() will return the next smallest number in the BST.
#Note: next() and hasNext() should run in average O(1) time and uses O(h) memory, where h is the height of the tree.
#Credits:
#Special thanks to @ts for adding this problem and creating all test cases.
## Definition for a binary tree node
## class TreeNode(object):
## def __init__(self, x):
## self.val = x
## self.left = None
## self.right = None
#class BSTIterator(object):
# def __init__(self, root):
# """
# :type root: TreeNode
# """
# def hasNext(self):
# """
# :rtype: bool
# """
# def next(self):
# """
# :rtype: int
# """
## Your BSTIterator will be called like this:
## i, v = BSTIterator(root), []
## while i.hasNext(): v.append(i.next())
# Time Is Money
| 33
| 118
| 0.641558
|
b0bff1143e8fc07382e4e4d809b36b296a3f453c
| 240
|
py
|
Python
|
Python/flipping-an-image.py
|
kuanhungchen/leetcode-practice
|
b75e773ada60b685da1576ae5f2234b70bc27842
|
[
"CNRI-Python"
] | 1
|
2020-04-29T06:19:44.000Z
|
2020-04-29T06:19:44.000Z
|
Python/flipping-an-image.py
|
kuanhungchen/leetcode-practice
|
b75e773ada60b685da1576ae5f2234b70bc27842
|
[
"CNRI-Python"
] | null | null | null |
Python/flipping-an-image.py
|
kuanhungchen/leetcode-practice
|
b75e773ada60b685da1576ae5f2234b70bc27842
|
[
"CNRI-Python"
] | null | null | null |
class Solution:
    def flipAndInvertImage(self, A):
        """Horizontally flip then invert a binary image.

        Each row is reversed and each pixel inverted (0 <-> 1).  A new
        matrix is returned; the input ``A`` is left unmodified.

        :param A: list of rows of 0/1 pixels
        :return: new matrix of the same shape, flipped and inverted
        """
        # reversed() performs the horizontal flip; the conditional inverts
        # each binary pixel in the same pass.
        return [[1 if px == 0 else 0 for px in reversed(row)] for row in A]
| 30
| 50
| 0.4625
|
1cb593459c122137ee104ce6867fdfaa0bba92a1
| 9,448
|
py
|
Python
|
baseStation/src/context/remote.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
baseStation/src/context/remote.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
baseStation/src/context/remote.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
from application.domain.application import Application
from application.infrastructure.applicationWorker import ApplicationWorker
from application.infrastructure.chargeStationWatcher import ChargeStationWatcher
from application.infrastructure.robotWatcher import RobotWatcher
from application.infrastructure.tkinterUi import TkinterUi
from communication.infrastructure.socketRobotConnector import SocketRobotConnector
from communication.service.communicationService import CommunicationService
from light.service.lightService import LightService
from objective.service.objectiveService import ObjectiveService
from pathDrawing.infrastructure.openCvDrawer import OpenCvDrawer
from pathDrawing.service.pathDrawingService import PathDrawingService
from pathfinding.domain.angle import Angle
from pathfinding.domain.table import Table
from pathfinding.infrastructure.adaptivePathableCatalog import AdaptivePathableCatalog
from pathfinding.infrastructure.approachPositionFinder import ApproachPositionFinder
from pathfinding.infrastructure.grassfirePathfinderFactory import GrassfirePathfinderFactory
from pathfinding.infrastructure.pathableCatalog import PathableCatalog
from pathfinding.service.pathService import PathService
from pathfinding.service.pathfindingService import PathfindingService
from pathfinding.service.positionService import PositionService
from prehensor.infrastructure.chargeStation import ChargeStation
from prehensor.service.prehensorService import PrehensorService
from remote.infrastructure.remoteControl import RemoteControl
from remote.service.remoteService import RemoteService
from timer.infrastructure.pythonChronometer import PythonChronometer
from timer.service.timeService import TimeService
from ui.domain.directionalControl import DirectionalControl
from ui.domain.indicator.charge import Charge
from ui.domain.indicator.indicators import Indicators
from ui.domain.indicator.light import Light
from ui.domain.indicator.objective import Objective
from ui.domain.indicator.timer import Timer
from ui.domain.onBoardCamera import OnBoardCamera
from ui.domain.subroutine.championshipSubroutine import ChampionshipSubroutine
from ui.domain.subroutine.chargeSubroutine import ChargeSubroutine
from ui.domain.subroutine.dropSubroutine import DropSubroutine
from ui.domain.subroutine.goHomeSubroutine import GoHomeSubroutine
from ui.domain.subroutine.grabSubroutine import GrabSubroutine
from ui.domain.subroutine.magnetSubroutine import MagnetSubroutine
from ui.domain.subroutine.readQrSubroutine import ReadQrSubroutine
from ui.domain.subroutine.sightSubroutine import SightSubroutine
from ui.domain.subroutine.subroutines import Subroutines
from ui.domain.subroutine.updateDirectionsSubroutine import UpdateDirectionsSubroutine
from ui.domain.subroutine.winSubroutine import WinSubroutine
from ui.domain.worldCameraSelector import WorldCameraSelector
from ui.infrastructure.remoteMainView import RemoteMainView
from ui.infrastructure.remoteSubroutineRunner import RemoteSubroutineRunner
from vision.infrastructure.cvCameraCalibrationFactory import CvCameraCalibrationFactory
from vision.infrastructure.cvCameraFactory import CvCameraFactory
from vision.infrastructure.cvGoalFinder import CvGoalFinder
from vision.infrastructure.cvImageDrawer import CvImageDrawer
from vision.infrastructure.cvObstacleFinder import CvObstacleFinder
from vision.infrastructure.cvPlayAreaFinder import CvPlayAreaFinder
from vision.infrastructure.cvRobotFinder import CvRobotFinder
from vision.infrastructure.cvSourceFinder import CvSourceFinder
from vision.service.robotCameraService import RobotCameraService
from vision.service.visionService import VisionService
class Remote:
    """Composition root for the remote-control application.

    Builds the full object graph (vision, communication, pathfinding and UI
    services) and exposes factory methods that assemble the Tkinter UI and
    its background workers. Construction order in ``__init__`` matters:
    later services consume the ones built before them.
    """
    def __init__(self, port: int, address: str, timeout: float) -> None:
        """Wire up all services.

        :param port: TCP port of the robot's socket server.
        :param address: network address of the robot.
        :param timeout: socket timeout (seconds) for the robot connection.
        """
        # Vision pipeline: all OpenCV-based finders share one play-area finder.
        self._play_area_finder = CvPlayAreaFinder()
        self._vision_service = VisionService(CvCameraFactory(), CvCameraCalibrationFactory(self._play_area_finder),
                                             CvImageDrawer(), self._play_area_finder, CvGoalFinder(),
                                             CvSourceFinder(), CvObstacleFinder(), CvRobotFinder())
        self._time_service = TimeService(PythonChronometer())
        self._robot_camera_service = RobotCameraService()
        # Robot link: socket connector wrapped by the communication service.
        self._robot_connector = SocketRobotConnector(port, address, timeout)
        self._communication_service = CommunicationService(self._robot_connector)
        self._position_service = PositionService(self._vision_service, self._communication_service)
        self._light_service = LightService()
        self._objective_service = ObjectiveService()
        # Remote control stack: control -> service -> subroutine runner.
        self._remote_control = RemoteControl(self._communication_service)
        self._remote_service = RemoteService(self._remote_control)
        self._subroutine_runner = RemoteSubroutineRunner(self._remote_service)
        # Pathfinding over a fixed table geometry.
        # NOTE(review): table dimensions/offsets are hard-coded here — confirm
        # they match the physical playing surface.
        pathfinder_factory = GrassfirePathfinderFactory(Table(111, 231, -1, 27, Angle(0), 15))
        self._pathable_catalog = PathableCatalog(self._vision_service, pathfinder_factory, ApproachPositionFinder())
        self._pathfinding_service = PathfindingService(self._pathable_catalog)
        self._path_service = PathService()
        self._path_drawing_service = PathDrawingService(OpenCvDrawer(), self._vision_service, self._path_service)
        self._prehensor_service = PrehensorService()
    def application(self) -> Application:
        """Assemble and return the top-level Application with its watchers."""
        tkinter_ui = self._make_tkinter_ui()
        robot_watcher = self._make_robot_watcher()
        application_worker = self._make_application_worker()
        charge_station = ChargeStation()
        charge_station_watcher = ChargeStationWatcher(self._communication_service, charge_station)
        return Application(tkinter_ui, robot_watcher, application_worker, charge_station_watcher)
    def _make_tkinter_ui(self) -> TkinterUi:
        """Build the Tkinter UI; sub-views are created lazily via factories."""
        return TkinterUi(RemoteMainView(None, self._make_subroutines, self._make_world_camera_selector,
                                        self._make_on_board_camera, self._make_directional_control,
                                        self._make_indicators))
    def _make_robot_watcher(self) -> RobotWatcher:
        """Build the watcher that reacts to robot events with all services."""
        return RobotWatcher(self._communication_service, self._light_service, self._remote_control,
                            self._robot_camera_service, self._pathfinding_service, self._position_service,
                            self._path_service, self._prehensor_service, self._objective_service)
    def _make_application_worker(self) -> ApplicationWorker:
        """Build the background worker driving vision updates and timing."""
        return ApplicationWorker(self._vision_service, self._time_service)
    def _make_subroutines(self, parent: ttk.Frame) -> Subroutines:
        """Build the tabbed subroutine panel; one tab per robot routine."""
        subroutines = Subroutines(parent)
        subroutines.add(ChargeSubroutine(subroutines, self._subroutine_runner),
                        text="charge", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(ReadQrSubroutine(subroutines, self._subroutine_runner, self._make_objective),
                        text="qrCode", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(GrabSubroutine(subroutines, self._subroutine_runner),
                        text="grab", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(DropSubroutine(subroutines, self._subroutine_runner),
                        text="drop", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(GoHomeSubroutine(subroutines, self._subroutine_runner),
                        text="goHome", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(WinSubroutine(subroutines, self._subroutine_runner, self._make_light),
                        text="win!", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(MagnetSubroutine(subroutines, self._subroutine_runner),
                        text="magnet", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(UpdateDirectionsSubroutine(subroutines, self._subroutine_runner),
                        text="direction", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(SightSubroutine(subroutines, self._subroutine_runner),
                        text="sight", sticky=tk.N + tk.W + tk.E + tk.S)
        subroutines.add(ChampionshipSubroutine(subroutines, self._subroutine_runner, self._time_service),
                        text="championship", sticky=tk.N + tk.W + tk.E + tk.S)
        return subroutines
    def _make_indicators(self, parent: ttk.Frame) -> Indicators:
        """Build the status indicator strip (objective, light, charge, timer)."""
        return Indicators(parent, self._make_objective, self._make_light, self._make_charge, self._make_timer)
    def _make_directional_control(self, parent: ttk.Frame) -> DirectionalControl:
        """Build the manual directional-control widget."""
        return DirectionalControl(parent, self._subroutine_runner)
    def _make_on_board_camera(self, parent: ttk.Frame) -> OnBoardCamera:
        """Build the on-board camera view."""
        return OnBoardCamera(parent, self._robot_camera_service)
    def _make_objective(self, parent: ttk.Frame) -> Objective:
        """Build the objective display widget."""
        return Objective(parent, self._objective_service)
    def _make_light(self, parent: ttk.Frame) -> Light:
        """Build the light indicator widget."""
        return Light(parent, self._light_service)
    def _make_charge(self, parent: ttk.Frame) -> Charge:
        """Build the charge indicator widget."""
        return Charge(parent, self._prehensor_service)
    def _make_timer(self, parent: ttk.Frame) -> Timer:
        """Build the timer widget."""
        return Timer(parent, self._time_service)
    def _make_world_camera_selector(self, parent: ttk.Frame) -> WorldCameraSelector:
        """Build the world-camera view with path-drawing overlay."""
        return WorldCameraSelector(parent, self._vision_service, self._path_drawing_service)
| 61.350649
| 116
| 0.771804
|
7ad76b4f807d84cf8f6796168fc87a7ecbe4fd01
| 2,890
|
py
|
Python
|
src/function_approx.py
|
AI-Gio/LunarLander
|
867425223a186bc0dd3378e3bd2888cd525d2834
|
[
"MIT"
] | null | null | null |
src/function_approx.py
|
AI-Gio/LunarLander
|
867425223a186bc0dd3378e3bd2888cd525d2834
|
[
"MIT"
] | null | null | null |
src/function_approx.py
|
AI-Gio/LunarLander
|
867425223a186bc0dd3378e3bd2888cd525d2834
|
[
"MIT"
] | null | null | null |
import tensorflow.compat.v1 as tf
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import warnings
# Run TensorFlow in 1.x compatibility mode (graph execution, no eager).
tf.disable_v2_behavior()
# Suppress UserWarnings. NOTE(review): this filter is process-wide and hides
# every UserWarning, not just the TF/Keras ones — confirm that is intended.
warnings.filterwarnings("ignore", category=UserWarning)
class FunctionApprox:
    """
    The network class of the simulation. Used as to create a policy network instance and a
    target network instance. Contains an artificial neural network from the keras library.
    The output of the neural network is a list of q-values representative of each possible action.
    """
    def __init__(self, learning_rate: float):
        """Build and compile the dense Q-network.

        :param learning_rate: step size for the Adam optimizer
        """
        # Create model: 8 state inputs -> hidden layers -> 4 action Q-values
        self.model = Sequential()
        # Add layers
        self.model.add(Dense(input_dim=8, units=1))
        self.model.add(Dense(32, name="1"))
        self.model.add(Dense(64, name="2"))
        self.model.add(Dense(4, name="Output"))
        # Make Adam Optimizer with mean-squared-error loss
        adam = tf.keras.optimizers.Adam(learning_rate=learning_rate, name="Adam")
        loss = tf.keras.losses.MeanSquaredError(name="mean_squared_error")
        # Compile model
        self.model.compile(optimizer=adam, loss=loss)
    def q_values(self, states: list) -> np.ndarray:
        """
        Feeds list of states in model to predict and gives a list of
        Q-values, one for each action, in return.
        :param states: list of environment states to evaluate
        :return: np.ndarray of predictions, one row per state
        """
        predictions = self.model.predict(np.array(states))
        return predictions
    def save_network(self, filename):
        """
        Save the model at [filename].h5
        """
        # Bug fix: the filename argument was previously ignored, so every
        # save went to the same hard-coded path.
        self.model.save(f"{filename}.h5")
    def load_network(self, filename):
        """
        Load model from [filename].h5 by overwriting current model with
        a keras.model object.
        """
        self.model = tf.keras.models.load_model(f"{filename}.h5")
    def train(self, x, y, batch_size, epochs: int, verbose: bool):
        """
        Train the model with the given params
        :param x: set with train data
        :param y: set with labels
        :param batch_size: how much of the data should be used to train on
        :param epochs: how many epochs the model runs
        :param verbose: show training progress bar or not
        """
        self.model.fit(x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=verbose)
    def set_weights(self, weights: np.ndarray):
        """
        Set the weights of the model layer by layer
        :param weights: all weights of model, one entry per layer
        """
        layers = self.model.layers
        for i, lw in enumerate(layers):
            lw.set_weights(weights[i])
    def get_weights(self) -> np.ndarray:
        """
        Get all weights from each layers in model
        :return: object-dtype numpy array containing each layer's weights
        """
        layers = []
        for layer in self.model.layers:
            layers.append(layer.get_weights())
        return np.array(layers, dtype=object)
| 34
| 98
| 0.634602
|
1fe662bc730b692cd872c696bb2f433c2dc69d6a
| 1,791
|
py
|
Python
|
CondTools/Ecal/python/copyFgrGroup_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
CondTools/Ecal/python/copyFgrGroup_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 7
|
2016-07-17T02:34:54.000Z
|
2019-08-13T07:58:37.000Z
|
CondTools/Ecal/python/copyFgrGroup_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
# CMSSW configuration: copy an EcalTPGFineGrainEBGroup record from the online
# DB (P5) into a destination conditions database/tag supplied on the command
# line via conddb_init.
import FWCore.ParameterSet.Config as cms
import CondTools.Ecal.conddb_init as conddb_init
import CondTools.Ecal.db_credentials as auth
process = cms.Process("ProcessOne")
# Verbose logging to stdout for debugging the copy.
process.MessageLogger = cms.Service("MessageLogger",
    debugModules = cms.untracked.vstring('*'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('DEBUG')
    ),
    destinations = cms.untracked.vstring('cout')
)
# Single-run empty source: one IOV at run 2000000, keyed by run number.
process.source = cms.Source("EmptyIOVSource",
    lastValue = cms.uint64(2000000),
    timetype = cms.string('runnumber'),
    firstValue = cms.uint64(2000000),
    interval = cms.uint64(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
# Destination connection string and tag come from the conddb_init CLI options.
process.CondDB.connect = conddb_init.options.destinationDatabase
process.CondDB.DBParameters.authenticationPath = ''
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDB,
    toPut = cms.VPSet(cms.PSet(
        record = cms.string('EcalTPGFineGrainEBGroupRcd'),
        tag = cms.string(conddb_init.options.destinationTag)
    ))
)
# Read-only credentials for the online (source) database.
db_service,db_user,db_pwd = auth.get_readOnly_db_credentials()
process.Test1 = cms.EDAnalyzer("ExTestEcalTPGFineGrainEBGroupAnalyzer",
    record = cms.string('EcalTPGFineGrainEBGroupRcd'),
    loggingOn= cms.untracked.bool(True),
    IsDestDbCheckedInQueryLog=cms.untracked.bool(True),
    SinceAppendMode=cms.bool(True),
    Source=cms.PSet(
        firstRun = cms.string('200000'),
        lastRun = cms.string('10000000'),
        OnlineDBSID = cms.string(db_service),
        OnlineDBUser = cms.string(db_user),
        OnlineDBPassword = cms.string( db_pwd ),
        LocationSource = cms.string('P5'),
        Location = cms.string('P5_Co'),
        GenTag = cms.string('GLOBAL'),
        RunType = cms.string('PHYSICS')
    )
)
process.p = cms.Path(process.Test1)
| 31.982143
| 71
| 0.707984
|
819ebcfae70128c9cf852d04ce102d22fb6d1b7a
| 473
|
py
|
Python
|
Lib/site-packages/plotly/validators/scatter/_uid.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scatter/_uid.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2021-03-18T22:27:08.000Z
|
2022-03-11T23:40:50.000Z
|
plotly/validators/scatter/_uid.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``uid`` property of ``scatter`` traces."""

    def __init__(self, plotly_name='uid', parent_name='scatter', **kwargs):
        # Resolve defaults up front; callers may override any of them
        # through kwargs.
        anim = kwargs.pop('anim', True)
        edit_type = kwargs.pop('edit_type', 'plot')
        role = kwargs.pop('role', 'info')
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=anim,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 31.533333
| 75
| 0.627907
|
fa2b9bceb9806774fde46881674df156f4a7885c
| 587
|
py
|
Python
|
rowingdata/painsled_desktop_plot.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 4
|
2017-04-24T15:20:46.000Z
|
2021-02-12T23:03:29.000Z
|
build/lib/rowingdata/painsled_desktop_plot.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 38
|
2016-11-02T07:57:50.000Z
|
2022-01-22T13:25:14.000Z
|
build/lib/rowingdata/painsled_desktop_plot.py
|
sanderroosendaal/rowingdata
|
efd8aa1566a926f11fb3f6b5b340665bc26028c4
|
[
"MIT"
] | 6
|
2017-01-19T21:39:46.000Z
|
2021-11-16T14:48:58.000Z
|
#! /usr/bin/python
from __future__ import absolute_import
from __future__ import print_function
from . import rowingdata
from sys import argv
def main():
    """CLI entry point: parse a painsled desktop log, export CSV, and plot.

    argv[1] is the painsled file to read; argv[2] (optional) is a rower
    profile file, defaulting to "defaultrower.txt".
    """
    source_file = argv[1]
    # Optional second argument selects the rower profile.
    rower_file = argv[2] if len(argv) > 2 else "defaultrower.txt"
    rower = rowingdata.getrower(rower_file)
    csv_file = source_file + "_o.csv"
    parser = rowingdata.painsledDesktopParser(source_file)
    parser.write_csv(csv_file)
    session = rowingdata.rowingdata(csv_file, rowtype="Indoor Rower", rower=rower)
    session.plotmeters_erg()
    print((session.allstats()))
    print(("done " + source_file))
| 18.935484
| 73
| 0.708688
|
89289ced29c3cba555d674e90d5616a41995d742
| 24,861
|
py
|
Python
|
lib/galaxy/datatypes/text.py
|
uio-bmi/galaxy-graph-peak-caller
|
0e0e8e9bd6d461a4e25b49cea2e6753043f747e0
|
[
"CC-BY-3.0"
] | 2
|
2017-10-23T14:44:12.000Z
|
2018-01-14T10:37:28.000Z
|
lib/galaxy/datatypes/text.py
|
uio-bmi/galaxy-graph-peak-caller
|
0e0e8e9bd6d461a4e25b49cea2e6753043f747e0
|
[
"CC-BY-3.0"
] | 30
|
2016-10-20T15:35:12.000Z
|
2018-10-02T15:59:54.000Z
|
lib/galaxy/datatypes/text.py
|
uio-bmi/galaxy-graph-peak-caller
|
0e0e8e9bd6d461a4e25b49cea2e6753043f747e0
|
[
"CC-BY-3.0"
] | 4
|
2017-06-12T09:54:31.000Z
|
2019-03-15T12:02:39.000Z
|
# -*- coding: utf-8 -*-
""" Clearing house for generic text datatypes that are not XML or tabular.
"""
import gzip
import json
import logging
import os
import re
import subprocess
import tempfile
from six.moves import shlex_quote
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement, MetadataParameter
from galaxy.datatypes.sniff import iter_headers
from galaxy.util import nice_size, string_as_bool
log = logging.getLogger(__name__)
class Html(Text):
    """Class describing an html file"""
    edam_format = "format_2331"
    file_ext = "html"

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and blurb for an HTML dataset."""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = "HTML file"
            dataset.blurb = nice_size(dataset.get_size())

    def get_mime(self):
        """Returns the mime type of the datatype"""
        return 'text/html'

    def sniff(self, filename):
        """
        Determines whether the file is in html format

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'complete.bed' )
        >>> Html().sniff( fname )
        False
        >>> fname = get_test_fname( 'file.html' )
        >>> Html().sniff( fname )
        True
        """
        # A line containing "<html>" (case-insensitive) marks the file as HTML.
        for header in iter_headers(filename, None):
            if header and '<html>' in header[0].lower():
                return True
        return False
class Json(Text):
    """Generic JSON datatype."""
    edam_format = "format_3464"
    file_ext = "json"

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and blurb for a JSON dataset."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name)
            dataset.blurb = "JavaScript Object Notation (JSON)"
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'

    def get_mime(self):
        """Returns the mime type of the datatype"""
        return 'application/json'

    def sniff(self, filename):
        """
        Try to load the string with the json module. If successful it's a json file.
        """
        return self._looks_like_json(filename)

    def _looks_like_json(self, filename):
        """Heuristically decide whether ``filename`` contains JSON.

        Small files are fully parsed; large files are judged by whether the
        first non-whitespace character opens an object or array.
        """
        # Pattern used by SequenceSplitLocations
        if os.path.getsize(filename) < 50000:
            # If the file is small enough - don't guess just check.
            try:
                # Bug fix: use a context manager so the file handle is closed
                # instead of leaked.
                with open(filename, "r") as fh:
                    json.load(fh)
                return True
            except Exception:
                return False
        else:
            with open(filename, "r") as fh:
                while True:
                    # Grab first chunk of file and see if it looks like json.
                    start = fh.read(100).strip()
                    if start:
                        # simple types are valid JSON as well - but would such a file
                        # be interesting as JSON in Galaxy?
                        return start.startswith("[") or start.startswith("{")
                    return False

    def display_peek(self, dataset):
        """Return the stored peek, or a size-labelled fallback string."""
        try:
            return dataset.peek
        except Exception:
            return "JSON file (%s)" % (nice_size(dataset.get_size()))
class Ipynb(Json):
    """Jupyter/IPython notebook datatype (JSON with notebook keys)."""
    file_ext = "ipynb"

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and blurb for a notebook dataset."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name)
            dataset.blurb = "Jupyter Notebook"
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'

    def sniff(self, filename):
        """
        Try to load the string with the json module. If successful, and the
        document carries notebook-specific keys, it's an ipynb file.
        """
        if self._looks_like_json(filename):
            try:
                # Bug fix: close the file handle via a context manager
                # instead of leaking it.
                with open(filename) as fh:
                    ipynb = json.load(fh)
                # Real notebooks always carry 'nbformat' and 'metadata'.
                if ipynb.get('nbformat', False) is not False and ipynb.get('metadata', False):
                    return True
                else:
                    return False
            except Exception:
                return False
        # Bug fix: previously fell through returning None for non-JSON input;
        # sniffers are expected to return a boolean.
        return False

    def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, **kwd):
        """Display the notebook, rendering to HTML only if the instance is
        configured to trust notebook conversion."""
        config = trans.app.config
        trust = getattr(config, 'trust_jupyter_notebook_conversion', False)
        if trust:
            return self._display_data_trusted(trans, dataset, preview=preview, filename=filename, to_ext=to_ext, **kwd)
        else:
            return super(Ipynb, self).display_data(trans, dataset, preview=preview, filename=filename, to_ext=to_ext, **kwd)

    def _display_data_trusted(self, trans, dataset, preview=False, filename=None, to_ext=None, **kwd):
        """Render the notebook to HTML with ``jupyter nbconvert`` for preview;
        fall back to the raw file if conversion fails."""
        preview = string_as_bool(preview)
        if to_ext or not preview:
            return self._serve_raw(trans, dataset, to_ext, **kwd)
        else:
            # Reserve a temporary output path for nbconvert.
            with tempfile.NamedTemporaryFile(delete=False) as ofile_handle:
                ofilename = ofile_handle.name
            try:
                cmd = ['jupyter', 'nbconvert', '--to', 'html', '--template', 'full', dataset.file_name, '--output', ofilename]
                subprocess.check_call(cmd)
                # nbconvert appends .html to the requested output name.
                ofilename = '%s.html' % ofilename
            except subprocess.CalledProcessError:
                ofilename = dataset.file_name
                log.exception('Command "%s" failed. Could not convert the Jupyter Notebook to HTML, defaulting to plain text.', ' '.join(map(shlex_quote, cmd)))
            return open(ofilename)

    def set_meta(self, dataset, **kwd):
        """No notebook-specific metadata is extracted."""
        pass
class Biom1(Json):
    """
    BIOM version 1.0 file format description
    http://biom-format.org/documentation/format_versions/biom-1.0.html
    """
    file_ext = "biom1"
    edam_format = "format_3746"
    # One metadata element per top-level BIOM key; populated by set_meta().
    MetadataElement(name="table_rows", default=[], desc="table_rows", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
    MetadataElement(name="table_matrix_element_type", default="", desc="table_matrix_element_type", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value="")
    MetadataElement(name="table_format", default="", desc="table_format", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value="")
    MetadataElement(name="table_generated_by", default="", desc="table_generated_by", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value="")
    MetadataElement(name="table_matrix_type", default="", desc="table_matrix_type", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value="")
    MetadataElement(name="table_shape", default=[], desc="table_shape", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
    MetadataElement(name="table_format_url", default="", desc="table_format_url", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value="")
    MetadataElement(name="table_date", default="", desc="table_date", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value="")
    MetadataElement(name="table_type", default="", desc="table_type", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value="")
    MetadataElement(name="table_id", default=None, desc="table_id", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value=None)
    MetadataElement(name="table_columns", default=[], desc="table_columns", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
    def set_peek(self, dataset, is_multi_byte=False):
        """Set the JSON peek, then override the blurb with the BIOM label."""
        super(Biom1, self).set_peek(dataset)
        if not dataset.dataset.purged:
            dataset.blurb = "Biological Observation Matrix v1"
    def sniff(self, filename):
        """Return True if the file is JSON carrying a BIOM "format" marker."""
        is_biom = False
        if self._looks_like_json(filename):
            is_biom = self._looks_like_biom(filename)
        return is_biom
    def _looks_like_biom(self, filepath, load_size=50000):
        """
        Scan the file in overlapping chunks for the BIOM "format" marker.

        @param filepath: [str] The path to the evaluated file.
        @param load_size: [int] The size of the file block load in RAM (in
        bytes).
        """
        is_biom = False
        # Half-size chunks are concatenated pairwise so the marker is found
        # even when it straddles a chunk boundary.
        segment_size = int(load_size / 2)
        try:
            with open(filepath, "r") as fh:
                prev_str = ""
                segment_str = fh.read(segment_size)
                # Only JSON objects (starting with '{') can be BIOM tables.
                if segment_str.strip().startswith('{'):
                    while segment_str:
                        current_str = prev_str + segment_str
                        if '"format"' in current_str:
                            # Strip all whitespace so the key/value match is
                            # insensitive to JSON formatting.
                            current_str = re.sub(r'\s', '', current_str)
                            if '"format":"BiologicalObservationMatrix' in current_str:
                                is_biom = True
                                break
                        prev_str = segment_str
                        segment_str = fh.read(segment_size)
        except Exception:
            pass
        return is_biom
    def set_meta(self, dataset, **kwd):
        """
        Store metadata information from the BIOM file.
        """
        if dataset.has_data():
            with open(dataset.file_name) as fh:
                try:
                    json_dict = json.load(fh)
                except Exception:
                    # Not parseable JSON: leave metadata at defaults.
                    return

                # rows/columns are lists of dicts; keep only their "id"s.
                def _transform_dict_list_ids(dict_list):
                    if dict_list:
                        return [x.get('id', None) for x in dict_list]
                    return []

                b_transform = {'rows': _transform_dict_list_ids, 'columns': _transform_dict_list_ids}
                # Map each BIOM top-level key to its metadata element.
                for (m_name, b_name) in [('table_rows', 'rows'),
                                         ('table_matrix_element_type', 'matrix_element_type'),
                                         ('table_format', 'format'),
                                         ('table_generated_by', 'generated_by'),
                                         ('table_matrix_type', 'matrix_type'),
                                         ('table_shape', 'shape'),
                                         ('table_format_url', 'format_url'),
                                         ('table_date', 'date'),
                                         ('table_type', 'type'),
                                         ('table_id', 'id'),
                                         ('table_columns', 'columns')]:
                    try:
                        metadata_value = json_dict.get(b_name, None)
                        if b_name in b_transform:
                            metadata_value = b_transform[b_name](metadata_value)
                        setattr(dataset.metadata, m_name, metadata_value)
                    except Exception:
                        # Best effort: skip any key that fails to transform.
                        pass
class Obo(Text):
    """
    OBO file format description
    https://owlcollab.github.io/oboformat/doc/GO.format.obo-1_2.html
    """
    edam_data = "data_0582"
    edam_format = "format_2549"
    file_ext = "obo"

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and blurb for an OBO dataset."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name)
            dataset.blurb = "Open Biomedical Ontology (OBO)"
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'

    def sniff(self, filename):
        """
        Try to guess the Obo filetype.
        It usually starts with a "format-version:" string and has several stanzas which starts with "id:".
        """
        stanza = re.compile(r'^\[.*\]$')
        with open(filename) as handle:
            first_line = handle.readline()
            if not first_line.startswith('format-version:'):
                return False
            for line in handle:
                if stanza.match(line.strip()):
                    # a stanza needs to begin with an ID tag
                    # Bug fix: file objects lost .next() in Python 3 — use the
                    # next() built-in; the '' default also avoids StopIteration
                    # when a stanza header is the last line of the file.
                    if next(handle, '').startswith('id:'):
                        return True
        return False
class Arff(Text):
    """
    An ARFF (Attribute-Relation File Format) file is an ASCII text file that describes a list of instances sharing a set of attributes.
    http://weka.wikispaces.com/ARFF
    """
    edam_format = "format_3581"
    file_ext = "arff"
    """Add metadata elements"""
    MetadataElement(name="comment_lines", default=0, desc="Number of comment lines", readonly=True, optional=True, no_value=0)
    MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=True, no_value=0)
    def set_peek(self, dataset, is_multi_byte=False):
        """Set peek/blurb, including the comment and attribute counts."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name)
            dataset.blurb = "Attribute-Relation File Format (ARFF)"
            dataset.blurb += ", %s comments, %s attributes" % (dataset.metadata.comment_lines, dataset.metadata.columns)
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'
    def sniff(self, filename):
        """
        Try to guess the Arff filetype.
        An ARFF file needs an @RELATION and at least one @ATTRIBUTE header
        before its @DATA section.
        """
        with open(filename) as handle:
            relation_found = False
            attribute_found = False
            for line_count, line in enumerate(handle):
                if line_count > 1000:
                    # only investigate the first 1000 lines
                    return False
                line = line.strip()
                if not line:
                    continue
                start_string = line[:20].upper()
                if start_string.startswith("@RELATION"):
                    relation_found = True
                elif start_string.startswith("@ATTRIBUTE"):
                    attribute_found = True
                elif start_string.startswith("@DATA"):
                    # @DATA should be the last data block
                    if relation_found and attribute_found:
                        return True
        return False
    def set_meta(self, dataset, **kwd):
        """
        Trying to count the comment lines and the number of columns included.
        A typical ARFF data block looks like this:
        @DATA
        5.1,3.5,1.4,0.2,Iris-setosa
        4.9,3.0,1.4,0.2,Iris-setosa
        """
        comment_lines = column_count = 0
        if dataset.has_data():
            first_real_line = False
            data_block = False
            with open(dataset.file_name) as handle:
                for line in handle:
                    line = line.strip()
                    if not line:
                        continue
                    # Leading '%' lines before any content are comments.
                    if line.startswith('%') and not first_real_line:
                        comment_lines += 1
                    else:
                        first_real_line = True
                    # The first non-empty line after @DATA fixes column_count.
                    if data_block:
                        if line.startswith('{'):
                            # Sparse representation
                            """
                            @data
                            0, X, 0, Y, "class A", {5}
                            or
                            @data
                            {1 X, 3 Y, 4 "class A"}, {5}
                            """
                            token = line.split('}', 1)
                            first_part = token[0]
                            last_column = first_part.split(',')[-1].strip()
                            numeric_value = last_column.split()[0]
                            column_count = int(numeric_value)
                            if len(token) > 1:
                                # we have an additional weight
                                column_count -= 1
                        else:
                            # Dense representation: count comma-separated values.
                            columns = line.strip().split(',')
                            column_count = len(columns)
                            if columns[-1].strip().startswith('{'):
                                # we have an additional weight at the end
                                column_count -= 1
                        # We have now the column_count and we know the initial comment lines. So we can terminate here.
                        break
                    if line[:5].upper() == "@DATA":
                        data_block = True
        dataset.metadata.comment_lines = comment_lines
        dataset.metadata.columns = column_count
class SnpEffDb(Text):
    """Class describing a SnpEff genome build"""
    edam_format = "format_3624"
    file_ext = "snpeffdb"
    MetadataElement(name="genome_version", default=None, desc="Genome Version", readonly=True, visible=True, no_value=None)
    MetadataElement(name="snpeff_version", default="SnpEff4.0", desc="SnpEff Version", readonly=True, visible=True, no_value=None)
    MetadataElement(name="regulation", default=[], desc="Regulation Names", readonly=True, visible=True, no_value=[], optional=True)
    MetadataElement(name="annotation", default=[], desc="Annotation Names", readonly=True, visible=True, no_value=[], optional=True)

    def __init__(self, **kwd):
        Text.__init__(self, **kwd)

    # The SnpEff version line was added in SnpEff version 4.1
    def getSnpeffVersionFromFile(self, path):
        """Return e.g. 'SnpEff4.1' from a gzipped snpEffectPredictor.bin,
        or None if the version header is absent/unreadable."""
        snpeff_version = None
        try:
            with gzip.open(path, 'rb') as fh:
                buf = fh.read(100)
            # Bug fix: gzip in binary mode yields bytes; decode before
            # matching, otherwise the str regex raised TypeError on Python 3
            # and the version was never detected.
            lines = buf.decode('utf-8', 'replace').splitlines()
            m = re.match(r'^(SnpEff)\s+(\d+\.\d+).*$', lines[0].strip())
            if m:
                snpeff_version = m.groups()[0] + m.groups()[1]
        except Exception:
            pass
        return snpeff_version

    def set_meta(self, dataset, **kwd):
        """Scan the extra files directory for the genome build contents and
        record genome/SnpEff versions plus regulation/annotation names; then
        write a human-readable summary into the primary file."""
        Text.set_meta(self, dataset, **kwd)
        data_dir = dataset.extra_files_path
        # search data_dir/genome_version for files
        regulation_pattern = 'regulation_(.+).bin'
        # annotation files that are included in snpEff by a flag
        annotations_dict = {'nextProt.bin': '-nextprot', 'motif.bin': '-motif', 'interactions.bin': '-interaction'}
        regulations = []
        annotations = []
        genome_version = None
        snpeff_version = None
        if data_dir and os.path.isdir(data_dir):
            for root, dirs, files in os.walk(data_dir):
                for fname in files:
                    if fname.startswith('snpEffectPredictor'):
                        # if snpEffectPredictor.bin download succeeded
                        genome_version = os.path.basename(root)
                        dataset.metadata.genome_version = genome_version
                        # read the first line of the gzipped snpEffectPredictor.bin file to get the SnpEff version
                        snpeff_version = self.getSnpeffVersionFromFile(os.path.join(root, fname))
                        if snpeff_version:
                            dataset.metadata.snpeff_version = snpeff_version
                    else:
                        m = re.match(regulation_pattern, fname)
                        if m:
                            name = m.groups()[0]
                            regulations.append(name)
                        elif fname in annotations_dict:
                            value = annotations_dict[fname]
                            name = value.lstrip('-')
                            annotations.append(name)
            dataset.metadata.regulation = regulations
            dataset.metadata.annotation = annotations
            try:
                with open(dataset.file_name, 'w') as fh:
                    # Bug fix: parenthesize the conditional so the fallback
                    # text also gets its trailing newline (previously the
                    # ternary bound to the whole formatted string and wrote
                    # 'Genome unknown' with no newline).
                    fh.write("%s\n" % (genome_version if genome_version else 'Genome unknown'))
                    fh.write("%s\n" % (snpeff_version if snpeff_version else 'SnpEff version unknown'))
                    if annotations:
                        fh.write("annotations: %s\n" % ','.join(annotations))
                    if regulations:
                        fh.write("regulations: %s\n" % ','.join(regulations))
            except Exception:
                # Summary file is informational only; never fail set_meta on it.
                pass
class SnpSiftDbNSFP(Text):
    """Class describing a dbNSFP database prepared for use by SnpSift dbnsfp """
    MetadataElement(name='reference_name', default='dbSNFP', desc='Reference Name', readonly=True, visible=True, set_in_upload=True, no_value='dbSNFP')
    MetadataElement(name="bgzip", default=None, desc="dbNSFP bgzip", readonly=True, visible=True, no_value=None)
    MetadataElement(name="index", default=None, desc="Tabix Index File", readonly=True, visible=True, no_value=None)
    MetadataElement(name="annotation", default=[], desc="Annotation Names", readonly=True, visible=True, no_value=[])
    file_ext = "snpsiftdbnsfp"
    composite_type = 'auto_primary_file'
    allow_datatype_change = False
    """
    ## The dbNSFP file is a tabular file with 1 header line
    ## The first 4 columns are required to be: chrom	pos	ref	alt
    ## These match columns 1,2,4,5 of the VCF file
    ## SnpSift requires the file to be block-gzipped and the indexed with samtools tabix
    ## Example:
    ## Compress using block-gzip algorithm
    bgzip dbNSFP2.3.txt
    ## Create tabix index
    tabix -s 1 -b 2 -e 2 dbNSFP2.3.txt.gz
    """
    def __init__(self, **kwd):
        """Register the two composite members: the bgzipped data file and
        its tabix index, both named after the reference_name metadata."""
        Text.__init__(self, **kwd)
        self.add_composite_file('%s.gz', description='dbNSFP bgzip', substitute_name_with_metadata='reference_name', is_binary=True)
        self.add_composite_file('%s.gz.tbi', description='Tabix Index File', substitute_name_with_metadata='reference_name', is_binary=True)
    def init_meta(self, dataset, copy_from=None):
        """Delegate metadata initialization to the Text base class."""
        Text.init_meta(self, dataset, copy_from=copy_from)
    def generate_primary_file(self, dataset=None):
        """
        This is called only at upload to write the html file
        cannot rename the datasets here - they come with the default unfortunately
        """
        return '<html><head><title>SnpSiftDbNSFP Composite Dataset</title></head></html>'
    def regenerate_primary_file(self, dataset):
        """
        cannot do this until we are setting metadata
        """
        # Rewrite the primary file as a plain-text summary of the composite.
        annotations = "dbNSFP Annotations: %s\n" % ','.join(dataset.metadata.annotation)
        with open(dataset.file_name, 'a') as f:
            if dataset.metadata.bgzip:
                bn = dataset.metadata.bgzip
                f.write(bn)
                f.write('\n')
            f.write(annotations)
    def set_meta(self, dataset, overwrite=True, **kwd):
        """Discover the .gz/.tbi composite members in the extra files path and
        record the annotation column names from the dbNSFP header line."""
        try:
            efp = dataset.extra_files_path
            if os.path.exists(efp):
                flist = os.listdir(efp)
                for i, fname in enumerate(flist):
                    if fname.endswith('.gz'):
                        dataset.metadata.bgzip = fname
                        try:
                            with gzip.open(os.path.join(efp, fname), 'r') as fh:
                                buf = fh.read(5000)
                            # NOTE(review): gzip mode 'r' yields bytes; on
                            # Python 3 the str split('\t') below raises and is
                            # only logged, leaving annotation unset — confirm
                            # whether this path is still exercised on py3.
                            lines = buf.splitlines()
                            headers = lines[0].split('\t')
                            # Columns 1-4 are chrom/pos/ref/alt; the rest are
                            # annotation names.
                            dataset.metadata.annotation = headers[4:]
                        except Exception as e:
                            log.warning("set_meta fname: %s %s" % (fname, str(e)))
                    if fname.endswith('.tbi'):
                        dataset.metadata.index = fname
                self.regenerate_primary_file(dataset)
        except Exception as e:
            log.warning("set_meta fname: %s %s" % (dataset.file_name if dataset and dataset.file_name else 'Unkwown', str(e)))
    def set_peek(self, dataset, is_multi_byte=False):
        """Peek shows the reference name and its annotation columns."""
        if not dataset.dataset.purged:
            dataset.peek = '%s : %s' % (dataset.metadata.reference_name, ','.join(dataset.metadata.annotation))
            dataset.blurb = '%s' % dataset.metadata.reference_name
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disc'
class IQTree(Text):
    """IQ-TREE format"""
    file_ext = 'iqtree'

    def sniff(self, filename):
        """
        Detect the IQTree file

        Scattered text file containing various headers and data
        types.

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname('example.iqtree')
        >>> IQTree().sniff(fname)
        True

        >>> fname = get_test_fname('temp.txt')
        >>> IQTree().sniff(fname)
        False

        >>> fname = get_test_fname('test_tab1.tabular')
        >>> IQTree().sniff(fname)
        False
        """
        # An IQ-TREE report begins with the literal string "IQ-TREE".
        # (Removed an unreachable `return False` that followed this block.)
        with open(filename, 'r') as fio:
            return fio.read(7) == "IQ-TREE"
| 43.311847
| 182
| 0.560315
|
64dbb6604ce1a311b1b8d1d483df704678a0a6fe
| 695
|
py
|
Python
|
django_pages/site/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 3
|
2015-11-24T02:30:48.000Z
|
2018-11-01T10:10:24.000Z
|
django_pages/site/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 1
|
2015-04-18T16:37:36.000Z
|
2015-04-18T16:37:36.000Z
|
django_pages/site/__init__.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 2
|
2015-11-24T02:01:00.000Z
|
2019-04-09T15:33:56.000Z
|
# -*- encoding: utf-8 -*-
from ..common.errors import ConfigurationError
from .models import Site, Script
def get_site():
    """
    checks for site with pk=1
    @return Site object
    @raises ConfigurationError
    """
    try:
        # Exactly one Site row (pk=1) is expected to exist.
        return Site.objects.get(pk=1)
    except Site.DoesNotExist:
        raise ConfigurationError('There are no Site\'s, please create one in admin')
    except Site.MultipleObjectsReturned:
        raise ConfigurationError('There is more than one site, please make sure there is exactly one, this feature may be changed in future')
def get_scripts():
    """
    Returns all scripts from DB
    """
    scripts = Script.objects.all()
    return scripts
| 23.166667
| 141
| 0.667626
|
dd61b51e35060ad9721104c25ccb8cb5c9280e12
| 803
|
py
|
Python
|
pony_barn/settings/base_settings.py
|
ericholscher/pony_barn
|
8294a816c1db81582b135e71d670d18295718f4e
|
[
"MIT"
] | 3
|
2015-05-19T02:50:05.000Z
|
2015-11-08T11:35:27.000Z
|
pony_barn/settings/base_settings.py
|
ericholscher/pony_barn
|
8294a816c1db81582b135e71d670d18295718f4e
|
[
"MIT"
] | null | null | null |
pony_barn/settings/base_settings.py
|
ericholscher/pony_barn
|
8294a816c1db81582b135e71d670d18295718f4e
|
[
"MIT"
] | null | null | null |
# NOTE(review): this file is a Django-settings *template* — the `{% ... %}`
# and `{{ ... }}` constructs below are template tags substituted by the
# build tooling, so the raw file is not importable as Python until rendered.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = ( )
# Absolute directory containing this settings file.
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
# Database settings are emitted only when the corresponding template
# variable was supplied at render time.
{% if db_engine %}
DATABASE_ENGINE = '{{ db_engine }}'
{% endif %}
{% if db_name %}
DATABASE_NAME = '{{ db_name }}'
{% endif %}
{% if db_pass %}
DATABASE_PASSWORD = '{{ db_pass }}'
{% endif %}
{% if db_user %}
DATABASE_USER = '{{ db_user }}'
{% endif %}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
)
ROOT_URLCONF = 'urls'
# Optionally extend INSTALLED_APPS with extra apps supplied at render time.
{% if more_installed %}
INSTALLED_APPS += (
{% for app in more_installed %}
    '{{ app }}',
{% endfor %}
)
{% endif %}
| 18.25
| 52
| 0.631382
|
74cc99dea2d3271134e39e2621b8183df99be6f9
| 1,820
|
py
|
Python
|
azure-graphrbac/azure/graphrbac/models/password_credential.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-graphrbac/azure/graphrbac/models/password_credential.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-graphrbac/azure/graphrbac/models/password_credential.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PasswordCredential(Model):
    """Password credential attached to an Active Directory service principal.

    :param start_date: Gets or sets start date
    :type start_date: datetime
    :param end_date: Gets or sets end date
    :type end_date: datetime
    :param key_id: Gets or sets key Id
    :type key_id: str
    :param value: Gets or sets value
    :type value: str
    """

    # Wire-format (de)serialization rules consumed by msrest.
    _attribute_map = {
        'start_date': {'key': 'startDate', 'type': 'iso-8601'},
        'end_date': {'key': 'endDate', 'type': 'iso-8601'},
        'key_id': {'key': 'keyId', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, start_date=None, end_date=None, key_id=None, value=None):
        self.value = value
        self.key_id = key_id
        self.end_date = end_date
        self.start_date = start_date
| 35.686275
| 80
| 0.629121
|
d7b59866b224a4818f90369f22b4c9758bf1f069
| 9,669
|
py
|
Python
|
lib/model/rpn/tracking_proposal_target_layer.py
|
Feynman27/pytorch-detect-rfcn
|
33cc264306be841ffaf997c4c458ca9ac2329378
|
[
"MIT"
] | 32
|
2018-10-10T10:17:06.000Z
|
2021-08-18T11:07:51.000Z
|
lib/model/rpn/tracking_proposal_target_layer.py
|
Feynman27/pytorch-detect-rfcn
|
33cc264306be841ffaf997c4c458ca9ac2329378
|
[
"MIT"
] | 9
|
2018-10-12T01:54:51.000Z
|
2020-11-22T07:36:19.000Z
|
lib/model/rpn/tracking_proposal_target_layer.py
|
Feynman27/pytorch-detect-rfcn
|
33cc264306be841ffaf997c4c458ca9ac2329378
|
[
"MIT"
] | 4
|
2019-03-08T15:55:08.000Z
|
2019-08-28T10:36:15.000Z
|
from __future__ import absolute_import
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from ..utils.config import cfg
from .bbox_transform import bbox_overlaps_batch, bbox_transform_batch
import pdb
class _TrackingProposalTargetLayer(nn.Module):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets.

    Layout used throughout (derived from the indexing below):
    gt_boxes is indexed as [twin, batch, box, field] where fields :4 are box
    coordinates, field 4 is the class label and field 5 is the track id.
    """
    def __init__(self, nclasses):
        super(_TrackingProposalTargetLayer, self).__init__()
        self._num_classes = nclasses
        # Normalization constants and per-coordinate loss weights from config.
        self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
        self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)
        self.BBOX_INSIDE_WEIGHTS = torch.FloatTensor(cfg.TRAIN.BBOX_INSIDE_WEIGHTS)
    def forward(self, gt_boxes, num_boxes):
        # Match dtype/device of the incoming ground-truth boxes.
        self.BBOX_NORMALIZE_MEANS = self.BBOX_NORMALIZE_MEANS.type_as(gt_boxes)
        self.BBOX_NORMALIZE_STDS = self.BBOX_NORMALIZE_STDS.type_as(gt_boxes)
        self.BBOX_INSIDE_WEIGHTS = self.BBOX_INSIDE_WEIGHTS.type_as(gt_boxes)
        # Use ground-truth boxes with frame correspondence as the set of candidate rois
        gt_rois = gt_boxes.new(gt_boxes.size()).zero_()
        gt_rois[:,:,:,1:5] = gt_boxes[:,:,:,:4]
        # NOTE(review): gt_rois and gt_trk_ids are computed but never used below.
        gt_trk_ids = gt_boxes[:,:,:,5]
        num_images = 1
        rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)
        fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
        # Guarantee at least one foreground roi per image.
        fg_rois_per_image = 1 if fg_rois_per_image == 0 else fg_rois_per_image
        labels, rois, bbox_targets, bbox_inside_weights = self._sample_gt_rois_pytorch(
            gt_boxes, fg_rois_per_image, rois_per_image, self._num_classes, num_boxes)
        # Outside weights simply mirror where inside weights are active.
        bbox_outside_weights = (bbox_inside_weights > 0).float()
        return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
    def _get_tracking_regression_labels_pytorch(self, bbox_target_data, labels_batch, num_classes):
        """Tracking Bounding-box regression targets (bbox_target_data) are stored in a
        compact form b x N x (class, tx, ty, tw, th)
        This function expands those targets into the 4-of-4*K representation used
        by the network (i.e. only one class has non-zero targets).
        Returns:
            bbox_target (ndarray): b x N x 4K blob of tracking regression targets
            bbox_inside_weights (ndarray): b x N x 4K blob of loss weights
        """
        batch_size = labels_batch.size(0)
        rois_per_image = labels_batch.size(1)
        clss = labels_batch
        bbox_targets = bbox_target_data.new(batch_size, rois_per_image, 4).zero_()
        bbox_inside_weights = bbox_target_data.new(bbox_targets.size()).zero_()
        for b in range(batch_size):
            # assert clss[b].sum() > 0
            # Skip images with no positive labels at all.
            if clss[b].sum() == 0:
                continue
            # Copy targets / enable loss weights only for positive rois.
            inds = torch.nonzero(clss[b] > 0).view(-1)
            for i in range(inds.numel()):
                ind = inds[i]
                bbox_targets[b, ind, :] = bbox_target_data[b, ind, :]
                bbox_inside_weights[b, ind, :] = self.BBOX_INSIDE_WEIGHTS
        return bbox_targets, bbox_inside_weights
    def _compute_targets_pytorch(self, gt_rois_t0, gt_rois_t1):
        """Compute tracking regression targets for an image."""
        assert gt_rois_t0.size(1) == gt_rois_t1.size(1)
        assert gt_rois_t0.size(2) == 4
        assert gt_rois_t1.size(2) == 4
        # NOTE(review): batch_size / rois_per_image are computed but unused.
        batch_size = gt_rois_t0.size(0)
        rois_per_image = gt_rois_t0.size(1)
        targets = bbox_transform_batch(gt_rois_t0, gt_rois_t1)
        # TODO Check if we need this step
        if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
            # Optionally normalize targets by a precomputed mean and stdev
            targets = ((targets - self.BBOX_NORMALIZE_MEANS.expand_as(targets))
                      / self.BBOX_NORMALIZE_STDS.expand_as(targets))
        return targets
    def _get_track_correspondence(self, gt_boxes, num_boxes):
        """Check whether gt track in frame t has correspondence in frame t+tau

        Returns one padded (num_boxes x num_boxes) 0/1 matrix per
        (frame-pair, batch element), concatenated along dim 0, where entry
        [i, j] == 1 iff track id i in the earlier frame equals track id j
        in the later frame.
        """
        n_twins = gt_boxes.size(0)
        batch_size = gt_boxes.size(1)
        correspondence_matrices = []
        for i_leg in range(n_twins-1):
            for j_leg in range(i_leg+1, n_twins):
                for i_batch in range(batch_size):
                    i_num_boxes = num_boxes[i_leg][i_batch][0]
                    j_num_boxes = num_boxes[j_leg][i_batch][0]
                    # If either frame has no boxes there is no correspondence.
                    if j_num_boxes==0 or i_num_boxes==0:
                        padded_corr_matrix = torch.zeros(
                            gt_boxes[i_leg][i_batch].size(0),gt_boxes[i_leg][i_batch].size(0)).type_as(gt_boxes)
                        padded_corr_matrix = padded_corr_matrix.long()
                    else:
                        # Track ids (field 5) of the valid boxes in each frame.
                        twin2_trk_id_set = gt_boxes[j_leg][i_batch][:j_num_boxes, 5]
                        twin1_trk_id_set = gt_boxes[i_leg][i_batch][:i_num_boxes, 5]
                        # Create N_t+tau by N_t correspondence matrix
                        X = twin2_trk_id_set.expand(twin1_trk_id_set.size(0), twin2_trk_id_set.size(0)).t()
                        # transpose to N_t by N_t+tau matrix
                        corr_matrix = (twin1_trk_id_set==X).t()
                        # fatten up with zeros again
                        padding = gt_boxes.size(2)-i_num_boxes
                        padded_corr_matrix = torch.zeros(
                            gt_boxes[i_leg][i_batch].size(0),
                            gt_boxes[i_leg][i_batch].size(0)).type_as(corr_matrix)
                        padded_corr_matrix[:i_num_boxes, :j_num_boxes] = corr_matrix
                        padded_corr_matrix = padded_corr_matrix.long()
                    correspondence_matrices.append(padded_corr_matrix.unsqueeze(0))
        batch_corr_matrices = torch.cat(correspondence_matrices, dim=0)
        return batch_corr_matrices
    def _sample_gt_rois_pytorch(self, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, num_boxes):
        """Generate sample of RoIs comprised of ground-truth rois.

        NOTE(review): fg_rois_per_image / rois_per_image are accepted but not
        used — every ground-truth roi with a cross-frame correspondence is kept.
        """
        n_twins = gt_boxes.size(0)
        batch_size = gt_boxes.size(1)
        num_boxes_per_img = gt_boxes.size(2)
        # N_t by N_t+tau correspondence matrix
        trk_correspondences = self._get_track_correspondence(gt_boxes, num_boxes)
        batch_gt_rois_t0 = gt_boxes.new(batch_size, num_boxes_per_img, 6).zero_()
        batch_gt_rois_t1 = batch_gt_rois_t0.clone()
        labels = gt_boxes[:,:,:,4]
        tracking_labels_batch = labels.new(batch_size, num_boxes_per_img).zero_()
        tracking_rois_batch = gt_boxes.new(batch_size, num_boxes_per_img, 5).zero_()
        #batch_gt_rois_t0=[]
        #batch_gt_rois_t1=[]
        for i_bch in range(batch_size):
            # Rows / columns with at least one correspondence hit.
            row_inds = torch.nonzero(trk_correspondences[i_bch].sum(dim=1)).long().view(-1)
            col_inds = torch.nonzero(trk_correspondences[i_bch].sum(dim=0)).long().view(-1)
            gt_boxes_t0 = gt_boxes[0][i_bch]
            gt_boxes_t1 = gt_boxes[1][i_bch]
            if row_inds.numel()>0 and col_inds.numel()>0:
                # gt rois with correspondence across frames
                # TODO handle case where row_inds and/or col_inds are empty
                # Probably easiest just to filter from roidb
                gt_rois_t0 = torch.index_select(gt_boxes_t0, 0, row_inds)
                gt_rois_t1 = torch.index_select(gt_boxes_t1, 0, col_inds)
                # align tracks across time frames
                _, sorted_gt_inds = torch.sort(gt_rois_t0[:, 5], descending=False)
                gt_rois_t0 = gt_rois_t0[sorted_gt_inds]
                _, sorted_gt_inds = torch.sort(gt_rois_t1[:, 5], descending=False)
                gt_rois_t1 = gt_rois_t1[sorted_gt_inds]
                assert gt_rois_t0.size(0)==gt_rois_t1.size(0), "gt rois dim are not equal."
                temp_num_rois_t0 = gt_rois_t0.size(0)
                temp_num_rois_t1 = gt_rois_t1.size(0)
                batch_gt_rois_t0[i_bch][:temp_num_rois_t0] = gt_rois_t0
                batch_gt_rois_t1[i_bch][:temp_num_rois_t1] = gt_rois_t1
            tracking_labels_batch[i_bch] = batch_gt_rois_t0[i_bch][:, 4] # uncomment this line!
            tracking_rois_batch[i_bch][:,0] = i_bch
            tracking_rois_batch[i_bch][:,1:] = gt_boxes[0][i_bch][:,:4]
        tracking_target_data = self._compute_targets_pytorch(batch_gt_rois_t0[:,:,:4],
                                                             batch_gt_rois_t1[:,:,:4])
        tracking_targets, tracking_inside_weights = \
            self._get_tracking_regression_labels_pytorch(tracking_target_data,
                                                         tracking_labels_batch, num_classes)
        # set tracking rois to gt rois in frame t0
        #tracking_rois_batch = gt_boxes[0][:,:,:5]
        return tracking_labels_batch, tracking_rois_batch, tracking_targets, tracking_inside_weights
| 49.331633
| 112
| 0.626952
|
7052e92e02cf6fe3cc4f48c66958e958f28dd0b7
| 2,338
|
py
|
Python
|
eclcli/bare/v2/port.py
|
shimisho/eclcli
|
a1f55ec9c6a0849c8b2100ddb8938a3bee141100
|
[
"Apache-2.0"
] | null | null | null |
eclcli/bare/v2/port.py
|
shimisho/eclcli
|
a1f55ec9c6a0849c8b2100ddb8938a3bee141100
|
[
"Apache-2.0"
] | null | null | null |
eclcli/bare/v2/port.py
|
shimisho/eclcli
|
a1f55ec9c6a0849c8b2100ddb8938a3bee141100
|
[
"Apache-2.0"
] | null | null | null |
from eclcli.common import command
from eclcli.bare import bare_utils
from eclcli.common import utils
class ListPort(command.Lister):
    """List all nic physical ports for a server"""

    def get_parser(self, prog_name):
        parser = super(ListPort, self).get_parser(prog_name)
        parser.add_argument("server", metavar="<server>",
                            help="Name or ID of server")
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.bare
        # Kept: accessing the manager attribute may trigger lazy client setup.
        identity_client = self.app.client_manager.identity
        search_opts = {}
        self.log.debug('search options: %s', search_opts)
        columns = ('ID', 'Mac Addr', 'Network Physical Port ID', 'Plane',
                   'Hardware ID')
        server = utils.find_resource(client.servers, parsed_args.server)
        ports = client.ports.list(server.id)
        rows = (utils.get_item_properties(port, columns) for port in ports)
        return (columns, rows)
class ShowPort(command.ShowOne):
    """Show nic physical port's detail for a server"""

    def get_parser(self, prog_name):
        parser = super(ShowPort, self).get_parser(prog_name)
        parser.add_argument("server", metavar="<server>",
                            help="Name or ID of server")
        parser.add_argument("port", metavar="<port>",
                            help="Port ID")
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.bare
        # Kept: accessing the manager attribute may trigger lazy client setup.
        identity_client = self.app.client_manager.identity
        server = utils.find_resource(client.servers, parsed_args.server)
        port = client.ports.get(server.id, parsed_args.port)
        columns = ('ID', 'Mac Addr', 'Network Physical Port ID', 'Plane',
                   'Attached Ports', 'Hardware ID')
        formatters = {'Attached Ports': bare_utils._format_dicts_list_generic}
        row = utils.get_item_properties(port, columns, mixed_case_fields=[],
                                        formatters=formatters)
        return columns, row
| 30.763158
| 82
| 0.555603
|
8255933e2a61a376235c2de08e90c66a11149a02
| 3,670
|
py
|
Python
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/delete_environment_v2_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/delete_environment_v2_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/delete_environment_v2_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteEnvironmentV2Request:
    """Request model for deleting an API-gateway environment (auto-generated).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'instance_id': 'str',
        'env_id': 'str'
    }
    attribute_map = {
        'instance_id': 'instance_id',
        'env_id': 'env_id'
    }
    def __init__(self, instance_id=None, env_id=None):
        """DeleteEnvironmentV2Request - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self._env_id = None
        self.discriminator = None
        self.instance_id = instance_id
        self.env_id = env_id
    @property
    def instance_id(self):
        """Gets the instance_id of this DeleteEnvironmentV2Request.

        Instance ID.

        :return: The instance_id of this DeleteEnvironmentV2Request.
        :rtype: str
        """
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this DeleteEnvironmentV2Request.

        Instance ID.

        :param instance_id: The instance_id of this DeleteEnvironmentV2Request.
        :type: str
        """
        self._instance_id = instance_id
    @property
    def env_id(self):
        """Gets the env_id of this DeleteEnvironmentV2Request.

        ID of the environment.

        :return: The env_id of this DeleteEnvironmentV2Request.
        :rtype: str
        """
        return self._env_id
    @env_id.setter
    def env_id(self, env_id):
        """Sets the env_id of this DeleteEnvironmentV2Request.

        ID of the environment.

        :param env_id: The env_id of this DeleteEnvironmentV2Request.
        :type: str
        """
        self._env_id = env_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Legacy Python 2 hack to force a UTF-8 default encoding.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteEnvironmentV2Request):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.214286
| 79
| 0.563488
|
8e168f00b63a1e58442e6d98826a4f1f40e206dc
| 1,883
|
py
|
Python
|
functions_legacy/BackwardSelection.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2021-04-10T13:24:30.000Z
|
2022-03-26T08:20:42.000Z
|
functions_legacy/BackwardSelection.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | null | null | null |
functions_legacy/BackwardSelection.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2019-08-13T22:02:17.000Z
|
2022-02-09T17:49:12.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from numpy import max as npmax
from numpy import zeros, where, mod, ceil
from scipy.misc import comb
plt.style.use('seaborn')
def BackwardSelection(v,data,objective,step=1,k_min=1):
    # Backward stepwise routine for n_-choose-k selection problems
    #  INPUTS
    # v           :[vector](1 x n_) vector of indices
    # data        :[struct] struct with data needed by the objective function
    # objective   :[handle] handle of the objective function
    # step        :[scalar] number of indices to be added at each iteration
    # k_min       :[scalar] minimum number of indices to be chosen (optional default = 1)
    #  OPS
    # O_          :[vector](k_ x 1) objective value for each set v_{k}
    # v_          :[cell](k_ x 1) each row contains a set of indices in ascending order of length
    # v_num       :[vector](k_ x 1) cardinality of each set of indices v_[k]
    # For details on the exercise, see here .
    # NOTE(review): `scipy.misc.comb` (imported at module level) was removed in
    # SciPy >= 1.3; the modern equivalent is `scipy.special.comb`.
    # NOTE(review): `comb(v, step)` returns elementwise binomial-coefficient
    # *counts*, not the list of index combinations the loop below appears to
    # expect (`n[h]` is then a count, not a combination) — verify intent.
    ## Code
    n_ = len(v)
    # step 0
    k_ = int(ceil(n_/step))
    k = k_-1
    k_stop=int(ceil(k_min/step)-mod(k_min,step))-1
    O_ = zeros((k_,1))
    O_[k] = objective(data,v)
    v_ = {}
    v_[k] = v
    v_num = zeros((k_,1))
    v_num[k] = n_
    while k > k_stop:
        n = comb(v,step)
        if n.size==0:
            n = v
        h_ = n.shape[0]
        O_k = zeros((1,h_))
        for h in range(h_):
            # step 1
            v_h = np.setdiff1d(v,n[h])
            # step 2
            O_k[0,h] = objective(data,v_h)
        # step 3
        O_star, h_star = npmax(O_k[0]), np.argmax(O_k[0])
        O_[k-1] = O_star
        # step 4
        v = np.setdiff1d(v,n[h_star])
        v_[k-1] = v
        v_num[k-1] = len(v)
        # step 5
        k = k-1
    # Zero-out entries never filled (cardinality 0).
    # NOTE(review): `v_` is a dict here, so `np.where` over it relies on
    # NumPy broadcasting of a dict object — likely unintended; verify.
    O_ = where(v_num==0,0,O_)
    v_ = where(v_num==0,0,v_)
    v_num = where(v_num==0,0,v_num)
    return O_, v_, v_num
| 28.104478
| 96
| 0.560276
|
833f6930f7f410f1483d4f3df7a2da55725149d7
| 4,942
|
py
|
Python
|
services/web/server/src/simcore_service_webserver/studies_dispatcher/_users.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
services/web/server/src/simcore_service_webserver/studies_dispatcher/_users.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
services/web/server/src/simcore_service_webserver/studies_dispatcher/_users.py
|
odeimaiz/osparc-simcore
|
71c2fc58dcfe067487dcd75cb70298a4d6237e97
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
""" Users management
Keeps functionality that couples with the following app modules
- users,
- login
- security
- resource_manager
"""
import logging
from typing import Dict, Optional
from aiohttp import web
from aioredlock import Aioredlock
from pydantic import BaseModel
from ..login.cfg import get_storage
from ..login.handlers import ACTIVE, GUEST
from ..login.utils import get_client_ip, get_random_string
from ..resource_manager.config import GUEST_USER_RC_LOCK_FORMAT
from ..resource_manager.redis import get_redis_lock_manager
from ..security_api import authorized_userid, encrypt_password, is_anonymous, remember
from ..users_api import get_user
from ..users_exceptions import UserNotFoundError
log = logging.getLogger(__name__)
class UserInfo(BaseModel):
    # Validated snapshot of the user a request is executed as.
    id: int
    name: str
    email: str
    primary_gid: int
    # True when the user was just auto-created and still needs a login cookie.
    needs_login: bool = False
    is_guest: bool = True
async def _get_authorized_user(request: web.Request) -> Optional[Dict]:
    """Return the user behind a valid auth cookie, or None when the request
    is unidentified or the identified user no longer exists."""
    uid = await authorized_userid(request)
    if uid is None:
        return None
    try:
        return await get_user(request.app, uid)
    except UserNotFoundError:
        return None
async def _create_temporary_user(request: web.Request):
    """Create, persist and return a GUEST user with random credentials.

    Two consecutive redis locks shield the new user from the garbage
    collector (GC) during creation and during its first connection —
    see the GUEST_USER_RC_LOCK notes inline.
    """
    db = get_storage(request.app)
    lock_manager: Aioredlock = get_redis_lock_manager(request.app)
    # TODO: avatar is an icon of the hero!
    random_user_name = get_random_string(min_len=5)
    email = random_user_name + "@guest-at-osparc.io"
    password = get_random_string(min_len=12)
    # GUEST_USER_RC_LOCK:
    #
    #   These locks prevents the GC from deleting a GUEST user in to stages of its lifefime:
    #
    #  1. During construction:
    #     - Prevents GC from deleting this GUEST user while it is being created
    #     - Since the user still does not have an ID assigned, the lock is named with his random_user_name
    #
    MAX_DELAY_TO_CREATE_USER = 3  # secs
    #
    #  2. During initialization
    #     - Prevents the GC from deleting this GUEST user, with ID assigned, while it gets initialized and acquires it's first resource
    #     - Uses the ID assigned to name the lock
    #
    MAX_DELAY_TO_GUEST_FIRST_CONNECTION = 15  # secs
    #
    #
    # NOTES:
    #   - In case of failure or excessive delay the lock has a timeout that automatically unlocks it
    #     and the GC can clean up what remains
    #   - Notice that the ids to name the locks are unique, therefore the lock can be acquired w/o errors
    #   - These locks are very specific to resources and have timeout so the risk of blocking from GC is small
    #
    # (1) read details above
    async with await lock_manager.lock(
        GUEST_USER_RC_LOCK_FORMAT.format(user_id=random_user_name),
        lock_timeout=MAX_DELAY_TO_CREATE_USER,
    ):
        # NOTE: usr Dict is incomplete, e.g. does not contain primary_gid
        usr = await db.create_user(
            {
                "name": random_user_name,
                "email": email,
                "password_hash": encrypt_password(password),
                "status": ACTIVE,
                "role": GUEST,
                "created_ip": get_client_ip(request),
            }
        )
        # Re-fetch to obtain the complete user record (incl. primary_gid).
        user: Dict = await get_user(request.app, usr["id"])
        # (2) read details above
        await lock_manager.lock(
            GUEST_USER_RC_LOCK_FORMAT.format(user_id=user["id"]),
            lock_timeout=MAX_DELAY_TO_GUEST_FIRST_CONNECTION,
        )
    return user
async def acquire_user(request: web.Request, *, is_guest_allowed: bool) -> UserInfo:
    """
    Identifies request's user and if anonymous, it creates
    a temporary guest user that is authorized.

    :raises web.HTTPUnauthorized: when guests are not allowed and the request
        carries no registered-user identity (or the identity is a GUEST).
    """
    user = None
    # anonymous = no identity in request
    is_anonymous_user = await is_anonymous(request)
    if not is_anonymous_user:
        # NOTE: covers valid cookie with unauthorized user (e.g. expired guest/banned)
        user = await _get_authorized_user(request)
    if not user and is_guest_allowed:
        log.debug("Creating temporary GUEST user ...")
        user = await _create_temporary_user(request)
        # the fresh guest has no session cookie yet, so a login is still needed
        is_anonymous_user = True
    if not is_guest_allowed and (not user or user.get("role") == GUEST):
        raise web.HTTPUnauthorized(reason="Only available for registered users")
    return UserInfo(
        id=user["id"],
        name=user["name"],
        email=user["email"],
        primary_gid=user["primary_gid"],
        needs_login=is_anonymous_user,
        is_guest=user.get("role") == GUEST,
    )
async def ensure_authentication(
    user: UserInfo, request: web.Request, response: web.Response
):
    """Auto-login *user* on this response when it still lacks an identity cookie."""
    if not user.needs_login:
        return
    log.debug("Auto login for anonymous user %s", user.name)
    await remember(request, response, user.email)
| 33.167785
| 135
| 0.677256
|
336980b47a33312c08a6f9a32e6002d76f3f15e7
| 800
|
py
|
Python
|
tests/test_GitHubUtils.py
|
Prodigysov/pyutil
|
d8faa7196ce3f9f156b49d088c97d93d25c2ec20
|
[
"Apache-2.0"
] | null | null | null |
tests/test_GitHubUtils.py
|
Prodigysov/pyutil
|
d8faa7196ce3f9f156b49d088c97d93d25c2ec20
|
[
"Apache-2.0"
] | null | null | null |
tests/test_GitHubUtils.py
|
Prodigysov/pyutil
|
d8faa7196ce3f9f156b49d088c97d93d25c2ec20
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
from github.GithubException import GithubException
from pyutil import GitHubUtils
from .TestSupport import TestSupport
class test_GitHubUtils(unittest.TestCase):
    def test_search_repos_with_username(self):
        user = "google"
        # Language passed as a keyword argument.
        repos_kwarg = GitHubUtils.search_repos("user:{}".format(user), language="Java")
        self.assertGreater(len(repos_kwarg), 0)
        # Language embedded in the query string (space-separated).
        repos_query = GitHubUtils.search_repos("user:{} language:Java".format(user))
        self.assertGreater(len(repos_query), 0)
        # Query separator "+" will not work
        repos_plus = GitHubUtils.search_repos("user:{}+language:Java".format(user), max_retry_times=0)
        self.assertEqual(len(repos_plus), 0)
# Allow running this test module directly: `python test_GitHubUtils.py`.
if __name__ == '__main__':
    unittest.main()
| 30.769231
| 109
| 0.72
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.