Dataset schema (one record per source file; observed value ranges in parentheses, ⌀ marks nullable columns):

- hexsha: string (40 chars)
- size: int64 (3 to 1.03M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (3 to 972 chars)
- max_stars_repo_name: string (6 to 130 chars)
- max_stars_repo_head_hexsha: string (40 to 78 chars)
- max_stars_repo_licenses: list (1 to 10 items)
- max_stars_count: int64 (1 to 191k) ⌀
- max_stars_repo_stars_event_min_datetime: string (24 chars) ⌀
- max_stars_repo_stars_event_max_datetime: string (24 chars) ⌀
- max_issues_repo_path: string (3 to 972 chars)
- max_issues_repo_name: string (6 to 130 chars)
- max_issues_repo_head_hexsha: string (40 to 78 chars)
- max_issues_repo_licenses: list (1 to 10 items)
- max_issues_count: int64 (1 to 116k) ⌀
- max_issues_repo_issues_event_min_datetime: string (24 chars) ⌀
- max_issues_repo_issues_event_max_datetime: string (24 chars) ⌀
- max_forks_repo_path: string (3 to 972 chars)
- max_forks_repo_name: string (6 to 130 chars)
- max_forks_repo_head_hexsha: string (40 to 78 chars)
- max_forks_repo_licenses: list (1 to 10 items)
- max_forks_count: int64 (1 to 105k) ⌀
- max_forks_repo_forks_event_min_datetime: string (24 chars) ⌀
- max_forks_repo_forks_event_max_datetime: string (24 chars) ⌀
- content: string (3 to 1.03M chars)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)
Record 1
hexsha: 7ed12725c01586b7ea78bc6761c9cf27421819fb | size: 1,818 | ext: py | lang: Python
max_stars: apistar/commands/new.py @ sirex/apistar (0ab0ac394983230bfacad2035d9436e88988cfc4) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2017-12-27T09:05:23.000Z to 2017-12-27T09:05:23.000Z
max_issues: apistarlearn/commands/new.py @ 1067511899/tornado-learn (497cc8f7816f15e2eab834a758f192d50704fe05) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: apistarlearn/commands/new.py @ 1067511899/tornado-learn (497cc8f7816f15e2eab834a758f192d50704fe05) | licenses: ["Apache-2.0"] | count: null | events: null
content:
import os
import shutil
import apistar
from apistar import exceptions
from apistar.interfaces import Console
APISTAR_PACKAGE_DIR = os.path.dirname(apistar.__file__)
LAYOUTS_DIR = os.path.join(APISTAR_PACKAGE_DIR, 'layouts')
LAYOUT_CHOICES = os.listdir(LAYOUTS_DIR)
IGNORED_DIRECTORIES = ['__pycache__']
def new(console: Console,
target_dir: str,
        framework: str = 'wsgi',
        force: bool = False) -> None:
"""
Create a new project in TARGET_DIR.
Args:
console: The console to write output about file creation.
target_dir: The directory to use when creating the project.
        framework: Select the framework layout to use ('wsgi' or 'asyncio').
force: Overwrite any existing project files.
"""
if framework not in ('wsgi', 'asyncio'):
message = "Invalid framework option. Use 'wsgi' or 'asyncio'."
raise exceptions.CommandLineError(message)
source_dir = os.path.join(LAYOUTS_DIR, framework)
copy_paths = []
for dir_path, dirs, filenames in os.walk(source_dir):
dirs[:] = [d for d in dirs if d not in IGNORED_DIRECTORIES]
for filename in filenames:
source_path = os.path.join(dir_path, filename)
rel_path = os.path.relpath(source_path, source_dir)
target_path = os.path.join(target_dir, rel_path)
if os.path.exists(target_path) and not force:
message = 'Project files already exist. Use `--force` to overwrite.'
raise exceptions.CommandLineError(message)
copy_paths.append((source_path, target_path))
for source_path, target_path in sorted(copy_paths):
console.echo(target_path)
parent = os.path.dirname(target_path)
if parent:
os.makedirs(parent, exist_ok=True)
shutil.copy(source_path, target_path)
avg_line_length: 35.647059 | max_line_length: 84 | alphanum_fraction: 0.674917
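
The `dirs[:] = [...]` slice assignment in `new()` above is what makes `os.walk` skip ignored directories: mutating the list in place changes which subdirectories the walk descends into. A minimal standalone sketch of the idiom (the `IGNORED` set and the tree being walked are illustrative):

import os

IGNORED = {"__pycache__", ".git"}  # assumed names to prune

for dir_path, dirs, filenames in os.walk("."):
    # Slice assignment mutates the list os.walk holds a reference to,
    # so pruned directories are never visited.
    dirs[:] = [d for d in dirs if d not in IGNORED]
    for filename in filenames:
        print(os.path.join(dir_path, filename))
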
Record 2
hexsha: 5d0a70259f1a6faf9564710ae7a1fa73c238836f | size: 2,286 | ext: gyp | lang: Python
max_stars: 3rdParty/V8/gypfiles/parser-shell.gyp @ rajeev02101987/arangodb (817e6c04cb82777d266f3b444494140676da98e2) | licenses: ["Apache-2.0"] | count: 12,278 | events: 2015-01-29T17:11:33.000Z to 2022-03-31T21:12:00.000Z
max_issues: 3rdParty/V8/gypfiles/parser-shell.gyp @ rajeev02101987/arangodb (817e6c04cb82777d266f3b444494140676da98e2) | licenses: ["Apache-2.0"] | count: 9,469 | events: 2015-01-30T05:33:07.000Z to 2022-03-31T16:17:21.000Z
max_forks: 3rdParty/V8/gypfiles/parser-shell.gyp @ rajeev02101987/arangodb (817e6c04cb82777d266f3b444494140676da98e2) | licenses: ["Apache-2.0"] | count: 892 | events: 2015-01-29T16:26:19.000Z to 2022-03-20T07:44:30.000Z
content:
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
# 'V8_ROOT': '../',
'v8_code': 1,
'v8_enable_i18n_support%': 1,
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'parser-shell',
'type': 'executable',
'dependencies': [
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
],
'include_dirs+': [
'..',
],
'sources': [
'../tools/parser-shell.cc',
'../tools/shell-utils.h',
],
},
],
}
avg_line_length: 36.870968 | max_line_length: 72 | alphanum_fraction: 0.663167
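
In the gyp file above, the `%` suffix on `v8_enable_i18n_support%` declares a default that the build invocation may override, and the `conditions` block adds the ICU dependencies only when that variable is 1. A rough Python sketch of that merge-then-branch behavior (the dicts are illustrative, not gyp's actual evaluator):

# Defaults declared with '%' lose to caller-supplied values.
defaults = {"v8_code": 1, "v8_enable_i18n_support": 1}
overrides = {}  # e.g. parsed from a -Dv8_enable_i18n_support=0 flag
variables = {**defaults, **overrides}

deps = ["v8.gyp:v8", "v8.gyp:v8_libbase", "v8.gyp:v8_libplatform"]
if variables["v8_enable_i18n_support"] == 1:
    deps += ["icui18n", "icuuc"]  # stand-ins for the <(icu_gyp_path) targets
print(deps)
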
Record 3
hexsha: 2c45a95d7d738221f0615ccd5e62e7e2cdd4c599 | size: 3,910 | ext: py | lang: Python
max_stars: modelserver/modelserver/guidance/guidance.py @ terrykwon/captivate (c7e4714a0eaced62509153849ee89981a58e8e26) | licenses: ["MIT"] | count: 3 | events: 2022-01-21T05:44:40.000Z to 2022-02-26T16:15:23.000Z
max_issues: modelserver/modelserver/guidance/guidance.py @ terrykwon/captivate (c7e4714a0eaced62509153849ee89981a58e8e26) | licenses: ["MIT"] | count: null | events: null
max_forks: modelserver/modelserver/guidance/guidance.py @ terrykwon/captivate (c7e4714a0eaced62509153849ee89981a58e8e26) | licenses: ["MIT"] | count: 1 | events: 2022-03-14T14:40:56.000Z to 2022-03-14T14:40:56.000Z
content:
import pandas as pd
import numpy as np
from collections import defaultdict
import time
toy_num = 12
class Toy:
def __init__(self, toy_name):
self.toy_name = toy_name
self.weight = 1/toy_num
self.phrases = []
def add_phrase(self, phrase):
self.phrases.append(phrase)
def update_weight(self, target_dist, target_length, alpha):
self.weight = self.weight * (1-alpha) + target_dist[self.toy_name] * alpha / target_length
def is_phrase_spoken(self, word, display_phrases):
for phrase in self.phrases:
if (phrase.phrase in display_phrases) and (phrase.word == word):
phrase.increase_spoken_count()
return True
return False
def track_displayed_time(self, curr_time):
need_ordered = False
for phrase in self.phrases:
if (phrase.is_displayed == True) and (phrase.track_displayed_time(curr_time)):
need_ordered = True
return need_ordered
def set_display(self, displayed_phrases):
for phrase in self.phrases:
if phrase.phrase in displayed_phrases:
if phrase.is_displayed == True:
pass
elif phrase.is_displayed == False:
phrase.is_displayed = True
phrase.on_displayed()
else:
print("is_displayed error")
else:
phrase.is_displayed = False
class Phrase:
def __init__(self, word, phrase, id, highlight, color):
self.word = word
self.phrase = phrase
self.highlight = highlight
self.id = id
self.weight = 1
        self.color = color
self.start_displayed = 0
self.spoken_count = 0
self.is_displayed = False
def increase_spoken_count(self):
self.spoken_count += 1
self.weight -= 0.5
if self.spoken_count > 1:
self.is_displayed = False
self.spoken_count = 0
def on_displayed(self):
self.start_displayed = int(round(time.time() * 1000))
def track_displayed_time(self, curr_time):
if curr_time - self.start_displayed > 120000:
self.weight -= 1
return True
return False
def print_all(self):
print("-----------------------------")
print("phrase : "+self.phrase)
print("weight : "+str(self.weight))
print("start_displayed : "+ str(self.start_displayed))
print("spoken_count : "+str(self.spoken_count))
class Guidance:
def __init__(self, file_path):
self.toys = []
self.toy_list = []
self.read_guide_csv(file_path)
def read_guide_csv(self, file_path):
guidance_arr = np.array(pd.read_csv(file_path))
for guide in guidance_arr:
toy_name = guide[0]
if toy_name not in self.toy_list:
self.toy_list.append(toy_name)
new_toy = Toy(toy_name)
self.toys.append(new_toy)
word = guide[1]
sentence = guide[2]
highlight = guide[3]
id = guide[4]
color = guide[5]
new_phrase = Phrase(word, sentence, id, highlight, color)
for toy in self.toys:
if toy.toy_name == toy_name:
toy.add_phrase(new_phrase)
def get_toys(self):
return self.toys
# def get_object_names(self):
# return list(self.guide_dict.keys())
# def get_object_context(self):
# object_list = self.get_object_names()
# object_context = {obj : 1/len(object_list) for obj in object_list}
# return object_context
# def get_candidates(self):
# return self.guide_dict
avg_line_length: 25.555556 | max_line_length: 98 | alphanum_fraction: 0.561637
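
`Toy.update_weight` above is an exponential moving average: each call keeps `(1 - alpha)` of the old weight and blends in an `alpha`-scaled share of the target distribution. A small worked sketch with made-up numbers:

def ema(weight, target, alpha):
    # new = old * (1 - alpha) + target * alpha
    return weight * (1 - alpha) + target * alpha

w = 1 / 12  # initial weight, mirroring 1/toy_num above
for _ in range(5):
    w = ema(w, 0.5, alpha=0.3)
    print(round(w, 4))  # 0.2083, 0.2958, 0.3571, 0.4, 0.43: drifting toward 0.5
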
Record 4
hexsha: 99446bbe83270a710cc30f7bfdb8bd5390f9e97d | size: 13,793 | ext: py | lang: Python
max_stars: msticpy/common/pkg_config.py @ 2xyo/msticpy (17f6a25ea82d85632e0c52a60e20626e9621d3b0) | licenses: ["MIT"] | count: 1 | events: 2021-07-29T16:04:08.000Z to 2021-07-29T16:04:08.000Z
max_issues: msticpy/common/pkg_config.py @ QPC-database/msticpy (54c6d74e0bb25528dd0347edb40c693dd7b1eac7) | licenses: ["MIT"] | count: 3 | events: 2021-05-15T02:16:39.000Z to 2022-01-19T13:13:25.000Z
max_forks: msticpy/common/pkg_config.py @ QPC-database/msticpy (54c6d74e0bb25528dd0347edb40c693dd7b1eac7) | licenses: ["MIT"] | count: null | events: null
content:
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Package configuration reader.
Reads default configuration from package file `msticpyconfig.yaml`.
Optionally reads custom configuration from file specified in environment
variable `MSTICPYCONFIG`. If this is not defined the package will look for
a file `msticpyconfig.yaml` in the current directory.
Default settings are accessible as an attribute `default_settings`.
Custom settings are accessible as an attribute `custom_settings`.
Consolidated settings are accessible as an attribute `settings`.
"""
import os
from pathlib import Path
from typing import Any, Dict, Optional, Callable
import pkg_resources
import yaml
from yaml.error import YAMLError
from . import exceptions
from .exceptions import MsticpyUserConfigError
from .utility import is_valid_uuid
from .._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
_CONFIG_FILE: str = "msticpyconfig.yaml"
_CONFIG_ENV_VAR: str = "MSTICPYCONFIG"
_DP_KEY = "DataProviders"
_AZ_SENTINEL = "AzureSentinel"
_AZ_CLI = "AzureCLI"
# pylint: disable=invalid-name
default_settings: Dict[str, Any] = {}
custom_settings: Dict[str, Any] = {}
settings: Dict[str, Any] = {}
def _get_current_config() -> Callable[[Any], Optional[str]]:
"""Closure for holding path of config file."""
_current_conf_file: Optional[str] = None
def _current_config(file_path: Optional[str] = None) -> Optional[str]:
nonlocal _current_conf_file # noqa
if file_path is not None:
_current_conf_file = file_path
return _current_conf_file
return _current_config
_CURRENT_CONF_FILE = _get_current_config()
def current_config_path() -> Optional[str]:
"""
Return the path of the current config file, if any.
Returns
-------
Optional[str]
path of the current config file
"""
return _CURRENT_CONF_FILE(None)
def refresh_config():
"""Re-read the config settings."""
# pylint: disable=global-statement
global default_settings, custom_settings, settings
default_settings = _get_default_config()
custom_settings = _get_custom_config()
custom_settings = _create_data_providers(custom_settings)
settings = _consolidate_configs(default_settings, custom_settings)
def get_config(setting_path: str) -> Any:
"""
Return setting item for path.
Parameters
----------
setting_path : str
Path to setting item expressed as dot-separated
string
Returns
-------
Any
The item at the path location.
"""
path_elems = setting_path.split(".")
cur_node = settings
for elem in path_elems:
cur_node = cur_node.get(elem, None)
if cur_node is None:
raise KeyError(f"{elem} value of {setting_path} is not a valid path")
return cur_node
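# Example (illustrative): get_config("AzureSentinel.Workspaces.Default")
# walks settings["AzureSentinel"]["Workspaces"]["Default"] and raises
# KeyError at the first missing element.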
def set_config(setting_path: str, value: Any):
"""
Set setting value for path.
Parameters
----------
setting_path : str
Path to setting item expressed as dot-separated
string
value : Any
The value to set.
"""
    path_elems = setting_path.split(".")
    cur_node = settings
    # Walk to the parent of the final path element, then assign the value.
    for elem in path_elems[:-1]:
        cur_node = cur_node.get(elem, None)
        if cur_node is None:
            raise KeyError(f"{elem} value of {setting_path} is not a valid path")
    cur_node[path_elems[-1]] = value
    return value
def _read_config_file(config_file: str) -> Dict[str, Any]:
"""
Read a yaml config definition file.
Parameters
----------
config_file : str
Path to yaml config file
Returns
-------
Dict
Configuration settings
"""
if Path(config_file).is_file():
with open(config_file) as f_handle:
# use safe_load instead of load
try:
return yaml.safe_load(f_handle)
except YAMLError as yml_err:
raise MsticpyUserConfigError(
f"Check that your {config_file} is valid YAML.",
"The following error was encountered",
str(yml_err),
title="config file could not be read",
) from yml_err
return {}
def _consolidate_configs(
def_config: Dict[str, Any], cust_config: Dict[str, Any]
) -> Dict[str, Any]:
resultant_config = {}
resultant_config.update(def_config)
_override_config(resultant_config, cust_config)
return resultant_config
def _override_config(base_config: Dict[str, Any], new_config: Dict[str, Any]):
for c_key, c_item in new_config.items():
if c_item is None:
continue
if isinstance(base_config.get(c_key), dict):
_override_config(base_config[c_key], new_config[c_key])
else:
base_config[c_key] = new_config[c_key]
def _get_default_config():
"""Return the package default config file."""
conf_file = None
package = "msticpy"
try:
conf_file = pkg_resources.resource_filename(package, _CONFIG_FILE)
except ModuleNotFoundError as mod_err:
# if all else fails we try to find the package default config somewhere
# in the package tree - we use the first one we find
pkg_root = _get_pkg_path("msticpy")
if not pkg_root:
raise MsticpyUserConfigError(
f"Unable to locate the package default {_CONFIG_FILE}",
"msticpy package may be corrupted.",
title=f"Package {_CONFIG_FILE} missing.",
) from mod_err
conf_file = next(iter(pkg_root.glob("**/" + _CONFIG_FILE)))
if conf_file:
return _read_config_file(conf_file)
return {}
def _get_custom_config():
config_path = os.environ.get(_CONFIG_ENV_VAR, None)
if config_path and Path(config_path).is_file():
_CURRENT_CONF_FILE(str(Path(config_path).resolve()))
return _read_config_file(config_path)
if Path(_CONFIG_FILE).is_file():
_CURRENT_CONF_FILE(str(Path(".").joinpath(_CONFIG_FILE).resolve()))
return _read_config_file(_CONFIG_FILE)
return {}
def _get_pkg_path(pkg_name):
current_path = Path(__file__)
while current_path.name != pkg_name:
if current_path == current_path.parent:
return None
current_path = current_path.parent
return current_path
def _create_data_providers(mp_config: Dict[str, Any]) -> Dict[str, Any]:
if mp_config.get(_DP_KEY) is None:
mp_config[_DP_KEY] = {}
data_providers = mp_config[_DP_KEY]
az_sent_config = mp_config.get(_AZ_SENTINEL)
if az_sent_config and az_sent_config.get("Workspaces"):
for section, prov_settings in mp_config[_AZ_SENTINEL]["Workspaces"].items():
sec_name = f"{_AZ_SENTINEL}_{section}"
if sec_name in data_providers:
continue
data_providers[sec_name] = {"Args": prov_settings}
az_cli_config = mp_config.get(_AZ_CLI)
if az_cli_config and _AZ_CLI not in data_providers:
data_providers[_AZ_CLI] = mp_config[_AZ_CLI]
return mp_config
# read initial config when first imported.
refresh_config()
def validate_config(mp_config: Optional[Dict[str, Any]] = None, config_file: Optional[str] = None):
"""
Validate msticpy config settings.
Parameters
----------
mp_config : Dict[str, Any], optional
The settings dictionary, by default it will
check the currently loaded settings.
config_file : str
path to config file to check, by default None
"""
if config_file:
mp_config = _read_config_file(config_file)
if not (mp_config or config_file):
mp_config = settings
if not isinstance(mp_config, dict):
raise TypeError("Unknown format for configuration settings.")
mp_errors, mp_warn = _validate_azure_sentinel(mp_config=mp_config)
auth_key_providers = [
"OTX",
"VirusTotal",
"XForce",
"OpenPageRank",
"GeoIPLite",
"IPStack",
]
for conf_section in ["TIProviders", "OtherProviders", _DP_KEY]:
prov_errors, prov_warn = _check_provider_settings(
mp_config=mp_config.get(conf_section, {}),
section=conf_section,
key_provs=auth_key_providers,
)
if conf_section == _DP_KEY and mp_config.get(conf_section) is None:
continue
mp_errors.extend(prov_errors)
mp_warn.extend(prov_warn)
_print_validation_report(mp_errors, mp_warn)
if mp_errors or mp_warn:
return mp_errors, mp_warn
return [], []
def _print_validation_report(mp_errors, mp_warn):
if mp_errors:
title = "\nThe following configuration errors were found:"
print(title, "\n", "-" * len(title))
for err in mp_errors:
print(err)
else:
print("No errors found.")
if mp_warn:
title = "\nThe following configuration warnings were found:"
print(title, "\n", "-" * len(title))
for err in mp_warn:
print(err)
else:
print("No warnings found.")
def _validate_azure_sentinel(mp_config):
mp_errors = []
mp_warnings = []
as_settings = mp_config.get(_AZ_SENTINEL, {})
if not as_settings:
mp_errors.append("Missing or empty 'AzureSentinel' section")
return mp_errors, mp_warnings
ws_settings = as_settings.get("Workspaces", {})
if not ws_settings:
mp_errors.append("Missing or empty 'Workspaces' key in 'AzureSentinel' section")
return mp_errors, mp_warnings
no_default = True
for ws, ws_settings in ws_settings.items():
if ws == "Default":
no_default = False
ws_id = ws_settings.get("WorkspaceId")
if not (ws_id and is_valid_uuid(ws_id)):
mp_errors.append(f"Invalid GUID for WorkspaceId in {ws} section")
ten_id = ws_settings.get("TenantId")
if not (ten_id and is_valid_uuid(ten_id)):
mp_errors.append(f"Invalid GUID for TenantId in {ws} section")
mp_warnings = ["No default workspace set"] if no_default else []
return mp_errors, mp_warnings
def _check_provider_settings(mp_config, section, key_provs):
mp_errors = []
mp_warnings = []
if not mp_config:
mp_warnings.append(f"'{section}' section has no settings.")
return mp_errors, mp_warnings
for p_name, p_setting in mp_config.items():
if not p_setting:
mp_warnings.append(f"'{section}/{p_name}' sub-section has no settings.")
continue
if "Args" not in p_setting:
continue
sec_args = p_setting.get("Args")
if not sec_args:
mp_errors.append(f"'{section}/{p_name}/{sec_args}' key has no settings.")
continue
sec_path = f"{section}/{p_name}" if section else f"{p_name}"
mp_errors.extend(
_check_required_provider_settings(sec_args, sec_path, p_name, key_provs)
)
mp_errors.extend(
_check_env_vars(args_key=p_setting.get("Args"), section=sec_path)
)
return mp_errors, mp_warnings
def _check_required_provider_settings(sec_args, sec_path, p_name, key_provs):
errs = []
if key_provs and p_name in key_provs:
errs.append(_check_required_key(sec_args, "AuthKey", sec_path))
if p_name == "XForce":
errs.append(_check_required_key(sec_args, "ApiID", sec_path))
if p_name == _AZ_SENTINEL:
errs.append(_check_is_uuid(sec_args, "WorkspaceID", sec_path))
errs.append(_check_is_uuid(sec_args, "TenantID", sec_path))
if p_name.startswith("AzureSentinel_"):
errs.append(_check_is_uuid(sec_args, "WorkspaceId", sec_path))
errs.append(_check_is_uuid(sec_args, "TenantId", sec_path))
if (
p_name == _AZ_CLI
and "clientId" in sec_args
and sec_args["clientId"] is not None
):
# only warn if partially filled - since these are optional
errs.append(_check_required_key(sec_args, "clientId", sec_path))
errs.append(_check_required_key(sec_args, "tenantId", sec_path))
errs.append(_check_required_key(sec_args, "clientSecret", sec_path))
return [err for err in errs if err]
def _check_required_key(conf_section, key, sec_path):
if key not in conf_section or not conf_section.get(key):
return f"{sec_path}: Missing or invalid {key}."
return None
def _check_is_uuid(conf_section, key, sec_path):
if (
key not in conf_section
or not conf_section[key]
or not is_valid_uuid(conf_section[key])
):
return f"{sec_path}: Missing or invalid {key}."
return None
def _check_env_vars(args_key, section):
mp_errs = []
if not args_key:
return mp_errs
for val in args_key.values():
if not val:
continue
if "EnvironmentVar" in val:
env_name = val.get("EnvironmentVar")
if not env_name:
mp_errs.append(f"{section}: No environment variable name specified.")
elif env_name not in os.environ:
mp_errs.append(f"{section}: Env variable {env_name} not set.")
elif not os.environ[env_name]:
mp_errs.append(f"{section}: Env variable {env_name} value is not set.")
return mp_errs
# Set get_config function in exceptions module
# so that it can be called without having a circular import
# pylint: disable=protected-access
exceptions._get_config = get_config
avg_line_length: 32.00232 | max_line_length: 88 | alphanum_fraction: 0.650837
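
`_consolidate_configs` and `_override_config` above implement a recursive dict merge in which custom settings win over defaults leaf by leaf. A self-contained sketch of the same idea (the sample dicts are invented):

import copy

def override(base: dict, new: dict) -> None:
    # Descend into nested dicts; any non-dict value simply replaces the old one.
    for key, item in new.items():
        if item is None:
            continue
        if isinstance(base.get(key), dict) and isinstance(item, dict):
            override(base[key], item)
        else:
            base[key] = item

defaults = {"TIProviders": {"OTX": {"Primary": True, "Provider": "OTX"}}}
custom = {"TIProviders": {"OTX": {"Primary": False}}}
merged = copy.deepcopy(defaults)
override(merged, custom)
print(merged)  # {'TIProviders': {'OTX': {'Primary': False, 'Provider': 'OTX'}}}
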
Record 5
hexsha: 2d61b74408c225599964e7b26da8dfe8eb04671d | size: 29,566 | ext: py | lang: Python
max_stars: testing/test_assertrewrite.py @ meawoppl/pytest (3676da594c76243cbb2271a59213d3b2ded2f729) | licenses: ["MIT"] | count: null | events: null
max_issues: testing/test_assertrewrite.py @ meawoppl/pytest (3676da594c76243cbb2271a59213d3b2ded2f729) | licenses: ["MIT"] | count: null | events: null
max_forks: testing/test_assertrewrite.py @ meawoppl/pytest (3676da594c76243cbb2271a59213d3b2ded2f729) | licenses: ["MIT"] | count: null | events: null
content:
from __future__ import absolute_import, division, print_function
import glob
import os
import py_compile
import stat
import sys
import zipfile
import py
import pytest
import _pytest._code
from _pytest.assertion import util
from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook
from _pytest.main import EXIT_NOTESTSCOLLECTED
ast = pytest.importorskip("ast")
if sys.platform.startswith("java"):
# XXX should be xfail
pytest.skip("assert rewrite does currently not work on jython")
def setup_module(mod):
mod._old_reprcompare = util._reprcompare
_pytest._code._reprcompare = None
def teardown_module(mod):
util._reprcompare = mod._old_reprcompare
del mod._old_reprcompare
def rewrite(src):
tree = ast.parse(src)
rewrite_asserts(tree)
return tree
def getmsg(f, extra_ns=None, must_pass=False):
"""Rewrite the assertions in f, run it, and get the failure message."""
src = '\n'.join(_pytest._code.Code(f).source().lines)
mod = rewrite(src)
code = compile(mod, "<test>", "exec")
ns = {}
if extra_ns is not None:
ns.update(extra_ns)
py.builtin.exec_(code, ns)
func = ns[f.__name__]
try:
func()
except AssertionError:
if must_pass:
pytest.fail("shouldn't have raised")
s = str(sys.exc_info()[1])
if not s.startswith("assert"):
return "AssertionError: " + s
return s
else:
if not must_pass:
pytest.fail("function didn't raise at all")
class TestAssertionRewrite(object):
def test_place_initial_imports(self):
s = """'Doc string'\nother = stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[0].value, ast.Str)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Assign)
s = """from __future__ import with_statement\nother_stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.ImportFrom)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
s = """'doc string'\nfrom __future__ import with_statement\nother"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[0].value, ast.Str)
assert isinstance(m.body[1], ast.ImportFrom)
for imp in m.body[2:4]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 3
assert imp.col_offset == 0
assert isinstance(m.body[4], ast.Expr)
s = """from . import relative\nother_stuff"""
m = rewrite(s)
for imp in m.body[0:2]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 1
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
def test_dont_rewrite(self):
s = """'PYTEST_DONT_REWRITE'\nassert 14"""
m = rewrite(s)
assert len(m.body) == 2
assert isinstance(m.body[0].value, ast.Str)
assert isinstance(m.body[1], ast.Assert)
assert m.body[1].msg is None
def test_name(self):
def f():
assert False
assert getmsg(f) == "assert False"
def f():
f = False
assert f
assert getmsg(f) == "assert False"
def f():
assert a_global # noqa
assert getmsg(f, {"a_global": False}) == "assert False"
def f():
assert sys == 42
assert getmsg(f, {"sys": sys}) == "assert sys == 42"
def f():
assert cls == 42 # noqa
class X(object):
pass
assert getmsg(f, {"cls": X}) == "assert cls == 42"
def test_assert_already_has_message(self):
def f():
assert False, "something bad!"
assert getmsg(f) == "AssertionError: something bad!\nassert False"
def test_assertion_message(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, "The failure message"
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*The failure message*",
"*assert 1 == 2*",
])
def test_assertion_message_multiline(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, "A multiline\\nfailure message"
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*A multiline*",
"*failure message*",
"*assert 1 == 2*",
])
def test_assertion_message_tuple(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, (1, 2)
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*%s*" % repr((1, 2)),
"*assert 1 == 2*",
])
def test_assertion_message_expr(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, 1 + 2
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*3*",
"*assert 1 == 2*",
])
def test_assertion_message_escape(self, testdir):
testdir.makepyfile("""
def test_foo():
assert 1 == 2, 'To be escaped: %'
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError: To be escaped: %",
"*assert 1 == 2",
])
def test_boolop(self):
def f():
f = g = False
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = True
g = False
assert f and g
assert getmsg(f) == "assert (True and False)"
def f():
f = False
g = True
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = g = False
assert f or g
assert getmsg(f) == "assert (False or False)"
def f():
f = g = False
assert not f and not g
getmsg(f, must_pass=True)
def x():
return False
def f():
assert x() and x()
assert getmsg(f, {"x": x}) == """assert (False)
+ where False = x()"""
def f():
assert False or x()
assert getmsg(f, {"x": x}) == """assert (False or False)
+ where False = x()"""
def f():
assert 1 in {} and 2 in {}
assert getmsg(f) == "assert (1 in {})"
def f():
x = 1
y = 2
assert x in {1: None} and y in {}
assert getmsg(f) == "assert (1 in {1: None} and 2 in {})"
def f():
f = True
g = False
assert f or g
getmsg(f, must_pass=True)
def f():
f = g = h = lambda: True
assert f() and g() and h()
getmsg(f, must_pass=True)
def test_short_circuit_evaluation(self):
def f():
assert True or explode # noqa
getmsg(f, must_pass=True)
def f():
x = 1
assert x == 1 or x == 2
getmsg(f, must_pass=True)
def test_unary_op(self):
def f():
x = True
assert not x
assert getmsg(f) == "assert not True"
def f():
x = 0
assert ~x + 1
assert getmsg(f) == "assert (~0 + 1)"
def f():
x = 3
assert -x + x
assert getmsg(f) == "assert (-3 + 3)"
def f():
x = 0
assert +x + x
assert getmsg(f) == "assert (+0 + 0)"
def test_binary_op(self):
def f():
x = 1
y = -1
assert x + y
assert getmsg(f) == "assert (1 + -1)"
def f():
assert not 5 % 4
assert getmsg(f) == "assert not (5 % 4)"
def test_boolop_percent(self):
def f():
assert 3 % 2 and False
assert getmsg(f) == "assert ((3 % 2) and False)"
def f():
assert False or 4 % 2
assert getmsg(f) == "assert (False or (4 % 2))"
@pytest.mark.skipif("sys.version_info < (3,5)")
def test_at_operator_issue1290(self, testdir):
testdir.makepyfile("""
class Matrix(object):
def __init__(self, num):
self.num = num
def __matmul__(self, other):
return self.num * other.num
def test_multmat_operator():
assert Matrix(2) @ Matrix(3) == 6""")
testdir.runpytest().assert_outcomes(passed=1)
def test_call(self):
def g(a=42, *args, **kwargs):
return False
ns = {"g": g}
def f():
assert g()
assert getmsg(f, ns) == """assert False
+ where False = g()"""
def f():
assert g(1)
assert getmsg(f, ns) == """assert False
+ where False = g(1)"""
def f():
assert g(1, 2)
assert getmsg(f, ns) == """assert False
+ where False = g(1, 2)"""
def f():
assert g(1, g=42)
assert getmsg(f, ns) == """assert False
+ where False = g(1, g=42)"""
def f():
assert g(1, 3, g=23)
assert getmsg(f, ns) == """assert False
+ where False = g(1, 3, g=23)"""
def f():
seq = [1, 2, 3]
assert g(*seq)
assert getmsg(f, ns) == """assert False
+ where False = g(*[1, 2, 3])"""
def f():
x = "a"
assert g(**{x: 2})
assert getmsg(f, ns) == """assert False
+ where False = g(**{'a': 2})"""
def test_attribute(self):
class X(object):
g = 3
ns = {"x": X}
def f():
assert not x.g # noqa
assert getmsg(f, ns) == """assert not 3
+ where 3 = x.g"""
def f():
x.a = False # noqa
assert x.a # noqa
assert getmsg(f, ns) == """assert False
+ where False = x.a"""
def test_comparisons(self):
def f():
a, b = range(2)
assert b < a
assert getmsg(f) == """assert 1 < 0"""
def f():
a, b, c = range(3)
assert a > b > c
assert getmsg(f) == """assert 0 > 1"""
def f():
a, b, c = range(3)
assert a < b > c
assert getmsg(f) == """assert 1 > 2"""
def f():
a, b, c = range(3)
assert a < b <= c
getmsg(f, must_pass=True)
def f():
a, b, c = range(3)
assert a < b
assert b < c
getmsg(f, must_pass=True)
def test_len(self):
def f():
l = list(range(10))
assert len(l) == 11
assert getmsg(f).startswith("""assert 10 == 11
+ where 10 = len([""")
def test_custom_reprcompare(self, monkeypatch):
def my_reprcompare(op, left, right):
return "42"
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 42 < 3
assert getmsg(f) == "assert 42"
def my_reprcompare(op, left, right):
return "%s %s %s" % (left, op, right)
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 1 < 3 < 5 <= 4 < 7
assert getmsg(f) == "assert 5 <= 4"
def test_assert_raising_nonzero_in_comparison(self):
def f():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return False
assert myany(A() < 0)
assert "<MY42 object> < 0" in getmsg(f)
def test_formatchar(self):
def f():
assert "%test" == "test"
assert getmsg(f).startswith("assert '%test' == 'test'")
def test_custom_repr(self):
def f():
class Foo(object):
a = 1
def __repr__(self):
return "\n{ \n~ \n}"
f = Foo()
assert 0 == f.a
assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0]
class TestRewriteOnImport(object):
def test_pycache_is_a_file(self, testdir):
testdir.tmpdir.join("__pycache__").write("Hello")
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_pycache_is_readonly(self, testdir):
cache = testdir.tmpdir.mkdir("__pycache__")
old_mode = cache.stat().mode
cache.chmod(old_mode ^ stat.S_IWRITE)
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()""")
try:
assert testdir.runpytest().ret == 0
finally:
cache.chmod(old_mode)
def test_zipfile(self, testdir):
z = testdir.tmpdir.join("myzip.zip")
z_fn = str(z)
f = zipfile.ZipFile(z_fn, "w")
try:
f.writestr("test_gum/__init__.py", "")
f.writestr("test_gum/test_lizard.py", "")
finally:
f.close()
z.chmod(256)
testdir.makepyfile("""
import sys
sys.path.append(%r)
import test_gum.test_lizard""" % (z_fn,))
assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
def test_readonly(self, testdir):
sub = testdir.mkdir("testing")
sub.join("test_readonly.py").write(
py.builtin._totext("""
def test_rewritten():
assert "@py_builtins" in globals()
""").encode("utf-8"), "wb")
old_mode = sub.stat().mode
sub.chmod(320)
try:
assert testdir.runpytest().ret == 0
finally:
sub.chmod(old_mode)
def test_dont_write_bytecode(self, testdir, monkeypatch):
testdir.makepyfile("""
import os
def test_no_bytecode():
assert "__pycache__" in __cached__
assert not os.path.exists(__cached__)
assert not os.path.exists(os.path.dirname(__cached__))""")
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
assert testdir.runpytest_subprocess().ret == 0
def test_orphaned_pyc_file(self, testdir):
if sys.version_info < (3, 0) and hasattr(sys, 'pypy_version_info'):
pytest.skip("pypy2 doesn't run orphaned pyc files")
testdir.makepyfile("""
import orphan
def test_it():
assert orphan.value == 17
""")
testdir.makepyfile(orphan="""
value = 17
""")
py_compile.compile("orphan.py")
os.remove("orphan.py")
# Python 3 puts the .pyc files in a __pycache__ directory, and will
# not import from there without source. It will import a .pyc from
# the source location though.
if not os.path.exists("orphan.pyc"):
pycs = glob.glob("__pycache__/orphan.*.pyc")
assert len(pycs) == 1
os.rename(pycs[0], "orphan.pyc")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif('"__pypy__" in sys.modules')
def test_pyc_vs_pyo(self, testdir, monkeypatch):
testdir.makepyfile("""
import pytest
def test_optimized():
"hello"
assert test_optimized.__doc__ is None"""
)
p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None,
rootdir=testdir.tmpdir)
tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 1
assert tagged + ".pyc" in os.listdir("__pycache__")
def test_package(self, testdir):
pkg = testdir.tmpdir.join("pkg")
pkg.mkdir()
pkg.join("__init__.py").ensure()
pkg.join("test_blah.py").write("""
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_translate_newlines(self, testdir):
content = "def test_rewritten():\r\n assert '@py_builtins' in globals()"
b = content.encode("utf-8")
testdir.tmpdir.join("test_newlines.py").write(b, "wb")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif(sys.version_info < (3, 3),
reason='packages without __init__.py not supported on python 2')
def test_package_without__init__py(self, testdir):
pkg = testdir.mkdir('a_package_without_init_py')
pkg.join('module.py').ensure()
testdir.makepyfile("import a_package_without_init_py.module")
assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
def test_rewrite_warning(self, pytestconfig, monkeypatch):
hook = AssertionRewritingHook(pytestconfig)
warnings = []
def mywarn(code, msg):
warnings.append((code, msg))
monkeypatch.setattr(hook.config, 'warn', mywarn)
hook.mark_rewrite('_pytest')
assert '_pytest' in warnings[0][1]
def test_rewrite_module_imported_from_conftest(self, testdir):
testdir.makeconftest('''
import test_rewrite_module_imported
''')
testdir.makepyfile(test_rewrite_module_imported='''
def test_rewritten():
assert "@py_builtins" in globals()
''')
assert testdir.runpytest_subprocess().ret == 0
def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch):
"""
AssertionRewriteHook should remember rewritten modules so it
doesn't give false positives (#2005).
"""
monkeypatch.syspath_prepend(testdir.tmpdir)
testdir.makepyfile(test_remember_rewritten_modules='')
warnings = []
hook = AssertionRewritingHook(pytestconfig)
monkeypatch.setattr(hook.config, 'warn', lambda code, msg: warnings.append(msg))
hook.find_module('test_remember_rewritten_modules')
hook.load_module('test_remember_rewritten_modules')
hook.mark_rewrite('test_remember_rewritten_modules')
hook.mark_rewrite('test_remember_rewritten_modules')
assert warnings == []
def test_rewrite_warning_using_pytest_plugins(self, testdir):
testdir.makepyfile(**{
'conftest.py': "pytest_plugins = ['core', 'gui', 'sci']",
'core.py': "",
'gui.py': "pytest_plugins = ['core', 'sci']",
'sci.py': "pytest_plugins = ['core']",
'test_rewrite_warning_pytest_plugins.py': "def test(): pass",
})
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(['*= 1 passed in *=*'])
assert 'pytest-warning summary' not in result.stdout.str()
def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):
monkeypatch.setenv('PYTEST_PLUGINS', 'plugin')
testdir.makepyfile(**{
'plugin.py': "",
'test_rewrite_warning_using_pytest_plugins_env_var.py': """
import plugin
pytest_plugins = ['plugin']
def test():
pass
""",
})
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(['*= 1 passed in *=*'])
assert 'pytest-warning summary' not in result.stdout.str()
@pytest.mark.skipif(sys.version_info[0] > 2, reason='python 2 only')
def test_rewrite_future_imports(self, testdir):
"""Test that rewritten modules don't inherit the __future__ flags
from the assertrewrite module.
assertion.rewrite imports __future__.division (and others), so
ensure rewritten modules don't inherit those flags.
The test below will fail if __future__.division is enabled
"""
testdir.makepyfile('''
def test():
x = 1 / 2
assert type(x) is int
''')
result = testdir.runpytest()
assert result.ret == 0
class TestAssertionRewriteHookDetails(object):
def test_loader_is_package_false_for_module(self, testdir):
testdir.makepyfile(test_fun="""
def test_loader():
assert not __loader__.is_package(__name__)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"* 1 passed*",
])
def test_loader_is_package_true_for_package(self, testdir):
testdir.makepyfile(test_fun="""
def test_loader():
assert not __loader__.is_package(__name__)
def test_fun():
assert __loader__.is_package('fun')
def test_missing():
assert not __loader__.is_package('pytest_not_there')
""")
testdir.mkpydir('fun')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'* 3 passed*',
])
@pytest.mark.skipif("sys.version_info[0] >= 3")
@pytest.mark.xfail("hasattr(sys, 'pypy_translation_info')")
def test_assume_ascii(self, testdir):
content = "u'\xe2\x99\xa5\x01\xfe'"
testdir.tmpdir.join("test_encoding.py").write(content, "wb")
res = testdir.runpytest()
assert res.ret != 0
assert "SyntaxError: Non-ASCII character" in res.stdout.str()
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie_second_line(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_detect_coding_cookie_crlf(self, testdir):
testdir.makepyfile(test_cookie="""
# -*- coding: utf-8 -*-
u"St\xc3\xa4d"
def test_rewritten():
assert "@py_builtins" in globals()""")
assert testdir.runpytest().ret == 0
def test_sys_meta_path_munged(self, testdir):
testdir.makepyfile("""
def test_meta_path():
import sys; sys.meta_path = []""")
assert testdir.runpytest().ret == 0
def test_write_pyc(self, testdir, tmpdir, monkeypatch):
from _pytest.assertion.rewrite import _write_pyc
from _pytest.assertion import AssertionState
try:
import __builtin__ as b
except ImportError:
import builtins as b
config = testdir.parseconfig([])
state = AssertionState(config, "rewrite")
source_path = tmpdir.ensure("source.py")
pycpath = tmpdir.join("pyc").strpath
assert _write_pyc(state, [1], source_path.stat(), pycpath)
def open(*args):
e = IOError()
e.errno = 10
raise e
monkeypatch.setattr(b, "open", open)
assert not _write_pyc(state, [1], source_path.stat(), pycpath)
def test_resources_provider_for_loader(self, testdir):
"""
Attempts to load resources from a package should succeed normally,
even when the AssertionRewriteHook is used to load the modules.
See #366 for details.
"""
pytest.importorskip("pkg_resources")
testdir.mkpydir('testpkg')
contents = {
'testpkg/test_pkg': """
import pkg_resources
import pytest
from _pytest.assertion.rewrite import AssertionRewritingHook
def test_load_resource():
assert isinstance(__loader__, AssertionRewritingHook)
res = pkg_resources.resource_string(__name__, 'resource.txt')
res = res.decode('ascii')
assert res == 'Load me please.'
""",
}
testdir.makepyfile(**contents)
testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
result = testdir.runpytest_subprocess()
result.assert_outcomes(passed=1)
def test_read_pyc(self, tmpdir):
"""
Ensure that the `_read_pyc` can properly deal with corrupted pyc files.
In those circumstances it should just give up instead of generating
an exception that is propagated to the caller.
"""
import py_compile
from _pytest.assertion.rewrite import _read_pyc
source = tmpdir.join('source.py')
pyc = source + 'c'
source.write('def test(): pass')
py_compile.compile(str(source), str(pyc))
contents = pyc.read(mode='rb')
strip_bytes = 20 # header is around 8 bytes, strip a little more
assert len(contents) > strip_bytes
pyc.write(contents[:strip_bytes], mode='wb')
assert _read_pyc(source, str(pyc)) is None # no error
def test_reload_is_same(self, testdir):
# A file that will be picked up during collecting.
testdir.tmpdir.join("file.py").ensure()
testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
[pytest]
python_files = *.py
"""))
testdir.makepyfile(test_fun="""
import sys
try:
from imp import reload
except ImportError:
pass
def test_loader():
import file
assert sys.modules["file"] is reload(file)
""")
result = testdir.runpytest('-s')
result.stdout.fnmatch_lines([
"* 1 passed*",
])
def test_get_data_support(self, testdir):
"""Implement optional PEP302 api (#808).
"""
path = testdir.mkpydir("foo")
path.join("test_foo.py").write(_pytest._code.Source("""
class Test(object):
def test_foo(self):
import pkgutil
data = pkgutil.get_data('foo.test_foo', 'data.txt')
assert data == b'Hey'
"""))
path.join('data.txt').write('Hey')
result = testdir.runpytest()
result.stdout.fnmatch_lines('*1 passed*')
def test_issue731(testdir):
testdir.makepyfile("""
class LongReprWithBraces(object):
def __repr__(self):
return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'
def some_method(self):
return False
def test_long_repr():
obj = LongReprWithBraces()
assert obj.some_method()
""")
result = testdir.runpytest()
assert 'unbalanced braces' not in result.stdout.str()
class TestIssue925(object):
def test_simple_case(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert (False == False) == False
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert (False == False) == False')
def test_long_case(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert False == (False == True) == True
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert (False == True) == True')
def test_many_brackets(self, testdir):
testdir.makepyfile("""
def test_ternary_display():
assert True == ((False == True) == True)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)')
class TestIssue2121():
def test_simple(self, testdir):
testdir.tmpdir.join("tests/file.py").ensure().write("""
def test_simple_failure():
assert 1 + 1 == 3
""")
testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
[pytest]
python_files = tests/**.py
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines('*E*assert (1 + 1) == 3')
avg_line_length: 30.324103 | max_line_length: 89 | alphanum_fraction: 0.545593
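
The `rewrite`/`getmsg` helpers at the top of the test module run source through `ast.parse`, transform the tree, compile it, and exec the result to capture the failure message. A stripped-down sketch of that parse-compile-exec loop without pytest's rewriter:

import ast

src = "def f():\n    assert 1 + 1 == 3\n"
tree = ast.parse(src)                  # source -> AST; pytest rewrites asserts here
code = compile(tree, "<test>", "exec")
ns = {}
exec(code, ns)                         # defines f in ns
try:
    ns["f"]()
except AssertionError as exc:
    print("failed:", repr(exc))
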
Record 6
hexsha: 4fbf5bfc23a09f532417eafac673b20e948d4f5c | size: 737 | ext: py | lang: Python
max_stars: models/transport.py @ 3lpsy/FactionAPI (e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2019-06-07T16:21:51.000Z to 2019-06-07T16:21:51.000Z
max_issues: models/transport.py @ 3lpsy/FactionAPI (e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: models/transport.py @ 3lpsy/FactionAPI (e3659c4a1a1cbdefcf6c3a240ee6db1475e3b022) | licenses: ["BSD-3-Clause"] | count: null | events: null
content:
from backend.database import db
class Transport(db.Model):
__tablename__ = "Transport"
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String)
TransportType = db.Column(db.String)
Guid = db.Column(db.String)
Created = db.Column(db.DateTime)
LastCheckin = db.Column(db.DateTime)
Configuration = db.Column(db.String)
ApiKeyId = db.Column(db.Integer, db.ForeignKey('ApiKey.Id'), nullable=False)
Enabled = db.Column(db.Boolean)
Visible = db.Column(db.Boolean)
Agents = db.relationship('Agent', backref='Transport', lazy=True)
Payloads = db.relationship('Payload', backref='Transport', lazy=True)
def __repr__(self):
return '<Transport: %s>' % str(self.Id)
avg_line_length: 33.5 | max_line_length: 80 | alphanum_fraction: 0.683853
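
A hedged usage sketch for the `Transport` model above, assuming `db` from `backend.database` is a standard Flask-SQLAlchemy instance and an application context is active (the field values are made up):

from backend.database import db
from models.transport import Transport

# Query via the Flask-SQLAlchemy convenience attribute.
enabled = Transport.query.filter_by(Enabled=True, Visible=True).all()
for transport in enabled:
    print(transport)  # __repr__ prints <Transport: Id>

# Insert a new row; ApiKeyId is a required foreign key.
db.session.add(Transport(Name="http", TransportType="HTTP", ApiKeyId=1))
db.session.commit()
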
Record 7
hexsha: 1382d43500ad690e826c3b76582069e724fa1424 | size: 187 | ext: py | lang: Python
max_stars: vmupdate/__init__.py @ CorwinTanner/vmupdate (3cd231abb6d00b1fce6850b621b505d30ed14125) | licenses: ["MIT"] | count: null | events: null
max_issues: vmupdate/__init__.py @ CorwinTanner/vmupdate (3cd231abb6d00b1fce6850b621b505d30ed14125) | licenses: ["MIT"] | count: null | events: null
max_forks: vmupdate/__init__.py @ CorwinTanner/vmupdate (3cd231abb6d00b1fce6850b621b505d30ed14125) | licenses: ["MIT"] | count: null | events: null
content:
"""
vmupdate is a command line utility used to keep your virtual machines up to date.
"""
from os import path
__version__ = '0.3.0'
BASE_DIR = path.dirname(path.dirname(__file__))
avg_line_length: 18.7 | max_line_length: 85 | alphanum_fraction: 0.716578
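
`BASE_DIR` above applies `path.dirname` twice to climb from the package's `__init__.py` to the directory containing the package. A tiny illustration with an invented path:

from os import path

init_file = "/src/vmupdate/__init__.py"  # hypothetical location of __file__
pkg_dir = path.dirname(init_file)        # /src/vmupdate
base_dir = path.dirname(pkg_dir)         # /src
print(base_dir)
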
Record 8
hexsha: 410ade7d563f74f3bade775abf3569f93d6d7975 | size: 11,036 | ext: py | lang: Python
max_stars: localstack/services/awslambda/lambda_utils.py @ roguesupport/localstack (087abb05fcb360297431ad8e5790c8014e0a80d7) | licenses: ["Apache-2.0"] | count: 1 | events: 2022-03-17T07:22:23.000Z to 2022-03-17T07:22:23.000Z
max_issues: localstack/services/awslambda/lambda_utils.py @ roguesupport/localstack (087abb05fcb360297431ad8e5790c8014e0a80d7) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: localstack/services/awslambda/lambda_utils.py @ roguesupport/localstack (087abb05fcb360297431ad8e5790c8014e0a80d7) | licenses: ["Apache-2.0"] | count: null | events: null
content:
import base64
import logging
import os
import re
import time
from collections import defaultdict
from functools import lru_cache
from io import BytesIO
from typing import Any, Dict, List, Union
from flask import Response
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.aws_responses import flask_error_response_json
from localstack.utils.common import short_uid, to_str
from localstack.utils.docker_utils import DOCKER_CLIENT
LOG = logging.getLogger(__name__)
# root path of Lambda API endpoints
API_PATH_ROOT = "/2015-03-31"
# Lambda runtime constants
LAMBDA_RUNTIME_PYTHON36 = "python3.6"
LAMBDA_RUNTIME_PYTHON37 = "python3.7"
LAMBDA_RUNTIME_PYTHON38 = "python3.8"
LAMBDA_RUNTIME_PYTHON39 = "python3.9"
LAMBDA_RUNTIME_NODEJS = "nodejs"
LAMBDA_RUNTIME_NODEJS10X = "nodejs10.x"
LAMBDA_RUNTIME_NODEJS12X = "nodejs12.x"
LAMBDA_RUNTIME_NODEJS14X = "nodejs14.x"
LAMBDA_RUNTIME_JAVA8 = "java8"
LAMBDA_RUNTIME_JAVA8_AL2 = "java8.al2"
LAMBDA_RUNTIME_JAVA11 = "java11"
LAMBDA_RUNTIME_DOTNETCORE2 = "dotnetcore2.0"
LAMBDA_RUNTIME_DOTNETCORE21 = "dotnetcore2.1"
LAMBDA_RUNTIME_DOTNETCORE31 = "dotnetcore3.1"
LAMBDA_RUNTIME_GOLANG = "go1.x"
LAMBDA_RUNTIME_RUBY = "ruby"
LAMBDA_RUNTIME_RUBY25 = "ruby2.5"
LAMBDA_RUNTIME_RUBY27 = "ruby2.7"
LAMBDA_RUNTIME_PROVIDED = "provided"
LAMBDA_RUNTIME_PROVIDED_AL2 = "provided.al2"
# default handler and runtime
LAMBDA_DEFAULT_HANDLER = "handler.handler"
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON37
LAMBDA_DEFAULT_STARTING_POSITION = "LATEST"
# List of Dotnet Lambda runtime names
DOTNET_LAMBDA_RUNTIMES = [
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_DOTNETCORE31,
]
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
LAMBDA_CONTAINER_NETWORK = None
class ClientError(Exception):
def __init__(self, msg, code=400):
super(ClientError, self).__init__(msg)
self.code = code
self.msg = msg
def get_response(self):
if isinstance(self.msg, Response):
return self.msg
return error_response(self.msg, self.code)
@lru_cache()
def get_default_executor_mode() -> str:
"""
Returns the default docker executor mode, which is "docker" if the docker socket is available via the docker
client, or "local" otherwise.
:return:
"""
try:
return "docker" if DOCKER_CLIENT.has_docker() else "local"
except Exception:
return "local"
def get_executor_mode() -> str:
"""
Returns the currently active lambda executor mode. If config.LAMBDA_EXECUTOR is set, then it returns that,
otherwise it falls back to get_default_executor_mode().
:return: the lambda executor mode (e.g., 'local', 'docker', or 'docker-reuse')
"""
return config.LAMBDA_EXECUTOR or get_default_executor_mode()
def multi_value_dict_for_list(elements: Union[List, Dict]) -> Dict:
temp_mv_dict = defaultdict(list)
for key in elements:
if isinstance(key, (list, tuple)):
key, value = key
else:
value = elements[key]
key = to_str(key)
temp_mv_dict[key].append(value)
return dict((k, tuple(v)) for k, v in temp_mv_dict.items())
def get_lambda_runtime(runtime_details: Union[LambdaFunction, str]) -> str:
"""Return the runtime string from the given LambdaFunction (or runtime string)."""
if isinstance(runtime_details, LambdaFunction):
runtime_details = runtime_details.runtime
if not isinstance(runtime_details, str):
LOG.info("Unable to determine Lambda runtime from parameter: %s", runtime_details)
return runtime_details or ""
def is_provided_runtime(runtime_details: Union[LambdaFunction, str]) -> bool:
"""Whether the given LambdaFunction uses a 'provided' runtime."""
runtime = get_lambda_runtime(runtime_details) or ""
return runtime.startswith("provided")
def get_handler_file_from_name(handler_name: str, runtime: str = None):
runtime = runtime or LAMBDA_DEFAULT_RUNTIME
if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
return "bootstrap"
delimiter = "."
if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
file_ext = ".js"
elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
file_ext = ""
elif runtime.startswith(tuple(DOTNET_LAMBDA_RUNTIMES)):
file_ext = ".dll"
delimiter = ":"
elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
file_ext = ".rb"
else:
handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
file_ext = ".py"
return "%s%s" % (handler_name.split(delimiter)[0], file_ext)
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA8_AL2, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("nodejs")
def is_python_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("python")
def store_lambda_logs(
lambda_function: LambdaFunction, log_output: str, invocation_time=None, container_id=None
):
# leave here to avoid import issues from CLI
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
log_group_name = "/aws/lambda/%s" % lambda_function.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time_secs))
log_stream_name = "%s/[LATEST]%s" % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
container_name = bootstrap.get_main_container_name()
try:
if config.is_in_docker:
DOCKER_MAIN_CONTAINER_IP = DOCKER_CLIENT.get_container_ipv4_for_network(
container_name_or_id=container_name,
container_network=get_container_network_for_lambda(),
)
else:
# default gateway for the network should be the host
# (only under Linux - otherwise fall back to DOCKER_HOST_FROM_CONTAINER below)
if config.is_in_linux:
DOCKER_MAIN_CONTAINER_IP = DOCKER_CLIENT.inspect_network(
get_container_network_for_lambda()
)["IPAM"]["Config"][0]["Gateway"]
LOG.info("Determined main container target IP: %s", DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
LOG.info(
'Unable to get IP address of main Docker container "%s": %s', container_name, e
)
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return (
config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
)
def get_container_network_for_lambda():
global LAMBDA_CONTAINER_NETWORK
if config.LAMBDA_DOCKER_NETWORK:
return config.LAMBDA_DOCKER_NETWORK
if LAMBDA_CONTAINER_NETWORK is None:
try:
if config.is_in_docker:
networks = DOCKER_CLIENT.get_networks(bootstrap.get_main_container_name())
LAMBDA_CONTAINER_NETWORK = networks[0]
else:
LAMBDA_CONTAINER_NETWORK = (
"bridge" # use the default bridge network in case of host mode
)
LOG.info("Determined lambda container network: %s", LAMBDA_CONTAINER_NETWORK)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get network name of main container "%s": %s', container_name, e)
return LAMBDA_CONTAINER_NETWORK
def rm_docker_container(container_name_or_id, check_existence=False, safe=False):
# TODO: remove method / move to docker module
if not container_name_or_id:
return
if check_existence and container_name_or_id not in DOCKER_CLIENT.get_running_container_names():
# TODO: check names as well as container IDs!
return
try:
DOCKER_CLIENT.remove_container(container_name_or_id)
except Exception:
if not safe:
raise
def get_record_from_event(event: Dict, key: str) -> Any:
"""Retrieve a field with the given key from the list of Records within 'event'."""
try:
return event["Records"][0][key]
except KeyError:
return None
def get_zip_bytes(function_code):
"""Returns the ZIP file contents from a FunctionCode dict.
:type function_code: dict
:param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
:returns: bytes of the Zip file.
"""
function_code = function_code or {}
if "S3Bucket" in function_code:
s3_client = aws_stack.connect_to_service("s3")
bytes_io = BytesIO()
try:
s3_client.download_fileobj(function_code["S3Bucket"], function_code["S3Key"], bytes_io)
zip_file_content = bytes_io.getvalue()
except Exception as e:
raise ClientError("Unable to fetch Lambda archive from S3: %s" % e, 404)
elif "ZipFile" in function_code:
zip_file_content = function_code["ZipFile"]
zip_file_content = base64.b64decode(zip_file_content)
elif "ImageUri" in function_code:
zip_file_content = None
else:
raise ClientError("No valid Lambda archive specified: %s" % list(function_code.keys()))
return zip_file_content
def event_source_arn_matches(mapped: str, searched: str) -> bool:
if not mapped:
return False
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
if re.match(r"^%s$" % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched) :]
return suffix[0] == "/"
return False
def error_response(msg, code=500, error_type="InternalFailure"):
if code != 404:
LOG.debug(msg)
return flask_error_response_json(msg, code=code, error_type=error_type)
avg_line_length: 36.909699 | max_line_length: 112 | alphanum_fraction: 0.708499
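
`event_source_arn_matches` above treats a mapped ARN as matching when it equals the searched ARN, matches it as a regex, or extends it across a `/` boundary (e.g., a DynamoDB stream ARN extending its table ARN). A couple of worked cases with invented ARNs, reusing the same logic:

import re

def arn_matches(mapped: str, searched: str) -> bool:
    # Standalone copy of the matching rules shown above.
    if not mapped:
        return False
    if not searched or mapped == searched:
        return True
    if re.match(r"^%s$" % searched, mapped):
        return True
    if mapped.startswith(searched):
        suffix = mapped[len(searched):]
        return suffix[0] == "/"
    return False

table = "arn:aws:dynamodb:us-east-1:000000000000:table/demo"
stream = table + "/stream/2024-01-01T00:00:00.000"
print(arn_matches(stream, table))       # True: extends across a '/' boundary
print(arn_matches(table + "2", table))  # False: 'demo2' is a different table
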
Record 9
hexsha: 975a1e8765eb2c2aff035c56438f5df6adc38b35 | size: 4,621 | ext: py | lang: Python
max_stars: lib/experiment.py @ Racter42/gxabm (04b77f3abb9caa69e527f84c3d85fa8e3d931e27) | licenses: ["MIT"] | count: 1 | events: 2021-11-23T21:44:09.000Z to 2021-11-23T21:44:09.000Z
max_issues: lib/experiment.py @ Racter42/gxabm (04b77f3abb9caa69e527f84c3d85fa8e3d931e27) | licenses: ["MIT"] | count: 18 | events: 2021-07-28T22:32:30.000Z to 2021-12-09T19:43:36.000Z
max_forks: lib/experiment.py @ Racter42/gxabm (04b77f3abb9caa69e527f84c3d85fa8e3d931e27) | licenses: ["MIT"] | count: 2 | events: 2021-08-04T21:24:14.000Z to 2021-11-23T17:57:40.000Z
content:
import os
import yaml
import json
import lib
import helm
from common import load_profiles, set_active_profile
import benchmark
INVOCATIONS_DIR = "invocations"
METRICS_DIR = "metrics"
def run(args: list):
"""
Runs a single benchmark defined by *args[0]*
:param args: a list that contains a single element, the path to a benchmark
configuration file.
    :return: True if the benchmarks completed successfully. False otherwise.
"""
if len(args) == 0:
print("ERROR: No benchmarking configuration provided.")
return False
benchmark_path = args[0]
if not os.path.exists(benchmark_path):
print(f"ERROR: Benchmarking configuration not found {benchmark_path}")
return False
with open(benchmark_path, 'r') as f:
config = yaml.safe_load(f)
profiles = load_profiles()
num_runs = config['runs']
for cloud in config['cloud']:
if cloud not in profiles:
print(f"WARNING: No profile found for {cloud}")
continue
if not set_active_profile(cloud):
print(f"ERROR: Unable to set the profile for {cloud}")
continue
if lib.KUBECONFIG is None:
print(f"ERROR: No kubeconfig set for {cloud}")
continue
print("------------------------")
print(f"Benchmarking: {cloud}")
for conf in config['job_configs']:
job_conf_path = f"rules/{conf}.yml"
if not helm.update([job_conf_path]):
print(f"WARNING: job conf not found {conf}")
continue
for n in range(num_runs):
history_name_prefix = f"{n} {cloud} {conf}"
for workflow_conf in config['benchmark_confs']:
benchmark.run([workflow_conf, history_name_prefix])
# for n in range(num_runs):
# print("------------------------")
# print(f"Benchmarking run #{n+1}")
# for cloud in config['cloud']:
# if cloud not in profiles:
# print(f"WARNING: no profile for instance {cloud}")
# continue
# if not set_active_profile(cloud):
# print(f"WARNING: unable to set {cloud} as the active profile")
# if lib.KUBECONFIG is None:
# print(f"WARNGING: no kubeconfig for instance {cloud}")
# continue
# for job_conf in config['job_configs']:
# job_conf_path = f"rules/{job_conf}.yml"
# if not helm.update([job_conf_path]):
# print(f"WARNING: job conf not found {job_conf}")
# continue
# history_name_prefix = f"Run {n} {job_conf}"
# for workflow_conf in config['workflow_conf']:
# workflow.run([workflow_conf, history_name_prefix])
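# Illustrative benchmark configuration consumed by run() above (keys inferred
# from the config lookups; values are hypothetical):
#
#   runs: 3
#   cloud: [aws, gcp]
#   job_configs: [4x8, 8x16]
#   benchmark_confs: [benchmarks/dna.yaml]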
def test(args: list):
print(lib.GALAXY_SERVER)
if os.path.exists(args[0]):
with open(args[0]) as f:
data = yaml.safe_load(f)
print(data)
def parse_toolid(id:str) -> str:
parts = id.split('/')
return f"{parts[-2]},{parts[-1]}"
def summarize(args: list):
"""
Parses all the files in the **METRICS_DIR** directory and prints metrics
as CSV to stdout
:param args: Ignored
:return: None
"""
row = [''] * 15
print("Run,Cloud,Job Conf,Workflow,History,Server,Tool,Tool Version,State,Slots,Memory,Runtime (Sec),CPU,Memory Limit (Bytes),Memory Max usage (Bytes),Memory Soft Limit")
for file in os.listdir(METRICS_DIR):
input_path = os.path.join(METRICS_DIR, file)
with open(input_path, 'r') as f:
data = json.load(f)
row[0] = data['run']
row[1] = data['cloud']
row[2] = data['job_conf']
row[3] = data['workflow_id']
row[4] = data['history_id']
row[5] = data['server'] if data['server'] is not None else 'https://iu1.usegvl.org/galaxy'
row[6] = parse_toolid(data['metrics']['tool_id'])
row[7] = data['metrics']['state']
add_metrics_to_row(data['metrics']['job_metrics'], row)
print(','.join(row))
def add_metrics_to_row(metrics_list: list, row: list):
accept_metrics = ['galaxy_slots', 'galaxy_memory_mb', 'runtime_seconds', 'cpuacct.usage','memory.limit_in_bytes', 'memory.max_usage_in_bytes','memory.soft_limit_in_bytes']
for job_metrics in metrics_list:
if job_metrics['name'] in accept_metrics:
index = accept_metrics.index(job_metrics['name'])
row[index + 8] = job_metrics['raw_value']
# row.append(job_metrics['raw_value'])
| 35.007576
| 175
| 0.590565
|
4cbdf7a0ef242165701dd3369f8033de29ca014d
| 10,320
|
py
|
Python
|
src/transformers/commands/add_new_model.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 31
|
2022-02-02T13:13:41.000Z
|
2022-03-29T08:37:20.000Z
|
src/transformers/commands/add_new_model.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 2
|
2022-01-06T05:40:05.000Z
|
2022-01-06T15:12:29.000Z
|
src/transformers/commands/add_new_model.py
|
liminghao1630/transformers
|
207594be81b8e5a8589c8b11c3b236924555d806
|
[
"Apache-2.0"
] | 3
|
2022-01-06T04:44:13.000Z
|
2022-02-18T23:35:21.000Z
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_has_cookiecutter = True
except ImportError:
_has_cookiecutter = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
add_new_model_parser = parser.add_parser("add-new-model")
add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
add_new_model_parser.add_argument(
"--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
)
add_new_model_parser.set_defaults(func=add_new_model_command_factory)
def __init__(self, testing: bool, testing_file: str, path=None, *args):
self._testing = testing
self._testing_file = testing_file
self._path = path
def run(self):
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
)
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory."
)
path_to_transformer_root = (
Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
)
path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(path_to_cookiecutter))
else:
with open(self._testing_file, "r") as configuration_file:
testing_configuration = json.load(configuration_file)
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path),
no_input=True,
extra_context=testing_configuration,
)
directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json", "r") as configuration_file:
configuration = json.load(configuration_file)
lowercase_model_name = configuration["lowercase_modelname"]
generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"{directory}/configuration.json")
output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(model_dir, exist_ok=True)
shutil.move(
f"{directory}/__init__.py",
f"{model_dir}/__init__.py",
)
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py",
f"{model_dir}/configuration_{lowercase_model_name}.py",
)
def remove_copy_lines(path):
with open(path, "r") as f:
lines = f.readlines()
with open(path, "w") as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py",
f"{model_dir}/modeling_{lowercase_model_name}.py",
)
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py",
f"{path_to_transformer_root}/tests/test_modeling_{lowercase_model_name}.py",
)
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py",
f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
)
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
f"{path_to_transformer_root}/tests/test_modeling_tf_{lowercase_model_name}.py",
)
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py",
f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
)
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
f"{path_to_transformer_root}/tests/test_modeling_flax_{lowercase_model_name}.py",
)
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/{lowercase_model_name}.mdx",
f"{path_to_transformer_root}/docs/source/model_doc/{lowercase_model_name}.mdx",
)
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py",
f"{model_dir}/tokenization_{lowercase_model_name}.py",
)
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py",
f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
# Create temp file
fh, abs_path = mkstemp()
line_found = False
with fdopen(fh, "w") as new_file:
with open(original_file) as old_file:
for line in old_file:
new_file.write(line)
if line_to_copy_below in line:
line_found = True
for line_to_copy in lines_to_copy:
new_file.write(line_to_copy)
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file.")
# Copy the file permissions from the old file to the new file
copymode(original_file, abs_path)
# Remove original file
remove(original_file)
# Move new file
move(abs_path, original_file)
def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(path_to_datafile):
with open(path_to_datafile) as datafile:
lines_to_copy = []
skip_file = False
skip_snippet = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
file_to_replace_in = line.split('"')[1]
skip_file = skip_units(line)
elif "# Below: " in line and "##" not in line:
line_to_copy_below = line.split('"')[1]
skip_snippet = skip_units(line)
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
lines_to_copy = []
elif "# Replace with" in line and "##" not in line:
lines_to_copy = []
elif "##" not in line:
lines_to_copy.append(line)
remove(path_to_datafile)
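        # Illustrative `to_replace_*.py` layout parsed above (hypothetical content):
        #   # To replace in: "src/transformers/__init__.py"
        #   # Below: "# Models"
        #   from .models import new_model
        #   # End.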
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
os.rmdir(directory)
| 41.445783
| 119
| 0.620252
|
f0ffded880c12a07812b9a6b2bf183c2595ceebe
| 814
|
py
|
Python
|
2020/8.py
|
quickthom/aoc
|
34703f57ae8dbbd56748401d999fcb96d0b47bfe
|
[
"MIT"
] | null | null | null |
2020/8.py
|
quickthom/aoc
|
34703f57ae8dbbd56748401d999fcb96d0b47bfe
|
[
"MIT"
] | null | null | null |
2020/8.py
|
quickthom/aoc
|
34703f57ae8dbbd56748401d999fcb96d0b47bfe
|
[
"MIT"
] | null | null | null |
from handy import read
lines = read(8)
#lines = [ "nop +0", "acc +1", "jmp +4", "acc +3", "jmp -3", "acc -99", "acc +1", "jmp -4", "acc +6", ]
def execute(lines):
pgmc = 0
pgmc_hist = set()
acc = 0
while pgmc < len(lines) and pgmc not in pgmc_hist:
pgmc_hist.add(pgmc)
        cmd, arg = lines[pgmc].split()
        if cmd == 'acc':
            acc += int(arg)
        pgmc += int(arg) if cmd == 'jmp' else 1
if pgmc != len(lines):
return False, acc
return True, acc
execute(lines)
nops_and_jmps = [i for i, cmd in enumerate(lines) if cmd[:3] in ('nop','jmp')]
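# Part 2: flip each nop<->jmp one at a time until execute() reports a clean exit.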
for i in nops_and_jmps:
new = lines.copy()
    repl = ('nop', 'jmp') if 'nop' in new[i] else ('jmp', 'nop')
new[i] = new[i].replace(*repl)
clean, acc = execute(new)
if clean:
break
print(acc)
| 26.258065
| 103
| 0.545455
|
c1b560d0957d12a948864ab5820fa8c7afb7dab6
| 3,043
|
py
|
Python
|
modules/image/object_detection/faster_rcnn_resnet50_fpn_venus/roi_extractor.py
|
chunzhang-hub/PaddleHub
|
c5cfd021f77fd59340fb26e223e09a592e6a345f
|
[
"Apache-2.0"
] | 8,360
|
2019-01-18T10:46:45.000Z
|
2022-03-31T14:50:02.000Z
|
modules/image/object_detection/faster_rcnn_resnet50_fpn_venus/roi_extractor.py
|
dwuping/PaddleHub
|
9a3b23295947e22149cc85c17cb4cf23c03f9e06
|
[
"Apache-2.0"
] | 1,158
|
2019-04-11T09:22:43.000Z
|
2022-03-31T12:12:09.000Z
|
modules/image/object_detection/faster_rcnn_resnet50_fpn_venus/roi_extractor.py
|
dwuping/PaddleHub
|
9a3b23295947e22149cc85c17cb4cf23c03f9e06
|
[
"Apache-2.0"
] | 1,677
|
2019-04-09T15:07:40.000Z
|
2022-03-31T06:41:10.000Z
|
# coding=utf-8
import paddle.fluid as fluid
__all__ = ['FPNRoIAlign']
class FPNRoIAlign(object):
"""
RoI align pooling for FPN feature maps
Args:
sampling_ratio (int): number of sampling points
min_level (int): lowest level of FPN layer
max_level (int): highest level of FPN layer
        canconical_level (int): the canonical FPN feature map level
        canonical_size (int): the canonical FPN feature map size
box_resolution (int): box resolution
mask_resolution (int): mask roi resolution
"""
def __init__(self,
sampling_ratio=0,
min_level=2,
max_level=5,
canconical_level=4,
canonical_size=224,
box_resolution=7,
mask_resolution=14):
super(FPNRoIAlign, self).__init__()
self.sampling_ratio = sampling_ratio
self.min_level = min_level
self.max_level = max_level
self.canconical_level = canconical_level
self.canonical_size = canonical_size
self.box_resolution = box_resolution
self.mask_resolution = mask_resolution
def __call__(self, head_inputs, rois, spatial_scale, is_mask=False):
"""
Adopt RoI align onto several level of feature maps to get RoI features.
Distribute RoIs to different levels by area and get a list of RoI
features by distributed RoIs and their corresponding feature maps.
Returns:
roi_feat(Variable): RoI features with shape of [M, C, R, R],
where M is the number of RoIs and R is RoI resolution
"""
k_min = self.min_level
k_max = self.max_level
num_roi_lvls = k_max - k_min + 1
name_list = list(head_inputs.keys())
input_name_list = name_list[-num_roi_lvls:]
spatial_scale = spatial_scale[-num_roi_lvls:]
rois_dist, restore_index = fluid.layers.distribute_fpn_proposals(rois, k_min, k_max, self.canconical_level,
self.canonical_size)
# rois_dist is in ascend order
roi_out_list = []
        resolution = self.mask_resolution if is_mask else self.box_resolution
for lvl in range(num_roi_lvls):
name_index = num_roi_lvls - lvl - 1
rois_input = rois_dist[lvl]
head_input = head_inputs[input_name_list[name_index]]
sc = spatial_scale[name_index]
roi_out = fluid.layers.roi_align(
input=head_input,
rois=rois_input,
pooled_height=resolution,
pooled_width=resolution,
spatial_scale=sc,
sampling_ratio=self.sampling_ratio)
roi_out_list.append(roi_out)
roi_feat_shuffle = fluid.layers.concat(roi_out_list)
roi_feat_ = fluid.layers.gather(roi_feat_shuffle, restore_index)
roi_feat = fluid.layers.lod_reset(roi_feat_, rois)
return roi_feat
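# Minimal usage sketch (hypothetical inputs; assumes an FPN head dict and RoIs
# produced elsewhere in the detection pipeline):
#   roi_align = FPNRoIAlign(sampling_ratio=2)
#   box_feat = roi_align(head_inputs, rois, spatial_scale)                 # 7x7 RoIs
#   mask_feat = roi_align(head_inputs, rois, spatial_scale, is_mask=True)  # 14x14 RoIs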
| 39.519481
| 115
| 0.618797
|
f707533305b80bf36cdd6ae41ab2b2aee1f24d73
| 216
|
py
|
Python
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | 1
|
2019-06-09T00:04:56.000Z
|
2019-06-09T00:04:56.000Z
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | 19
|
2019-06-09T14:45:52.000Z
|
2019-06-17T18:52:53.000Z
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | null | null | null |
def average(array):
array = list(set(array))
return sum(array)/len(array)
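# e.g. average([1, 2, 2, 3]) -> 2.0 (duplicates removed before averaging)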
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
| 24
| 41
| 0.606481
|
af736a64bd65956495f5ecc432d100c063f28b48
| 7,232
|
py
|
Python
|
eli5/base.py
|
abael/eli5
|
e2f2acfeac5f7a13f13b314e486069660287d7b0
|
[
"MIT"
] | null | null | null |
eli5/base.py
|
abael/eli5
|
e2f2acfeac5f7a13f13b314e486069660287d7b0
|
[
"MIT"
] | null | null | null |
eli5/base.py
|
abael/eli5
|
e2f2acfeac5f7a13f13b314e486069660287d7b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Any, List, Tuple, Union
from .base_utils import attrs
from .formatters.features import FormattedFeatureName
# @attrs decorator used in this file calls @attr.s(slots=True),
# creating attr.ib entries based on the signature of __init__.
@attrs
class Explanation(object):
""" An explanation for classifier or regressor,
it can either explain weights or a single prediction.
"""
def __init__(self,
estimator, # type: str
description=None, # type: str
error=None, # type: str
method=None, # type: str
is_regression=False, # type: bool
targets=None, # type: List[TargetExplanation]
feature_importances=None, # type: FeatureImportances
decision_tree=None, # type: TreeInfo
highlight_spaces=None,
transition_features=None, # type: TransitionFeatureWeights
):
self.estimator = estimator
self.description = description
self.error = error
self.method = method
self.is_regression = is_regression
self.targets = targets
self.feature_importances = feature_importances
self.decision_tree = decision_tree
self.highlight_spaces = highlight_spaces
self.transition_features = transition_features
def _repr_html_(self):
""" HTML formatting for the notebook.
"""
from eli5.formatters import fields
from eli5.formatters.html import format_as_html
return format_as_html(self, force_weights=False, show=fields.WEIGHTS)
@attrs
class FeatureImportances(object):
""" Feature importances with number of remaining non-zero features.
"""
def __init__(self, importances, remaining):
self.importances = importances # type: List[FeatureWeight]
self.remaining = remaining # type: int
@attrs
class TargetExplanation(object):
""" Explanation for a single target or class.
Feature weights are stored in the :feature_weights: attribute,
and features highlighted in text in the :weighted_spans: attribute.
"""
def __init__(self,
target, # type: str
feature_weights, # type: FeatureWeights
proba=None, # type: float
score=None, # type: float
weighted_spans=None, # type: WeightedSpans
):
self.target = target
self.feature_weights = feature_weights
self.proba = proba
self.score = score
self.weighted_spans = weighted_spans
# List is currently used for unhashed features
Feature = Union[str, List, FormattedFeatureName]
@attrs
class FeatureWeights(object):
""" Weights for top features, :pos: for positive and :neg: for negative,
sorted by descending absolute value.
Number of remaining positive and negative features are stored in
:pos_remaining: and :neg_remaining: attributes.
"""
def __init__(self,
pos, # type: List[FeatureWeight]
neg, # type: List[FeatureWeight]
pos_remaining=0, # type: int
neg_remaining=0, # type: int
):
self.pos = pos
self.neg = neg
self.pos_remaining = pos_remaining
self.neg_remaining = neg_remaining
@attrs
class FeatureWeight(object):
def __init__(self,
feature, # type: Feature
weight, # type: float
std=None, # type: float
value=None, # type: Any
):
self.feature = feature
self.weight = weight
self.std = std
self.value = value
@attrs
class WeightedSpans(object):
""" Holds highlighted spans for parts of document - a DocWeightedSpans
object for each vectorizer, and other features not highlighted anywhere.
"""
def __init__(self,
docs_weighted_spans, # type: List[DocWeightedSpans]
other=None, # type: FeatureWeights
):
self.docs_weighted_spans = docs_weighted_spans
self.other = other
WeightedSpan = Tuple[
Feature,
List[Tuple[int, int]], # list of spans (start, end) for this feature
float, # feature weight
]
@attrs
class DocWeightedSpans(object):
""" Features highlighted in text. :document: is a pre-processed document
before applying the analyzer. :weighted_spans: holds a list of spans
for features found in text (span indices correspond to
:document:). :preserve_density: determines how features are colored
when doing formatting - it is better set to True for char features
and to False for word features.
"""
def __init__(self,
document, # type: str
spans, # type: List[WeightedSpan]
preserve_density=None, # type: bool
vec_name=None, # type: str
):
self.document = document
self.spans = spans
self.preserve_density = preserve_density
self.vec_name = vec_name
@attrs
class TransitionFeatureWeights(object):
""" Weights matrix for transition features. """
def __init__(self,
                 class_names, # type: List[str]
coef,
):
self.class_names = class_names
self.coef = coef
@attrs
class TreeInfo(object):
""" Information about the decision tree. :criterion: is the name of
the function to measure the quality of a split, :tree: holds all nodes
of the tree, and :graphviz: is the tree rendered in graphviz .dot format.
"""
def __init__(self,
criterion, # type: str
tree, # type: NodeInfo
graphviz, # type: str
is_classification, # type: bool
):
self.criterion = criterion
self.tree = tree
self.graphviz = graphviz
self.is_classification = is_classification
@attrs
class NodeInfo(object):
""" A node in a binary tree.
Pointers to left and right children are in :left: and :right: attributes.
"""
def __init__(self,
id, # type: int
is_leaf, # type: bool
value,
value_ratio,
impurity, # type: float
samples, # type: int
sample_ratio, # type: float
feature_name=None, # type: str
feature_id=None, # type: int
threshold=None, # type: float
left=None, # type: NodeInfo
right=None, # type: NodeInfo
):
self.id = id
self.is_leaf = is_leaf
self.value = value
self.value_ratio = value_ratio
self.impurity = impurity
self.samples = samples
self.sample_ratio = sample_ratio
self.feature_name = feature_name
self.feature_id = feature_id
self.threshold = threshold
self.left = left
self.right = right
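# Illustrative construction (hypothetical values; only required fields shown):
#   expl = Explanation(estimator='LogisticRegression', method='linear model',
#                      targets=[TargetExplanation(
#                          target='y=1',
#                          feature_weights=FeatureWeights(pos=[FeatureWeight('x1', 0.5)],
#                                                         neg=[]))])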
| 33.637209
| 77
| 0.590846
|
b80524e0c415be27a9e1492fe64e475b3a0deea2
| 100
|
py
|
Python
|
python/pytest_example/eric_math.py
|
holycrap872/til
|
97f6b041dad03a2edffb804dc4db090b65b9154f
|
[
"MIT"
] | 8
|
2015-10-07T02:47:58.000Z
|
2018-12-25T16:01:08.000Z
|
python/pytest_example/eric_math.py
|
holycrap872/til
|
97f6b041dad03a2edffb804dc4db090b65b9154f
|
[
"MIT"
] | null | null | null |
python/pytest_example/eric_math.py
|
holycrap872/til
|
97f6b041dad03a2edffb804dc4db090b65b9154f
|
[
"MIT"
] | 1
|
2016-08-25T17:45:40.000Z
|
2016-08-25T17:45:40.000Z
|
def double(x):
return 2 * x
def square(x):
return x ** 2
def add(x, y):
return x + y
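# e.g. double(3) == 6, square(4) == 16, add(2, 5) == 7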
| 10
| 17
| 0.52
|
c5badc5ce5b62e1dcbd29fe34295f4048d487c1d
| 9,573
|
py
|
Python
|
docs/conf.py
|
nparslow/taskbuster
|
6bdafde2d3d724bdaa720eec490fbaa42016b5f1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
nparslow/taskbuster
|
6bdafde2d3d724bdaa720eec490fbaa42016b5f1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
nparslow/taskbuster
|
6bdafde2d3d724bdaa720eec490fbaa42016b5f1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# TaskBuster documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 1 14:33:26 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..')) # tell sphinx to look one folder up for the project files
from django.conf import settings
settings.configure()
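# Django-dependent modules imported by autodoc need configured settings;
# an empty settings.configure() is enough for documentation builds.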
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TaskBuster'
copyright = u'2016, Nick Parslow'
author = u'Nick Parslow'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'TaskBuster v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaskBusterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TaskBuster.tex', u'TaskBuster Documentation',
u'Nick Parslow', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'taskbuster', u'TaskBuster Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TaskBuster', u'TaskBuster Documentation',
author, 'TaskBuster', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.672355
| 99
| 0.718897
|
3aa3a454fb206fa664b89212c7a8c4d060d54609
| 354
|
py
|
Python
|
feder/parcels/types.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 16
|
2015-08-11T17:20:26.000Z
|
2022-02-11T20:15:41.000Z
|
feder/parcels/types.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 534
|
2015-08-04T00:10:54.000Z
|
2022-03-17T10:44:47.000Z
|
feder/parcels/types.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 10
|
2017-08-30T13:34:32.000Z
|
2022-02-18T13:00:35.000Z
|
from feder.parcels.models import IncomingParcelPost, OutgoingParcelPost
from feder.records.registry import record_type_registry
from feder.records.types import CommonRecordType
record_type_registry.registry(IncomingParcelPost, CommonRecordType(IncomingParcelPost))
record_type_registry.registry(OutgoingParcelPost, CommonRecordType(OutgoingParcelPost))
| 50.571429
| 87
| 0.89548
|
83b4d86aa8a83e956120cdb4b5f6c8df14df9e74
| 3,187
|
py
|
Python
|
uuv_assistants/scripts/unpause_simulation.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
uuv_assistants/scripts/unpause_simulation.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
uuv_assistants/scripts/unpause_simulation.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import rclpy
from std_srvs.srv import Empty
import time
import sys
from plankton_utils.time import is_sim_time
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node('unpause_simulation',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param])
#Default sim_time to True
# sim_time = rclpy.parameter.Parameter('use_sim_time', rclpy.Parameter.Type.BOOL, True)
# node.set_parameters([sim_time])
# if not rclpy.ok():
# rospy.ROSException('ROS master is not running!')
timeout = 0.0
if node.has_parameter('timeout'):
timeout = node.get_parameter('timeout').get_parameter_value().double_value
if timeout <= 0:
raise RuntimeError('Unpause time must be a positive floating point value')
print('Unpause simulation - Time = {} s'.format(timeout))
# start_time = time.time()
# while time.time() - start_time < timeout:
# time.sleep(0.1)
    if timeout > 0:
time.sleep(timeout)
#start_time = time.time()
    try:
        # Client for the Gazebo unpause service
        unpause = node.create_client(Empty, '/gazebo/unpause_physics')
        ready = unpause.wait_for_service(timeout_sec=100)
        if not ready:
            raise RuntimeError('service is unavailable')
    except RuntimeError:
        print('/gazebo/unpause_physics service is unavailable')
        sys.exit()
    node.get_logger().info(
        'The Gazebo "unpause_physics" service was available {} s after the timeout'.format(timeout))
    req = Empty.Request()
    future = unpause.call_async(req)
    rclpy.spin_until_future_complete(node, future)
    # Empty.Response carries no payload: a non-None result means the call completed
    if future.result() is not None:
        print('Simulation unpaused...')
    else:
        node.get_logger().error('Failed to unpause the simulation')
#==============================================================================
if __name__ == '__main__':
main()
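# Typical invocation (hypothetical package name):
#   ros2 run <your_package> unpause_simulation.py --ros-args -p timeout:=5.0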
| 36.215909
| 127
| 0.670223
|
6e80aaa71221d455dfde800170aadd00d5f1f888
| 5,208
|
py
|
Python
|
docs/conf.py
|
tannewt/Adafruit_CircuitPython_IL91874
|
fd926ee5b8530a5a9b1666143b27103f2338e9c7
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
tannewt/Adafruit_CircuitPython_IL91874
|
fd926ee5b8530a5a9b1666143b27103f2338e9c7
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
tannewt/Adafruit_CircuitPython_IL91874
|
fd926ee5b8530a5a9b1666143b27103f2338e9c7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["displayio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit IL91874 Library'
copyright = u'2019 Scott Shawcroft'
author = u'Scott Shawcroft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitIl91874Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitIL91874Library.tex', u'AdafruitIL91874 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AdafruitIL91874library', u'Adafruit IL91874 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitIL91874Library', u'Adafruit IL91874 Library Documentation',
author, 'AdafruitIL91874Library', 'One line description of project.',
'Miscellaneous'),
]
| 32.347826
| 146
| 0.685676
|
d7e32523bc14e9d6fd4603dca78742f00d710693
| 14,098
|
py
|
Python
|
anuga/culvert_flows/tests/test_culvert_routines_box_10pct.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 136
|
2015-05-07T05:47:43.000Z
|
2022-02-16T03:07:40.000Z
|
anuga/culvert_flows/tests/test_culvert_routines_box_10pct.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 184
|
2015-05-03T09:27:54.000Z
|
2021-12-20T04:22:48.000Z
|
anuga/culvert_flows/tests/test_culvert_routines_box_10pct.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 70
|
2015-03-18T07:35:22.000Z
|
2021-11-01T07:07:29.000Z
|
#!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import unittest
import os.path
import sys
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model
import numpy as num
class Test_culvert_routines_box_10pct(unittest.TestCase):
"""
This unit test sets up 6 tests for various culvert conditions for a Box Culvert on a 10% Slope
"""
def setUp(self):
pass
def tearDown(self):
pass
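    # Each test below follows the same pattern: build inlet/outlet specific
    # energies, take the total-energy difference across the culvert, call
    # boyd_generalised_culvert_model, and compare (Q, v, d) against
    # spreadsheet reference values within a relative tolerance.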
def test_boyd_1(self):
"""test_boyd_1
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
inlet_depth=0.150
outlet_depth=0.15
inlet_velocity=1.00
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.6
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
culvert_slope=10.0 # % Downward
z_in = 10.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST01 Q-v-d',Q,v,d))
#print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 0.5526, 1.146, 0.1339))
assert num.allclose(Q, 0.5526, rtol=1.0e-1) #inflow
assert num.allclose(v, 1.146, rtol=1.0e-1) #outflow velocity
assert num.allclose(d, 0.1339, rtol=1.0e-1) #depth at outlet used to calc v
def test_boyd_2(self):
"""test_boyd_2
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=0.500
outlet_depth=0.700
inlet_velocity=1.0
outlet_velocity=0.50
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 0.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST02 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 2.508, 1.897, 0.367))
assert num.allclose(Q, 2.508, rtol=1.0e-1) #inflow
assert num.allclose(v, 1.897, rtol=1.0e-1) #outflow velocity
assert num.allclose(d, 0.367, rtol=1.0e-1) #depth at outlet used to calc v
def test_boyd_3(self):
"""test_boyd_3
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.800
outlet_depth=0.80
inlet_velocity=1.0
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 0.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.2f'%('Delta E = ',delta_total_energy))
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST03 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 13.554, 3.329, 1.131))
assert num.allclose(Q, 13.554, rtol=1.0e-2) #inflow
assert num.allclose(v, 3.329, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.131, rtol=1.0e-2) #depth at outlet used to calc v
#NOTE FROM HERE DOWN THE UNITS TEST HAVE NOT BEEN AMENDED TO ALLOW VELOCITY COMPONENT TO BE USED. ONLY ABOVE 3 TESTS WORK. PM WILL FIX THE ONES BELOW WHEN THE ABOVE 3 ARE WORKING
def test_boyd_4(self):
"""test_boyd_4
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.00
outlet_depth=0.8
inlet_velocity=1.0
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.2f'%('Delta E = ',delta_total_energy))
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST04 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 6.609, 2.621, 0.70))
assert num.allclose(Q, 6.609, rtol=1.0e-2) #inflow
assert num.allclose(v, 2.621, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 0.70, rtol=1.0e-2) #depth at outlet used to calc v
def test_boyd_5(self):
"""test_boyd_5
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.50
inlet_velocity= 1.0
outlet_depth=2.5
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.3f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.3f'%('Delta E = ',delta_total_energy))
#print ('%s,%.3f,%.3f,%.3f' %('ANUGAcalcsTEST05 Q-v-d',Q,v,d))
#print ('%s,%.3f,%.3f,%.3f' %('Spreadsheet_Boydcalcs',2.961, 0.685, 1.20))
assert num.allclose(Q, 2.961, rtol=1.0e-2) #inflow
assert num.allclose(v, 0.685, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.20, rtol=1.0e-2) #depth at outlet used to calc v
def test_boyd_6(self):
"""test_boyd_6
This tests the Boyd routine with data obtained from ??? by Petar Milevski
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.50
inlet_velocity= 4.0
outlet_depth=0.80
outlet_velocity=4.0
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.3f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.3f'%('Delta E = ',delta_total_energy))
#print ('%s,%.3f,%.3f,%.3f' %('ANUGAcalcsTEST06 Q-v-d',Q,v,d))
#print ('%s,%.3f,%.3f,%.3f' %('Spreadsheet_Boydcalcs',15.537, 3.597, 1.20))
assert num.allclose(Q, 15.537, rtol=1.0e-2) #inflow
assert num.allclose(v, 3.597, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.20, rtol=1.0e-2) #depth at outlet used to calc v
# =========================================================================
# =========================================================================
if __name__ == "__main__":
suite = unittest.makeSuite(Test_culvert_routines_box_10pct, 'test')
runner = unittest.TextTestRunner()
runner.run(suite)
| 40.863768
| 178
| 0.465101
|
d989e3e4bbaa3fde55e2e102f448f05f50afb953
| 1,184
|
py
|
Python
|
src/lpa/xrd/code.py
|
DunstanBecht/lpa-xrd
|
62c3820ce348da3b375f30fd4e9e5552760da42e
|
[
"CC0-1.0"
] | null | null | null |
src/lpa/xrd/code.py
|
DunstanBecht/lpa-xrd
|
62c3820ce348da3b375f30fd4e9e5552760da42e
|
[
"CC0-1.0"
] | null | null | null |
src/lpa/xrd/code.py
|
DunstanBecht/lpa-xrd
|
62c3820ce348da3b375f30fd4e9e5552760da42e
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""
Module to manage the simulation program code.
"""
from . import *
from . import __version__
import pkg_resources
replace = (
("!VERSION", __version__),
)
@beartype
def copy(
cop: str,
pst: str,
) -> None:
"""
Clone recursively a file or a directory.
Input:
cop (str): file or directory to copy
        pst (str): file or directory name to paste to
"""
if os.path.isdir(cop):
os.mkdir(pst)
for nod in os.listdir(cop):
copy(os.path.join(cop, nod), os.path.join(pst, nod))
else:
with open(cop, "r") as f:
ctt = f.read()
for tgt, txt in replace:
ctt = ctt.replace(tgt, txt)
with open(pst, "w") as f:
f.write(ctt)
@beartype
def clone(
clndir: str = clone_dir,
) -> None:
"""
Clone the simulation program files.
Input:
clndir (str): export path of the cloned program
"""
if os.path.isdir(clndir):
raise ValueError(f"existing clone directory: {clndir}")
pth = pkg_resources.resource_filename('lpa.xrd', 'xrd/') # path to code
copy(pth, clndir) # copy program files
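# Usage sketch (hypothetical target directory):
#   clone("my-xrd-sim")  # copies the bundled program into ./my-xrd-sim,
#                        # substituting "!VERSION" with the installed version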
| 21.527273
| 75
| 0.581926
|
2308d9e31108bc5e1754d596641ca188f8db430e
| 2,470
|
py
|
Python
|
mpf/tests/test_ModesConfigValidation.py
|
haggispinball/mpf_fathom_fast
|
1035c3fb90bb279de84cc3ed4aa1e1df38d0d563
|
[
"MIT"
] | 163
|
2015-01-25T02:19:50.000Z
|
2022-03-26T12:00:28.000Z
|
mpf/tests/test_ModesConfigValidation.py
|
haggispinball/mpf_fathom_fast
|
1035c3fb90bb279de84cc3ed4aa1e1df38d0d563
|
[
"MIT"
] | 1,086
|
2015-03-23T19:53:17.000Z
|
2022-03-24T20:46:11.000Z
|
mpf/tests/test_ModesConfigValidation.py
|
haggispinball/mpf_fathom_fast
|
1035c3fb90bb279de84cc3ed4aa1e1df38d0d563
|
[
"MIT"
] | 148
|
2015-01-28T02:31:39.000Z
|
2022-03-22T13:54:01.000Z
|
from mpf.tests.MpfTestCase import MpfTestCase
from mpf._version import log_url
class TestModesConfigValidation(MpfTestCase):
def get_config_file(self):
return self.config
def get_machine_path(self):
return 'tests/machine_files/mode_tests/'
def setUp(self):
self.save_and_prepare_sys_path()
def tearDown(self):
self.restore_sys_path()
def test_loading_invalid_modes(self):
self.config = 'test_loading_invalid_modes.yaml'
with self.assertRaises(AssertionError) as context:
super(TestModesConfigValidation, self).setUp()
self.loop.close()
self.assertEqual("No config found for mode 'invalid'. MPF expects the config at "
"'modes/invalid/config/invalid.yaml' inside your machine folder.",
str(context.exception))
def test_empty_modes_section(self):
self.config = 'test_empty_modes_section.yaml'
super(TestModesConfigValidation, self).setUp()
super().tearDown()
def test_broken_mode_config(self):
self.config = 'test_broken_mode_config.yaml'
with self.assertRaises(AssertionError) as context:
super(TestModesConfigValidation, self).setUp()
self.loop.close(ignore_running_tasks=True)
self.maxDiff = None
self.assertEqual('Config File Error in ConfigValidator: Your config contains a value for the setting '
'"mode:invalid_key", but this is not a valid setting name. Error Code: CFE-ConfigValidator-2 '
'({})'.format(log_url.format("CFE-ConfigValidator-2")),
str(context.exception))
def test_missing_mode_section(self):
self.config = 'test_missing_mode_section.yaml'
super(TestModesConfigValidation, self).setUp()
self.assertTrue("broken_mode2" in self.machine.modes)
super().tearDown()
def test_mode_without_config(self):
self.config = 'test_mode_without_config.yaml'
with self.assertRaises(AssertionError) as context:
super(TestModesConfigValidation, self).setUp()
self.loop.close(ignore_running_tasks=True)
self.assertEqual("No config found for mode 'mode_without_config'. MPF expects the config at "
"'modes/mode_without_config/config/mode_without_config.yaml' inside your machine folder.",
str(context.exception))
| 37.424242
| 119
| 0.664777
|
b54ecd7614ff3d802547e62d28519477afef1e27
| 604
|
py
|
Python
|
wagtail/contrib/styleguide/wagtail_hooks.py
|
wlcrs/wagtail
|
8afbc6c3eccef9eb0f09ed56c54cd36779451882
|
[
"BSD-3-Clause"
] | 3
|
2019-05-14T13:43:08.000Z
|
2021-11-09T11:27:18.000Z
|
wagtail/contrib/styleguide/wagtail_hooks.py
|
denza/wagtail
|
3939397850f2c73d3f960cea5cc9c2cfae2d005d
|
[
"BSD-3-Clause"
] | 163
|
2019-06-14T20:45:06.000Z
|
2022-03-23T01:41:07.000Z
|
wagtail/contrib/styleguide/wagtail_hooks.py
|
denza/wagtail
|
3939397850f2c73d3f960cea5cc9c2cfae2d005d
|
[
"BSD-3-Clause"
] | 1
|
2020-04-10T03:21:10.000Z
|
2020-04-10T03:21:10.000Z
|
from django.conf.urls import url
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.menu import MenuItem
from wagtail.core import hooks
from . import views
@hooks.register('register_admin_urls')
def register_admin_urls():
return [
url(r'^styleguide/$', views.index, name='wagtailstyleguide'),
]
@hooks.register('register_settings_menu_item')
def register_styleguide_menu_item():
return MenuItem(
_('Styleguide'),
reverse('wagtailstyleguide'),
classnames='icon icon-image',
order=1000
)
| 23.230769
| 69
| 0.720199
|
549be53da1b8abb6ec25c515fac8a6f669ffc07f
| 5,657
|
py
|
Python
|
Pornhub_img.py
|
Guwudao/PH-image-download
|
3ae880b22f65c62003f5d2fb8df334f2cc1c15b9
|
[
"MIT"
] | 1
|
2020-08-12T06:18:40.000Z
|
2020-08-12T06:18:40.000Z
|
Pornhub_img.py
|
Guwudao/PH-image-download
|
3ae880b22f65c62003f5d2fb8df334f2cc1c15b9
|
[
"MIT"
] | null | null | null |
Pornhub_img.py
|
Guwudao/PH-image-download
|
3ae880b22f65c62003f5d2fb8df334f2cc1c15b9
|
[
"MIT"
] | null | null | null |
import requests
import threading
import os
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import urllib.parse
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36"
}
proxy = {
"https": "127.0.0.1:1087",
"http": "127.0.0.1:1087",
}
domain = "https://cn.pornhub.com"
download_failed_list = []
def key_word_search(key_word):
data = {
"search": key_word
}
query_string = urllib.parse.urlencode(data)
album_search_url = f"https://cn.pornhub.com/albums/female-straight-uncategorized?{query_string}"
print(album_search_url)
html = requests.get(album_search_url, headers=headers, timeout=180, proxies=proxy)
soup = BeautifulSoup(html.text, "html.parser")
# print(soup)
li_list = soup.find_all("li", class_="photoAlbumListContainer")
album_list = [li.a.get("href") for li in li_list]
print(f"========================== 当前相册个数: {len(album_list)} ==========================")
title_list = [title_tag.get_text() for title_tag in soup.find_all("div", class_="title-album")][4:-1]
print(f"========================== 相册列表: {title_list} ==========================")
count = int(input("需要下载的相册个数:"))
if count <= len(album_list):
albums = album_list[0:count]
return pornhub_get_page_list(albums)
else:
print("输入个数有误")
return []
# get page number
# page_list = [page.get_text() for page in soup.find_all("li", class_="page_number")]
# print(page_list)
def pornhub_get_page_list(album_list):
if isinstance(album_list, list):
image_page_url = []
for album in album_list:
main_url = domain + album
image_page_url.extend(get_list(main_url))
print(image_page_url)
return image_page_url
elif isinstance(album_list, int):
main_url = domain + f"/album/{album_list}"
return get_list(main_url)
def get_list(url):
url_list = []
try:
url_list.append(url)
html = requests.get(url, headers=headers, timeout=180, proxies=proxy)
soup = BeautifulSoup(html.text, "html.parser")
div_pagination = soup.find_all("div", class_="pagination3")
page_list = div_pagination[0].find_all("a", class_="greyButton")
url_list.extend([domain + page.get("href") for page in page_list])
print(url_list)
return url_list
except Exception as e:
print("get page list error: ", e)
return []
def get_all_image_url(page_list):
url_list = []
for page_url in page_list:
try:
html = requests.get(page_url, headers=headers, timeout=180, proxies=proxy)
soup = BeautifulSoup(html.text, "html.parser")
div_list = soup.find_all("div", attrs={"class": "js_lazy_bkg photoAlbumListBlock"})
url_list = url_list + [domain + div.a.get("href") for div in div_list]
except Exception as e:
print("get image url error: ", e)
print(f"总张数:{len(url_list)}")
# print(url_list)
return url_list
def image_download(info):
url = info[0]
index = info[1]
html = requests.get(url, headers=headers, proxies=proxy)
soup = BeautifulSoup(html.text, "html.parser")
div_list = soup.find_all("div", class_="centerImage")
section = soup.find_all("section", attrs={"id": "photoInfoSection"})
title = section[0].find("h1").get_text().replace(" ", "")
# print(title)
if not os.path.exists(title):
os.mkdir(title)
file_name = title + "_" + str(index)
if len(div_list) > 0:
img_url = div_list[0].img.get("src")
try:
with open(f"./{title}/{file_name}.jpg", "wb") as f:
resp = requests.get(img_url, timeout=180, proxies=proxy)
f.write(resp.content)
print("%s" % (threading.current_thread().name), f"下载完成: {url}", index)
except Exception as e:
print("download error: ", e, url)
download_failed_list.append(url)
def video_analytics():
domain = "https://cv.phncdn.com/"
url = "https://cn.pornhub.com/view_video.php?viewkey=ph5d24f5b77c6f6"
resp = requests.get(url, headers=headers, proxies=proxy)
print(resp.text)
    with open("./mini.html", 'r', encoding='utf-8') as f:
        html_file = f.read()
soup = BeautifulSoup(html_file, "html.parser")
videos = soup.find_all("source", attrs={"type": "video/mp4"})
# scripts = soup.find_all("script", attrs={"type": "text/javascript"})
print(videos)
for video in videos:
print(video.get("src"))
print("-" * 50)
if __name__ == '__main__':
select = 0
page_list = []
while select != 1 and select != 2:
        select = int(input("Enter download mode (1 = keyword search, 2 = album ID): "))
    if select == 1:
        key_word = str(input("Enter a search keyword: "))
        page_list = key_word_search(key_word)
    elif select == 2:
        num = int(input("Enter an album ID: "))
page_list = pornhub_get_page_list(num)
if len(page_list) > 0:
image_list = get_all_image_url(page_list)
        with ThreadPoolExecutor(max_workers=30, thread_name_prefix="worker_") as pool:
            # Submit every (url, index) pair in one call instead of invoking
            # map() once per URL with a single-item list.
            pool.map(image_download,
                     [(url, i) for i, url in enumerate(image_list, start=1)])
print("========================== 下载完成 ==========================")
else:
print("abort by page list not complete")
if len(download_failed_list) > 0:
print(f"download failed list: {download_failed_list}")
# video_analytics()
# key_word_search("过期米线")
| 32.325714
| 140
| 0.608096
|
8d420f8257412591ce7ce69791f04bb9f8bea571
| 2,404
|
py
|
Python
|
streamlit/src/pages/user_activities.py
|
likweitan/final_year_project
|
a86059cad92efe4edd85364d21b4ee6a56234b30
|
[
"MIT"
] | null | null | null |
streamlit/src/pages/user_activities.py
|
likweitan/final_year_project
|
a86059cad92efe4edd85364d21b4ee6a56234b30
|
[
"MIT"
] | null | null | null |
streamlit/src/pages/user_activities.py
|
likweitan/final_year_project
|
a86059cad92efe4edd85364d21b4ee6a56234b30
|
[
"MIT"
] | 1
|
2021-07-10T15:54:54.000Z
|
2021-07-10T15:54:54.000Z
|
from altair.vegalite.v4.schema.core import Scale
import streamlit as st
import altair as alt
import plotly.express as px
import pandas as pd
import numpy as np
def load(data):
st.title('🎲 User Statistics')
st.sidebar.header('User')
user = st.sidebar.selectbox('Please select an user', ['Student A','Student B','Student C','Student D','Student E'])
switcher = {
'Student A': "Kpq2q+eKw/O+6/jLs3XJosgmI7weEJxJZdnkKTbbF8I=",
'Student B': "0+VU/Zb0Q96uoByuRhl7r9bJuJO6CKWpsmNMEuijSzc=",
'Student C': "g8DnYvIqpolw10XlwWeIWv6NbDPByUbmgH8EshJqBns=",
'Student D': "kSyUTFlepsYUD723IPL/jEZ520xaKbscrBmNtBUFR1o=",
'Student E': "XMFbFA7C49+LRhUddhelfPpA6F5dbOoxeyL3eYbuTlY="
}
user_id = switcher.get(user,"Invalid")
st.write('**Student ID: **'+user_id)
st.write('The following table shows the subjects that users have attempted')
merge_df = merge_all(data[0], data[1], data[2])
course = merge_df[['ucid','problem_number','total_sec_taken','level']][merge_df['uuid']==user_id]
grouped_course = course.groupby(by=['ucid']).agg({'problem_number':'max','total_sec_taken':'mean','level':'max'}).reset_index()
col1, col2 = st.beta_columns([5,2])
with col1:
        st.line_chart(grouped_course, height=450)
line_plot = alt.Chart(grouped_course).mark_bar().encode(
x='ucid',
y='level',
color=alt.condition(
alt.datum.level > 2,
alt.value("green"), # The positive color
alt.value("red") # The negative color
)
).properties(height=400)
st.altair_chart(line_plot, use_container_width=True)
with st.beta_expander("More"):
st.table(grouped_course)
st.write(grouped_course['level'].value_counts().reset_index())
fig = px.pie(grouped_course, values=grouped_course['level'].value_counts(), names=grouped_course['level'].value_counts().index, color_discrete_sequence=["red", "green", "blue", "goldenrod", "magenta"],title='Percentage')
with col2:
st.plotly_chart(fig, use_container_width=True)
@st.cache(show_spinner=False)
def merge_all(info_content_df: pd.DataFrame, info_userdata_df: pd.DataFrame, log_problem_df: pd.DataFrame):
merge_df = log_problem_df.merge(info_userdata_df, how='left', on='uuid')
merge_df = merge_df.merge(info_content_df, how='left', on='ucid')
return merge_df
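# Hypothetical driver for this page module; the three CSV file names are
# assumptions, not taken from the app.
if __name__ == '__main__':
    data = (
        pd.read_csv("info_content.csv"),
        pd.read_csv("info_userdata.csv"),
        pd.read_csv("log_problem.csv"),
    )
    load(data)  # renders the user statistics page for the selected student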
| 38.774194
| 224
| 0.682196
|
b7c0aae8db360935204dceaf5e93ac8511de6738
| 1,141
|
py
|
Python
|
Hopfield-Network/hopfield.py
|
Andrey-2310/DPSI
|
4444b66d9d77f891464b218e72befc7be1d21584
|
[
"MIT"
] | null | null | null |
Hopfield-Network/hopfield.py
|
Andrey-2310/DPSI
|
4444b66d9d77f891464b218e72befc7be1d21584
|
[
"MIT"
] | null | null | null |
Hopfield-Network/hopfield.py
|
Andrey-2310/DPSI
|
4444b66d9d77f891464b218e72befc7be1d21584
|
[
"MIT"
] | null | null | null |
import numpy as np
weights = np.zeros((100, 100)).astype(int)
def write_weights_to_file(weights):
with open('weights', 'w') as f:
for item in weights:
f.write("%s\n" % item)
def teach_network(img_array):
calculate_weights(img_array)
delete_self_feedback()
write_weights_to_file(weights)
def calculate_weights(img_array):
for img in img_array:
for index, x in np.ndenumerate(weights):
weights[index[0]][index[1]] += img[index[0]] * img[index[1]]
def delete_self_feedback():
for diag_index in range(0, 100):
weights[diag_index][diag_index] = 0
def check_match(image, img_list):
return any(np.array_equal(image, img) for img in img_list)
def reproduce(noise_image, img_list, iter):
return noise_image if not iter or check_match(noise_image, img_list) \
else reproduce(reproduction(noise_image), img_list, iter - 1)
def reproduction(img):
new_img = []
for i in range(0, 100):
coef = 0
for j in range(0, 100):
coef += weights[i][j] * img[j]
new_img.append(1 if coef >= 0 else -1)
return new_img
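# Minimal usage sketch (assumes 10x10 bipolar images with pixel values in
# {-1, 1}); this driver block is illustrative, not part of the module.
if __name__ == '__main__':
    pattern = list(np.where(np.random.rand(100) > 0.5, 1, -1))
    teach_network([pattern])                   # Hebbian learning on one image
    noisy = list(pattern)
    for i in np.random.choice(100, 10, replace=False):
        noisy[i] = -noisy[i]                   # flip 10 random pixels
    restored = reproduce(noisy, [pattern], 20)
    print("recovered:", restored == pattern)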
| 24.804348
| 74
| 0.64943
|
1bd2a63a32c9389334e1d651c7c3f03b18b80519
| 5,742
|
py
|
Python
|
src/sage/categories/finite_dimensional_nilpotent_lie_algebras_with_basis.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | 1
|
2021-03-15T21:45:56.000Z
|
2021-03-15T21:45:56.000Z
|
src/sage/categories/finite_dimensional_nilpotent_lie_algebras_with_basis.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/finite_dimensional_nilpotent_lie_algebras_with_basis.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | null | null | null |
r"""
Finite Dimensional Nilpotent Lie Algebras With Basis
AUTHORS:
- Eero Hakavuori (2018-08-16): initial version
"""
# ****************************************************************************
# Copyright (C) 2018 Eero Hakavuori <eero.hakavuori@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.lie_algebras import LieAlgebras
class FiniteDimensionalNilpotentLieAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
r"""
Category of finite dimensional nilpotent Lie algebras with basis.
TESTS::
sage: C1 = LieAlgebras(QQ).FiniteDimensional().WithBasis().Nilpotent()
sage: C2 = LieAlgebras(QQ).FiniteDimensional().Nilpotent().WithBasis()
sage: C3 = LieAlgebras(QQ).Nilpotent().FiniteDimensional().WithBasis()
sage: C4 = LieAlgebras(QQ).Nilpotent().WithBasis().FiniteDimensional()
sage: C5 = LieAlgebras(QQ).WithBasis().Nilpotent().FiniteDimensional()
sage: C6 = LieAlgebras(QQ).WithBasis().FiniteDimensional().Nilpotent()
sage: C1 is C2
True
sage: C2 is C3
True
sage: C3 is C4
True
sage: C4 is C5
True
sage: C5 is C6
True
sage: TestSuite(C1).run()
"""
_base_category_class_and_axiom = (LieAlgebras.FiniteDimensional.WithBasis, "Nilpotent")
class ParentMethods:
def _test_nilpotency(self, **options):
r"""
Tests that ``self`` is nilpotent and has the correct step.
INPUT:
- ``options`` -- any keyword arguments accepted by
:meth:`_tester`
EXAMPLES::
sage: L = LieAlgebra(QQ, {('X','Y'): {'Z': 1}}, nilpotent=True)
sage: L._test_nilpotency()
sage: L = LieAlgebra(QQ, {('X','Y'): {'Z': 1}},
....: nilpotent=True, step = 3)
sage: L._test_nilpotency()
Traceback (most recent call last):
...
AssertionError: claimed nilpotency step 3 does not match the actual nilpotency step 2
sage: L = LieAlgebra(QQ, {('X','Y'): {'X': 1}}, nilpotent=True)
sage: L._test_nilpotency()
Traceback (most recent call last):
...
AssertionError: final term of lower central series is non-zero
See the documentation for :class:`TestSuite` for more information.
"""
tester = self._tester(**options)
lcs = self.lower_central_series(submodule=True)
tester.assertEqual(lcs[-1].dimension(), 0,
msg="final term of lower central series is non-zero")
step = self.step()
tester.assertEqual(len(lcs) - 1, step,
msg="claimed nilpotency step %d does not match the "
"actual nilpotency step %d" % (step, len(lcs) - 1))
def lie_group(self, name='G', **kwds):
r"""
Return the Lie group associated to ``self``.
INPUT:
- ``name`` -- string (default: ``'G'``);
the name (symbol) given to the Lie group
EXAMPLES:
We define the Heisenberg group::
sage: L = lie_algebras.Heisenberg(QQ, 1)
sage: G = L.lie_group('G'); G
Lie group G of Heisenberg algebra of rank 1 over Rational Field
We test multiplying elements of the group::
sage: p,q,z = L.basis()
sage: g = G.exp(p); g
exp(p1)
sage: h = G.exp(q); h
exp(q1)
sage: g*h
exp(p1 + q1 + 1/2*z)
We extend an element of the Lie algebra to a left-invariant
vector field::
sage: X = G.left_invariant_extension(2*p + 3*q, name='X'); X
Vector field X on the Lie group G of Heisenberg algebra of rank 1 over Rational Field
sage: X.at(G.one()).display()
X = 2 d/dx_0 + 3 d/dx_1
sage: X.display()
X = 2 d/dx_0 + 3 d/dx_1 + (3/2*x_0 - x_1) d/dx_2
.. SEEALSO::
:class:`~sage.groups.lie_gps.nilpotent_lie_group.NilpotentLieGroup`
"""
from sage.groups.lie_gps.nilpotent_lie_group import NilpotentLieGroup
return NilpotentLieGroup(self, name, **kwds)
def step(self):
r"""
Return the nilpotency step of ``self``.
EXAMPLES::
sage: L = LieAlgebra(QQ, {('X','Y'): {'Z': 1}}, nilpotent=True)
sage: L.step()
2
sage: sc = {('X','Y'): {'Z': 1}, ('X','Z'): {'W': 1}}
sage: LieAlgebra(QQ, sc, nilpotent=True).step()
3
"""
if not hasattr(self, '_step'):
self._step = len(self.lower_central_series(submodule=True)) - 1
return self._step
def is_nilpotent(self):
r"""
Return ``True`` since ``self`` is nilpotent.
EXAMPLES::
sage: L = LieAlgebra(QQ, {('x','y'): {'z': 1}}, nilpotent=True)
sage: L.is_nilpotent()
True
"""
return True
| 35.226994
| 101
| 0.522814
|
f2306889ef2d76dd34d5cc51ae4bcb419b831118
| 1,132
|
py
|
Python
|
cmd/play.py
|
codymlewis/QuantumChess
|
13d582204e295cb7255e897336a8266592604923
|
[
"MIT"
] | null | null | null |
cmd/play.py
|
codymlewis/QuantumChess
|
13d582204e295cb7255e897336a8266592604923
|
[
"MIT"
] | null | null | null |
cmd/play.py
|
codymlewis/QuantumChess
|
13d582204e295cb7255e897336a8266592604923
|
[
"MIT"
] | null | null | null |
# Author: Cody Lewis
# Date: 12-APR-2018
# Description: The main game flow of the quantum chess game
import re
import board
if __name__ == "__main__":
b = board.Board()
i = 0
col = 'B'
pattern = '[Yy]e?s?'
sp = False
while(True):
winVal = b.win()
if(winVal == 'W'):
print("White wins!")
break
elif(winVal == 'B'):
print("Black wins!")
break
print(b.toString())
i += 1
if(col == 'W'):
col = 'B'
print("Blacks turn")
else:
col = 'W'
print("Whites turn")
while(True):
superPos = str(input("Do you want to super-position (y/n)? "))
            if(re.match(pattern, superPos)):
                print('Super-position mode on')
                sp = True
            else:
                sp = False  # reset if the player declines super-position
start = str(input("Choose your starting piece: "))
end = str(input("Choose your end place: "))
if(b.play(start, end, col, sp)):
sp = False
break
else:
print("Your move was invalid, try again")
| 28.3
| 74
| 0.463781
|
3951f7178425b420daced0b8cd2047f2548324cd
| 797
|
py
|
Python
|
api/serializers.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
from .models import DataFile, SmartHomeDevice
from dashboard.models import CustomUser, UserProfile
from rest_framework import serializers
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
exclude = ('user',)
class UserSerializer(serializers.HyperlinkedModelSerializer):
profile = UserProfileSerializer()
class Meta:
model = CustomUser
fields = ('username', 'email', 'profile')
class SmartHomeDeviceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = SmartHomeDevice
fields = '__all__'
class DataFileSerializer(serializers.HyperlinkedModelSerializer):
devices_captured = SmartHomeDeviceSerializer()
class Meta:
model = DataFile
fields = '__all__'
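# Illustrative DRF wiring for the serializers above; in a real project this
# would live in views.py / urls.py, and the viewset and route names below
# are assumptions.
from rest_framework import routers, viewsets
class DataFileViewSet(viewsets.ModelViewSet):
    queryset = DataFile.objects.all()
    serializer_class = DataFileSerializer
router = routers.DefaultRouter()
router.register(r'datafiles', DataFileViewSet)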
| 31.88
| 72
| 0.736512
|
627eb85e584c592bbfccacae880ce07f9b0bd5ea
| 7,447
|
py
|
Python
|
deebot_t8/auth_client.py
|
nickw444/deebot-t8
|
dac3d9fbb0e9f2aec60ec2e489a823458e8f9c15
|
[
"MIT"
] | 1
|
2022-02-26T18:56:16.000Z
|
2022-02-26T18:56:16.000Z
|
deebot_t8/auth_client.py
|
nickw444/deebot-t8
|
dac3d9fbb0e9f2aec60ec2e489a823458e8f9c15
|
[
"MIT"
] | 8
|
2021-07-05T11:55:12.000Z
|
2021-07-05T11:56:45.000Z
|
deebot_t8/auth_client.py
|
nickw444/deebot-t8
|
dac3d9fbb0e9f2aec60ec2e489a823458e8f9c15
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import logging
import threading
import time
from typing import Dict, Any, Callable
import requests
from .credentials import Credentials
from .exceptions import InvalidCredentialsException, ApiErrorException
from .md5 import md5_hex
from .portal_client import PortalClient
from .urls import USER_DO_PATH, REALM
LOGGER = logging.getLogger(__name__)
CLIENT_KEY = "1520391301804"
CLIENT_SECRET = "6c319b2a5cd3e66e39159c2e28f2fce9"
AUTH_CLIENT_KEY = "1520391491841"
AUTH_CLIENT_SECRET = "77ef58ce3afbe337da74aa8c5ab963a9"
class DeebotAuthClient:
def __init__(self, portal_client: PortalClient, device_id: str, country: str):
self._portal_client = portal_client
self._device_id = device_id
self._country = country
self._meta = {
"country": country,
"lang": "EN",
"deviceId": device_id,
"appCode": "global_e",
"appVersion": "1.6.3",
"channel": "google_play",
"deviceType": "1",
}
def _sign_params(
self,
params: Dict[Any, Any],
client_key: str,
client_secret: str,
):
payload = (
client_key
+ "".join([k + "=" + str(params[k]) for k in sorted(params.keys())])
+ client_secret
)
return md5_hex(payload)
def _get_login_url(self):
tld = "cn" if self._country == "cn" else "com"
login_path = "user/loginCheckMobile" if self._country == "cn" else "user/login"
return (
"https://gl-{country}-api.ecovacs.{tld}/v1/private/{country}/"
"{lang}/{deviceId}/{appCode}/{appVersion}/{channel}/"
"{deviceType}/{login_path}"
).format(login_path=login_path, tld=tld, **self._meta)
def _get_authcode_url(self):
tld = "cn" if self._country == "cn" else "com"
return (
f"https://gl-{self._country}-openapi.ecovacs.{tld}/"
f"v1/global/auth/getAuthCode"
)
def do_account_password_exchange(
self, account_id: str, password_hash: str
) -> Credentials:
params = {
"requestId": md5_hex(str(time.time())),
"account": account_id,
"password": password_hash,
"authTimespan": int(time.time() * 1000),
"authTimeZone": "GMT-8",
}
# Sign params
params_sig = self._sign_params(
{**self._meta, **params}, CLIENT_KEY, CLIENT_SECRET
)
params["authSign"] = params_sig
params["authAppkey"] = CLIENT_KEY
url = self._get_login_url()
# Do request
resp = requests.get(url, params)
resp.raise_for_status()
resp_json = resp.json()
if resp_json["code"] == "0000":
return Credentials(
access_token=resp_json["data"]["accessToken"],
user_id=resp_json["data"]["uid"],
expires_at=None,
)
elif resp_json["code"] in ("1005", "1010"):
raise InvalidCredentialsException("Invalid email or password")
else:
raise Exception(
"Unknown error: {} ({})".format(
resp_json["msg"],
resp_json["code"],
)
)
def do_get_authcode(self, uid: str, access_token: str):
params: Dict[str, str | int] = {
"uid": uid,
"accessToken": access_token,
"bizType": "ECOVACS_IOT",
"deviceId": self._device_id,
"authTimespan": int(time.time() * 1000),
}
# Sign params
params_sig = self._sign_params(
{
"openId": "global",
**params,
},
AUTH_CLIENT_KEY,
AUTH_CLIENT_SECRET,
)
params["authSign"] = params_sig
params["authAppkey"] = AUTH_CLIENT_KEY
# Do request
resp = requests.get(self._get_authcode_url(), params=params)
resp.raise_for_status()
resp_json = resp.json()
if resp_json["code"] == "0000":
return resp_json["data"]["authCode"]
elif resp_json["code"] == "1005":
raise InvalidCredentialsException("Invalid email or password")
else:
raise ApiErrorException(
"Unknown error: {} ({})".format(
resp_json["msg"],
resp_json["code"],
)
)
def do_login_by_iot_token(self, user_id: str, auth_code: str):
org = "ECOCN" if self._country == "cn" else "ECOWW"
country = "Chinese" if self._country == "cn" else self._country.upper()
resp = self._portal_client.do_post(
USER_DO_PATH,
{
"todo": "loginByItToken",
"edition": "ECOGLOBLE",
"userId": user_id,
"token": auth_code,
"realm": REALM,
"resource": self._device_id[0:8],
"org": org,
"last": "",
"country": country,
},
)
if resp["result"] == "ok":
return Credentials(
access_token=resp["token"],
user_id=resp["userId"],
# Tokens appear to have ~7 day expiry.
# Set expiry to 2 days to eagerly refresh
# TODO(NW): Decode the JWT header returned and pass along the
# expiry in this field
expires_at=int(time.time()) + 60 * 60 * 24 * 2,
)
raise ApiErrorException("Unknown error: {}".format(resp))
def login(self, account_id: str, password_hash: str):
exch_resp = self.do_account_password_exchange(account_id, password_hash)
auth_code = self.do_get_authcode(exch_resp.user_id, exch_resp.access_token)
return self.do_login_by_iot_token(exch_resp.user_id, auth_code)
class Authenticator:
def __init__(
self,
auth_client: DeebotAuthClient,
country: str,
device_id: str,
account_id: str,
password_hash: str,
cached_credentials: Credentials = None,
on_credentials_changed: Callable[[Credentials], None] = None,
):
self._auth_client = auth_client
self._country = country
self._device_id = device_id
self._account_id = account_id
self._password_hash = password_hash
self._lock = threading.Lock()
self._credentials = cached_credentials
self._on_credentials_changed = on_credentials_changed
def authenticate(self, force=False):
with self._lock:
should_login = False
if self._credentials is None or force:
LOGGER.debug("No cached credentials, performing login")
should_login = True
            # expires_at may be None (e.g. credentials obtained from the
            # account/password exchange); treat that as expired.
            elif (self._credentials.expires_at is None
                    or self._credentials.expires_at < time.time()):
LOGGER.debug("Credentials have expired, performing login")
should_login = True
if should_login:
self._credentials = self._auth_client.login(
self._account_id, self._password_hash
)
if self._on_credentials_changed is not None:
self._on_credentials_changed(self._credentials)
return self._credentials
    def invalidate(self):
        # Drop the cached credentials so the next authenticate() call
        # performs a fresh login.
        with self._lock:
            self._credentials = None
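# Hypothetical wiring of the two classes above; the PortalClient
# construction and all account values below are placeholders, not part of
# the library.
if __name__ == '__main__':
    portal_client = PortalClient()  # assumed default construction
    device_id = "0123456789abcdef0123456789abcdef"  # placeholder device id
    auth = Authenticator(
        auth_client=DeebotAuthClient(portal_client, device_id, "us"),
        country="us",
        device_id=device_id,
        account_id="user@example.com",
        password_hash=md5_hex("not-a-real-password"),
    )
    credentials = auth.authenticate()  # logs in and caches the result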
| 32.662281
| 87
| 0.559017
|
2667fc0f7b80baab6b1abc7b6e4e3a9a1bd7c6c0
| 1,481
|
py
|
Python
|
unishare/__init__.py
|
davidn1998/uni-share
|
1c174485180c62ea818b3413e9214a1bc0889c6f
|
[
"MIT"
] | null | null | null |
unishare/__init__.py
|
davidn1998/uni-share
|
1c174485180c62ea818b3413e9214a1bc0889c6f
|
[
"MIT"
] | 1
|
2020-10-27T22:30:46.000Z
|
2020-10-27T22:30:46.000Z
|
unishare/__init__.py
|
davidn1998/uni-share
|
1c174485180c62ea818b3413e9214a1bc0889c6f
|
[
"MIT"
] | null | null | null |
from flask import Flask
from unishare.database import db
import os
def create_app(testing=False):
# Create the app
app = Flask(__name__, instance_relative_config=True)
# Load the default config file
app.config.from_object('config')
    # Override default configuration
if testing == False:
# Load the instance config when not testing (THIS WILL NOT WORK WITH HEROKU)
# app.config.from_pyfile('config.py', silent=True)
# FOR HEROKU - Load environment variables from os
app.config.update(
SECRET_KEY = os.environ.get('SECRET_KEY'),
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL'),
SQLALCHEMY_TRACK_MODIFICATIONS = False,
TESTING = False
)
else:
# Load the test config if passed in
app.config.update(
SECRET_KEY = 'test123',
SQLALCHEMY_DATABASE_URI = 'postgresql://foo:bar@localhost:5432/testing',
TESTING = True
)
# Load Database
db.init_app(app)
with app.app_context():
db.create_all()
# Import and register auth blueprint
from unishare import auth
app.register_blueprint(auth.bp)
# Import and register blog blueprint
from unishare import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
# Import and register blog blueprint
from unishare import user
app.register_blueprint(user.bp)
return app
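# Hypothetical entry point built on the factory above (the wsgi.py file
# name is an assumption):
#
#     from unishare import create_app
#     app = create_app()  # reads SECRET_KEY / DATABASE_URL from the env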
| 29.62
| 84
| 0.653612
|
bf91cbd901e297b542c72a4fd93ae442b037e123
| 2,007
|
py
|
Python
|
ingestion/src/metadata/ingestion/api/stage.py
|
troyel/OpenMetadata
|
4577f12bfde471afd8655ce4ee949fcca3d7fd95
|
[
"Apache-2.0"
] | 864
|
2021-08-13T23:48:45.000Z
|
2022-03-31T18:36:30.000Z
|
ingestion/src/metadata/ingestion/api/stage.py
|
troyel/OpenMetadata
|
4577f12bfde471afd8655ce4ee949fcca3d7fd95
|
[
"Apache-2.0"
] | 2,701
|
2021-08-14T06:05:12.000Z
|
2022-03-31T23:48:32.000Z
|
ingestion/src/metadata/ingestion/api/stage.py
|
troyel/OpenMetadata
|
4577f12bfde471afd8655ce4ee949fcca3d7fd95
|
[
"Apache-2.0"
] | 144
|
2021-08-16T20:44:08.000Z
|
2022-03-29T14:12:30.000Z
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, Generic, List
from metadata.ingestion.api.closeable import Closeable
from metadata.ingestion.api.common import Entity, WorkflowContext
from metadata.ingestion.api.status import Status
@dataclass
class StageStatus(Status):
records_produced = 0
warnings: Dict[str, List[str]] = field(default_factory=dict)
failures: Dict[str, List[str]] = field(default_factory=dict)
def records_status(self, record: Any) -> None:
self.records_produced += 1
def warning_status(self, key: str, reason: str) -> None:
if key not in self.warnings:
self.warnings[key] = []
self.warnings[key].append(reason)
def failure_status(self, key: str, reason: str) -> None:
if key not in self.failures:
self.failures[key] = []
self.failures[key].append(reason)
@dataclass # type: ignore[misc]
class Stage(Closeable, Generic[Entity], metaclass=ABCMeta):
ctx: WorkflowContext
@classmethod
@abstractmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
) -> "Stage":
pass
@abstractmethod
def stage_record(self, record: Entity):
pass
@abstractmethod
def get_status(self) -> StageStatus:
pass
@abstractmethod
def close(self) -> None:
pass
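# Illustrative sketch only: a Stage that buffers records in memory. The
# InMemoryStage name and its trivial create() handling are assumptions,
# not part of OpenMetadata.
@dataclass
class InMemoryStage(Stage[Entity]):
    status: StageStatus = field(default_factory=StageStatus)
    records: List[Entity] = field(default_factory=list)
    @classmethod
    def create(
        cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
    ) -> "Stage":
        return cls(ctx)
    def stage_record(self, record: Entity):
        self.records.append(record)
        self.status.records_status(record)
    def get_status(self) -> StageStatus:
        return self.status
    def close(self) -> None:
        self.records.clear()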
| 31.359375
| 80
| 0.702541
|
1b471ce711071e5f70b2da2d3e7770ebc4b0b013
| 40,429
|
py
|
Python
|
tests/blockchain/test_blockchain_transactions.py
|
navroudsari/chia-blockchain
|
fceb39f783392a32f43b562de1e85e810e913005
|
[
"Apache-2.0"
] | 11,902
|
2019-12-05T00:14:29.000Z
|
2022-03-31T23:25:37.000Z
|
tests/blockchain/test_blockchain_transactions.py
|
navroudsari/chia-blockchain
|
fceb39f783392a32f43b562de1e85e810e913005
|
[
"Apache-2.0"
] | 5,246
|
2019-12-05T04:00:03.000Z
|
2022-03-31T21:33:30.000Z
|
tests/blockchain/test_blockchain_transactions.py
|
Devh4ox4d/silishitcoin
|
4372d06aa4a54220f2bde29c8081410503679a82
|
[
"Apache-2.0"
] | 2,149
|
2019-12-05T11:12:53.000Z
|
2022-03-31T06:08:34.000Z
|
import asyncio
import logging
import pytest
from clvm.casts import int_to_bytes
from chia.consensus.blockchain import ReceiveBlockResult
from chia.protocols import full_node_protocol
from chia.types.announcement import Announcement
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.spend_bundle import SpendBundle
from chia.util.errors import ConsensusError, Err
from chia.util.ints import uint64
from tests.wallet_tools import WalletTool
from tests.core.full_node.test_full_node import connect_and_get_peer
from tests.setup_nodes import bt, setup_two_nodes, test_constants
from tests.util.generator_tools_testing import run_and_get_removals_and_additions
BURN_PUZZLE_HASH = b"0" * 32
WALLET_A = WalletTool(test_constants)
WALLET_A_PUZZLE_HASHES = [WALLET_A.get_new_puzzlehash() for _ in range(5)]
log = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestBlockchainTransactions:
@pytest.fixture(scope="function")
async def two_nodes(self):
async for _ in setup_two_nodes(test_constants):
yield _
@pytest.mark.asyncio
async def test_basic_blockchain_tx(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
peer = await connect_and_get_peer(server_1, server_2)
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block), None)
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
assert spend_bundle is not None
tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
await full_node_api_1.respond_transaction(tx, peer)
sb = full_node_1.mempool_manager.get_spendbundle(spend_bundle.name())
assert sb is spend_bundle
last_block = blocks[-1]
next_spendbundle, additions, removals = await full_node_1.mempool_manager.create_bundle_from_mempool(
last_block.header_hash
)
assert next_spendbundle is not None
new_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=next_spendbundle,
guarantee_transaction_block=True,
)
next_block = new_blocks[-1]
await full_node_1.respond_block(full_node_protocol.RespondBlock(next_block))
assert next_block.header_hash == full_node_1.blockchain.get_peak().header_hash
added_coins = next_spendbundle.additions()
# Two coins are added, main spend and change
assert len(added_coins) == 2
for coin in added_coins:
unspent = await full_node_1.coin_store.get_coin_record(coin.name())
assert unspent is not None
assert not unspent.spent
assert not unspent.coinbase
@pytest.mark.asyncio
async def test_validate_blockchain_with_double_spend(self, two_nodes):
num_blocks = 5
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_3, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
spend_bundle_double = wallet_a.generate_signed_transaction(1001, receiver_puzzlehash, spend_coin)
block_spendbundle = SpendBundle.aggregate([spend_bundle, spend_bundle_double])
new_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block_spendbundle,
guarantee_transaction_block=True,
)
next_block = new_blocks[-1]
res, err, _, _ = await full_node_1.blockchain.receive_block(next_block)
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.DOUBLE_SPEND
@pytest.mark.asyncio
async def test_validate_blockchain_duplicate_output(self, two_nodes):
num_blocks = 3
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin, additional_outputs=[(receiver_puzzlehash, 1000)]
)
new_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
next_block = new_blocks[-1]
res, err, _, _ = await full_node_1.blockchain.receive_block(next_block)
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.DUPLICATE_OUTPUT
@pytest.mark.asyncio
async def test_validate_blockchain_with_reorg_double_spend(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin)
blocks_spend = bt.get_consecutive_blocks(
1,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
# Move chain to height 10, with a spend at height 10
for block in blocks_spend:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Reorg at height 5, add up to and including height 12
new_blocks = bt.get_consecutive_blocks(
7,
blocks[:6],
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
seed=b"another seed",
)
for block in new_blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Spend the same coin in the new reorg chain at height 13
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
res, err, _, _ = await full_node_api_1.full_node.blockchain.receive_block(new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
# But can't spend it twice
new_blocks_double = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
res, err, _, _ = await full_node_api_1.full_node.blockchain.receive_block(new_blocks_double[-1])
assert err is Err.DOUBLE_SPEND
assert res == ReceiveBlockResult.INVALID_BLOCK
# Now test Reorg at block 5, same spend at block height 12
new_blocks_reorg = bt.get_consecutive_blocks(
1,
new_blocks[:12],
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
seed=b"spend at 12 is ok",
)
for block in new_blocks_reorg:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Spend at height 13 is also OK (same height)
new_blocks_reorg = bt.get_consecutive_blocks(
1,
new_blocks[:13],
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
seed=b"spend at 13 is ok",
)
for block in new_blocks_reorg:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Spend at height 14 is not OK (already spent)
new_blocks_reorg = bt.get_consecutive_blocks(
1,
new_blocks[:14],
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
seed=b"spend at 14 is double spend",
)
with pytest.raises(ConsensusError):
for block in new_blocks_reorg:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
@pytest.mark.asyncio
async def test_validate_blockchain_spend_reorg_coin(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
receiver_2_puzzlehash = WALLET_A_PUZZLE_HASHES[2]
receiver_3_puzzlehash = WALLET_A_PUZZLE_HASHES[3]
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[2]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
assert spend_coin
spend_bundle = wallet_a.generate_signed_transaction(uint64(1000), receiver_1_puzzlehash, spend_coin)
new_blocks = bt.get_consecutive_blocks(
1,
blocks[:5],
seed=b"spend_reorg_coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
coin_2 = None
for coin in run_and_get_removals_and_additions(
new_blocks[-1], test_constants.MAX_BLOCK_COST_CLVM, test_constants.COST_PER_BYTE
)[1]:
if coin.puzzle_hash == receiver_1_puzzlehash:
coin_2 = coin
break
assert coin_2 is not None
spend_bundle = wallet_a.generate_signed_transaction(uint64(1000), receiver_2_puzzlehash, coin_2)
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks[:6],
seed=b"spend_reorg_coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
coin_3 = None
for coin in run_and_get_removals_and_additions(
new_blocks[-1], test_constants.MAX_BLOCK_COST_CLVM, test_constants.COST_PER_BYTE
)[1]:
if coin.puzzle_hash == receiver_2_puzzlehash:
coin_3 = coin
break
assert coin_3 is not None
spend_bundle = wallet_a.generate_signed_transaction(uint64(1000), receiver_3_puzzlehash, coin_3)
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks[:7],
seed=b"spend_reorg_coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_validate_blockchain_spend_reorg_cb_coin(self, two_nodes):
num_blocks = 15
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
blocks = bt.get_consecutive_blocks(num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Spends a coinbase created in reorg
new_blocks = bt.get_consecutive_blocks(
5,
blocks[:6],
seed=b"reorg cb coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
for block in new_blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = new_blocks[-1]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_1_puzzlehash, spend_coin)
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
seed=b"reorg cb coin",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
guarantee_transaction_block=True,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_validate_blockchain_spend_reorg_since_genesis(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_1_puzzlehash = WALLET_A_PUZZLE_HASHES[1]
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
spend_block = blocks[-1]
spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_1_puzzlehash, spend_coin)
new_blocks = bt.get_consecutive_blocks(
1, blocks, seed=b"", farmer_reward_puzzle_hash=coinbase_puzzlehash, transaction_data=spend_bundle
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
        # Spends a coin in a genesis reorg that was already spent
new_blocks = bt.get_consecutive_blocks(
12,
[],
seed=b"reorg since genesis",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
for block in new_blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
seed=b"reorg since genesis",
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=spend_bundle,
)
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[-1]))
@pytest.mark.asyncio
async def test_assert_my_coin_id(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
spend_block = blocks[2]
bad_block = blocks[3]
spend_coin = None
bad_spend_coin = None
for coin in list(spend_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin = coin
for coin in list(bad_block.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
bad_spend_coin = coin
valid_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [spend_coin.name()])
valid_dic = {valid_cvp.opcode: [valid_cvp]}
bad_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [bad_spend_coin.name()])
bad_dic = {bad_cvp.opcode: [bad_cvp]}
bad_spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin, bad_dic)
valid_spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin, valid_dic)
assert bad_spend_bundle is not None
assert valid_spend_bundle is not None
# Invalid block bundle
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bad_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_MY_COIN_ID_FAILED
# Valid block bundle
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=valid_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_coin_announcement_consumed(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
block2 = blocks[3]
spend_coin_block_1 = None
spend_coin_block_2 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
for coin in list(block2.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_2 = coin
# This condition requires block2 coinbase to be spent
block1_cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
[Announcement(spend_coin_block_2.name(), b"test").name()],
)
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# This condition requires block1 coinbase to be spent
block2_cvp = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"test"],
)
block2_dic = {block2_cvp.opcode: [block2_cvp]}
block2_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_2, block2_dic
)
# Invalid block bundle
assert block1_spend_bundle is not None
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
# bundle_together contains both transactions
bundle_together = SpendBundle.aggregate([block1_spend_bundle, block2_spend_bundle])
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bundle_together,
guarantee_transaction_block=True,
)
# Try to validate newly created block
res, err, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_puzzle_announcement_consumed(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
block2 = blocks[3]
spend_coin_block_1 = None
spend_coin_block_2 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
for coin in list(block2.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_2 = coin
# This condition requires block2 coinbase to be spent
block1_cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT,
[Announcement(spend_coin_block_2.puzzle_hash, b"test").name()],
)
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# This condition requires block1 coinbase to be spent
block2_cvp = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"test"],
)
block2_dic = {block2_cvp.opcode: [block2_cvp]}
block2_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_2, block2_dic
)
# Invalid block bundle
assert block1_spend_bundle is not None
# Create another block that includes our transaction
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
# bundle_together contains both transactions
bundle_together = SpendBundle.aggregate([block1_spend_bundle, block2_spend_bundle])
# Create another block that includes our transaction
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=bundle_together,
guarantee_transaction_block=True,
)
# Try to validate newly created block
res, err, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
assert err is None
@pytest.mark.asyncio
async def test_assert_height_absolute(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent after index 10
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(10)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block at index 10
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
res, _, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
# At index 11, it can be spent
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_height_relative(self, two_nodes):
num_blocks = 11
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
        # This condition requires block1's coinbase (farmed at height 2) to be
        # spent more than 9 blocks after it was farmed, i.e. at a block height
        # greater than 2 + 9 = 11
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(9)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
# Try to validate that block at index 11
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_HEIGHT_RELATIVE_FAILED
new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
guarantee_transaction_block=True,
)
res, _, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert res == ReceiveBlockResult.NEW_PEAK
# At index 12, it can be spent
new_blocks = bt.get_consecutive_blocks(
1,
new_blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_seconds_relative(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent 300 seconds after coin creation
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(300)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
time_per_block=20,
guarantee_transaction_block=True,
)
# Try to validate the block before 300 seconds have passed
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_SECONDS_RELATIVE_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
time_per_block=301,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_seconds_absolute(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires block1 coinbase to be spent at least 30 seconds after the latest block's timestamp
current_time_plus30 = uint64(blocks[-1].foliage_transaction_block.timestamp + 30)
block1_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(current_time_plus30)])
block1_dic = {block1_cvp.opcode: [block1_cvp]}
block1_spend_bundle = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic
)
# program that will be sent too early
assert block1_spend_bundle is not None
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
time_per_block=20,
guarantee_transaction_block=True,
)
# Try to validate the block before 30 seconds have passed
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.ASSERT_SECONDS_ABSOLUTE_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle,
guarantee_transaction_block=True,
time_per_block=31,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_assert_fee_condition(self, two_nodes):
num_blocks = 10
wallet_a = WALLET_A
coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0]
receiver_puzzlehash = BURN_PUZZLE_HASH
# Farm blocks
blocks = bt.get_consecutive_blocks(
num_blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True
)
full_node_api_1, full_node_api_2, server_1, server_2 = two_nodes
full_node_1 = full_node_api_1.full_node
for block in blocks:
await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Coinbase that gets spent
block1 = blocks[2]
spend_coin_block_1 = None
for coin in list(block1.get_included_reward_coins()):
if coin.puzzle_hash == coinbase_puzzlehash:
spend_coin_block_1 = coin
# This condition requires fee to be 10 mojo
cvp_fee = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
# This spend bundle has 9 mojo as fee
block1_dic_bad = {cvp_fee.opcode: [cvp_fee]}
block1_dic_good = {cvp_fee.opcode: [cvp_fee]}
block1_spend_bundle_bad = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic_bad, fee=9
)
block1_spend_bundle_good = wallet_a.generate_signed_transaction(
1000, receiver_puzzlehash, spend_coin_block_1, block1_dic_good, fee=10
)
log.warning(block1_spend_bundle_good.additions())
log.warning(f"Spend bundle fees: {block1_spend_bundle_good.fees()}")
invalid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle_bad,
guarantee_transaction_block=True,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(invalid_new_blocks[-1])
assert res == ReceiveBlockResult.INVALID_BLOCK
assert err == Err.RESERVE_FEE_CONDITION_FAILED
valid_new_blocks = bt.get_consecutive_blocks(
1,
blocks,
farmer_reward_puzzle_hash=coinbase_puzzlehash,
transaction_data=block1_spend_bundle_good,
guarantee_transaction_block=True,
)
res, err, _, _ = await full_node_1.blockchain.receive_block(valid_new_blocks[-1])
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
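# Illustrative sketch (not part of the original suite): the dict handed to
# wallet_a.generate_signed_transaction maps each ConditionOpcode to a list of
# ConditionWithArgs, so a single spend can carry several conditions at once;
# only names already used in these tests appear below.
#     height_cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(9)])
#     fee_cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
#     combined_dic = {height_cvp.opcode: [height_cvp], fee_cvp.opcode: [fee_cvp]}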
| 39.753196
| 115
| 0.678919
|
a5fbf6931255b7bfc35abfbf7c9d082d5c1a047c
| 4,755
|
py
|
Python
|
imblearn/under_sampling/prototype_selection/tests/test_instance_hardness_threshold.py
|
seabay/UnbalancedDataset
|
b15b868019343052d4b57bf748d658367166c8b3
|
[
"MIT"
] | 1
|
2018-07-11T06:47:11.000Z
|
2018-07-11T06:47:11.000Z
|
imblearn/under_sampling/prototype_selection/tests/test_instance_hardness_threshold.py
|
seabay/UnbalancedDataset
|
b15b868019343052d4b57bf748d658367166c8b3
|
[
"MIT"
] | null | null | null |
imblearn/under_sampling/prototype_selection/tests/test_instance_hardness_threshold.py
|
seabay/UnbalancedDataset
|
b15b868019343052d4b57bf748d658367166c8b3
|
[
"MIT"
] | null | null | null |
"""Test the module ."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from __future__ import print_function
import numpy as np
from pytest import raises
from sklearn.utils.testing import assert_array_equal
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.under_sampling import InstanceHardnessThreshold
RND_SEED = 0
X = np.array([[-0.3879569, 0.6894251], [-0.09322739, 1.28177189], [
-0.77740357, 0.74097941
], [0.91542919, -0.65453327], [-0.03852113, 0.40910479], [
-0.43877303, 1.07366684
], [-0.85795321, 0.82980738], [-0.18430329, 0.52328473], [
-0.30126957, -0.66268378
], [-0.65571327, 0.42412021], [-0.28305528, 0.30284991],
[0.20246714, -0.34727125], [1.06446472, -1.09279772],
[0.30543283, -0.02589502], [-0.00717161, 0.00318087]])
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
ESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED)
def test_iht_init():
sampling_strategy = 'auto'
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED)
assert iht.sampling_strategy == sampling_strategy
assert iht.random_state == RND_SEED
def test_iht_fit_sample():
iht = InstanceHardnessThreshold(ESTIMATOR, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251], [0.91542919, -0.65453327], [
-0.65571327, 0.42412021
], [1.06446472, -1.09279772], [0.30543283, -0.02589502], [
-0.00717161, 0.00318087
], [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
[-0.43877303, 1.07366684], [-0.85795321, 0.82980738],
[-0.18430329, 0.52328473], [-0.28305528, 0.30284991]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_iht_fit_sample_with_indices():
iht = InstanceHardnessThreshold(
ESTIMATOR, return_indices=True, random_state=RND_SEED)
X_resampled, y_resampled, idx_under = iht.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251], [0.91542919, -0.65453327], [
-0.65571327, 0.42412021
], [1.06446472, -1.09279772], [0.30543283, -0.02589502], [
-0.00717161, 0.00318087
], [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
[-0.43877303, 1.07366684], [-0.85795321, 0.82980738],
[-0.18430329, 0.52328473], [-0.28305528, 0.30284991]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
idx_gt = np.array([0, 3, 9, 12, 13, 14, 1, 2, 5, 6, 7, 10])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_iht_fit_sample_half():
sampling_strategy = {0: 6, 1: 8}
iht = InstanceHardnessThreshold(
ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251], [0.91542919, -0.65453327], [
-0.65571327, 0.42412021
], [1.06446472, -1.09279772], [0.30543283, -0.02589502], [
-0.00717161, 0.00318087
], [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
[-0.03852113, 0.40910479], [-0.43877303, 1.07366684],
[-0.85795321, 0.82980738], [-0.18430329, 0.52328473],
[-0.30126957, -0.66268378], [-0.28305528, 0.30284991]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_iht_fit_sample_class_obj():
est = GradientBoostingClassifier(random_state=RND_SEED)
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
X_resampled, y_resampled = iht.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251], [0.91542919, -0.65453327], [
-0.65571327, 0.42412021
], [1.06446472, -1.09279772], [0.30543283, -0.02589502], [
-0.00717161, 0.00318087
], [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
[-0.43877303, 1.07366684], [-0.85795321, 0.82980738],
[-0.18430329, 0.52328473], [-0.28305528, 0.30284991]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_iht_fit_sample_wrong_class_obj():
from sklearn.cluster import KMeans
est = KMeans()
iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)
with raises(ValueError, match="Invalid parameter `estimator`"):
iht.fit_sample(X, Y)
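# Rough usage sketch mirroring the tests above (an outline, not library docs):
# InstanceHardnessThreshold fits the given estimator, scores each sample's
# predicted probability for its own class, and drops the "hard" samples.
#     iht = InstanceHardnessThreshold(GradientBoostingClassifier(random_state=0), random_state=0)
#     X_res, y_res = iht.fit_sample(X, Y)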
| 40.991379
| 78
| 0.642061
|
64a694c83c308f4a1b9eb36edac4a3bce98e83b9
| 7,792
|
py
|
Python
|
DQM/BeamMonitor/test/Alca_BeamMonitor_file.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 1
|
2021-01-25T16:39:35.000Z
|
2021-01-25T16:39:35.000Z
|
DQM/BeamMonitor/test/Alca_BeamMonitor_file.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 28
|
2019-08-15T15:21:11.000Z
|
2021-12-29T14:13:18.000Z
|
DQM/BeamMonitor/test/Alca_BeamMonitor_file.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 1
|
2020-08-18T10:29:49.000Z
|
2020-08-18T10:29:49.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/07CBA04E-BEA0-AE4B-A9F1-3C6DF122B40E.root",
#"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0869AF69-CD0D-6F45-B1C7-FC1BF3AC7A01.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0874B9E6-DAA6-3148-8A8A-4A323D682591.root",
# "file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0E38963B-7E4A-9447-A56F-7E87903E2ED4.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0E497339-C393-ED4A-8FA6-E372183A841F.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0F68686E-FE48-7F42-AF13-6F9F058D7BB6.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0874B9E6-DAA6-3148-8A8A-4A323D682591.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0E38963B-7E4A-9447-A56F-7E87903E2ED4.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0E497339-C393-ED4A-8FA6-E372183A841F.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/0F68686E-FE48-7F42-AF13-6F9F058D7BB6.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/11F4A099-8675-D248-B648-B39C866557DD.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/120196ED-8F8F-A044-8037-3B7416D9B273.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/1386E376-340A-964E-ADFC-F30D662B4DD8.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/159017AF-F91B-FC48-88FC-0BEF7C3C36F1.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/15A187C2-B285-B54D-9BDF-DB34CCCA9480.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/1789D57E-0D78-A342-BCDD-F3DC1193AF77.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/17F827A5-F84D-6D40-AB10-A1B11EA74CAC.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/1985F374-DBC9-5F4F-A52D-93FBB031FA5F.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/19DE7ADA-C714-9141-946B-311861E05DCE.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/1C7884AD-B58B-F441-959A-3EFC70EBECB0.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/1D4C6C74-E523-E84B-916F-5E9509837053.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/213103A9-2731-F740-B718-189C9D94750F.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/23A6C766-D39F-5F44-B017-D0CD2B42C398.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/26005441-0069-B24E-B040-91F00D129557.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/28CF7DC0-A45E-0248-938F-764FC31853B1.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/29B394F7-6729-3E44-9846-EBB63B2C4B88.root",
"file:/eos/cms/store/data/Run2018D/JetHT/RAW-RECO/JetHTJetPlusHOFilter-12Nov2019_UL2018_rsb-v1/120000/2B9B72F2-06F4-A043-9BDE-5B9C6C454705.root",
)
# , duplicateCheckMode = cms.untracked.string('noDuplicateCheck')
)
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.categories = ["AlcaBeamMonitor"]
process.MessageLogger.cerr = cms.untracked.PSet(placeholder = cms.untracked.bool(True))
process.MessageLogger.cout = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
AlcaBeamMonitor = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1),  # report every event
limit = cms.untracked.int32(0)
)
)
#process.MessageLogger.statistics.append('cout')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.load("DQM.BeamMonitor.AlcaBeamMonitor_cff")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
# you may need to set manually the GT in the line below
#process.GlobalTag.globaltag = '100X_upgrade2018_realistic_v10'
# DQM Live Environment
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
process.dqmSaver.tag = 'BeamMonitor'
process.dqmEnvPixelLess = process.dqmEnv.clone()
process.dqmEnvPixelLess.subSystemFolder = 'BeamMonitor_PixelLess'
#import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
#process.offlineBeamSpotForDQM = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
# # summary
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
numberOfThreads = cms.untracked.uint32(4),
numberOfStreams = cms.untracked.uint32 (4),
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(2)
)
process.pp = cms.Path(process.alcaBeamMonitor+process.dqmSaver)
process.schedule = cms.Schedule(process.pp)
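# To run this configuration one would typically invoke (assumption, standard
# CMSSW workflow): cmsRun Alca_BeamMonitor_file.py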
| 82.021053
| 214
| 0.643352
|
c98b0748dd34dcb22e512ab58b6937a1c1125c9f
| 11,037
|
py
|
Python
|
UserCode/trentc/AcousticAnalysisSlope.py
|
RunzZhang/SBCcode
|
e75b8e751cec5fb2c28950edef0c82f005caedcb
|
[
"MIT"
] | 4
|
2018-08-27T18:02:34.000Z
|
2020-06-09T21:19:04.000Z
|
UserCode/trentc/AcousticAnalysisSlope.py
|
RunzZhang/SBCcode
|
e75b8e751cec5fb2c28950edef0c82f005caedcb
|
[
"MIT"
] | null | null | null |
UserCode/trentc/AcousticAnalysisSlope.py
|
RunzZhang/SBCcode
|
e75b8e751cec5fb2c28950edef0c82f005caedcb
|
[
"MIT"
] | 4
|
2019-06-20T21:36:26.000Z
|
2020-11-10T17:23:14.000Z
|
#Author: Trent Cwiok
import pdb
import numpy as np
import math
import SBCcode as sbc
from matplotlib import pyplot as plt
from scipy import signal as sig
def main(event_dict, low_tau=5e-4, high_tau=5e-3, window_width_ms=10, offset_from_t0_ms=2, bin_edges=np.array((500, 10000), dtype=np.float64), show_figures_t0=False, show_figures_loudness=False):
"""
A top-level function that makes calls to the FindBubbleStart and FindLoudness helper functions.
It then parses the output of those functions and returns a dictionary containing that information.
Required inputs:
event_dict -- an event dictionary generated by SBCcode.get_event()
Keyword arguments:
low_tau -- a Float, the time constant used by the low-pass filter applied to the acoustic trace in FindBubbleStart
high_tau -- identical to low_tau, but used in the high-pass filter
window_width_ms -- a Float, the length of each time window slice taken of the FFT, in milliseconds
offset_from_t0_ms -- a Float, the number of milliseconds before the bubble t0 where the first time window will start
bin_edges -- a 1-dimensional Ndarray, contains the values of the bin edges in Hz used to partition the FFT
show_figures_t0, show_figures_loudness -- Booleans, determine whether the bubble t0 and FFT loudness figures are displayed
Output format:
Returns a dictionary containing the following keys...
bubble_t0 -- the time in milliseconds where the bubble appears in the acoustic trace
bubble_loudness -- an Ndarray, contains the loudness calculated for each time window between each set of bin edges
bin_edges -- an Ndarray, a copy of the bin edges passed to the function to help with indexing the bubble loudnesses
time_windows -- an Ndarray, the actual times in milliseconds that were samples from the acoustic trace
"""
acoustic_analysis_dict = dict()
#Calls FindBubbleStart and stores the returned value to its respective dictionary entry
bubble_start = FindBubbleStart(event_dict, low_tau, high_tau, show_figures_t0)
#pdb.set_trace()
acoustic_analysis_dict['bubble_t0'] = bubble_start[1]
#Calls FindLoudness and stores the returned values of the tuple to their respective dictionary entries
bubble_loudness = FindLoudness(event_dict, bubble_start[0], window_width_ms, offset_from_t0_ms, bin_edges, show_figures_loudness)
acoustic_analysis_dict['bubble_loudness'] = bubble_loudness[0]
acoustic_analysis_dict['ap_time_windows'] = bubble_loudness[1]
acoustic_analysis_dict['ap_frequency_bins'] = bubble_loudness[2]
return acoustic_analysis_dict
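# Minimal usage sketch (assumption: SBCcode.get_event takes a run directory and
# an event number; the path below is hypothetical):
#     ev = sbc.get_event('/path/to/run_directory', 0)
#     out = main(ev, show_figures_t0=True)
#     print(out['bubble_t0'], out['bubble_loudness'])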
def FindBubbleStart(event_dict, low_tau, high_tau, show_figures_t0):
"""
A helper function to main which finds the time in the acoustic trace at which bubble formation begins.
Required inputs:
See main's keyword arguments -- all inputs are optional inputs of main
Output format:
Returns a tuple containing both the time of bubble formation and its corresponding index
"""
# Checks that the event dictionary was properly loaded and passed
if not event_dict['fastDAQ']['loaded']:
print "Failed to load fastDAQ dictionary, process terminated."
return np.float64(np.NaN), np.float64(np.NaN)
# Reads and stores information determined by the DAQ
time_zero = int(event_dict['fastDAQ']['caldata']['pretrigger_samples'][0])
time_step = event_dict['fastDAQ']['caldata']['dt'][0]
# Calculates the average and standard deviation of the trace before any bubble formation
base_sampling = event_dict['fastDAQ']['Piezo1'][:100000]
base_mean = np.mean(base_sampling, dtype = np.float64)
base_stdev = np.std(base_sampling, dtype = np.float64)
# Normalizes the acoustic trace to an average of zero
event_dict['fastDAQ']['Piezo1'] -= base_mean
# Uses scipy's low and high pass filters to create a bandwidth filter -- bandwidth is determined by low and high tau and are passed to the function
filtered_low = sig.lfilter([1-math.exp(-time_step/low_tau)], [1, -math.exp(-time_step/low_tau)], event_dict['fastDAQ']['Piezo1'], axis = 0)
filtered_both = sig.lfilter([math.exp(-time_step/high_tau),-math.exp(-time_step/high_tau)], [1, -math.exp(-time_step/high_tau)], filtered_low, axis = 0)
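# Note (assuming the standard first-order IIR discretization): with a = exp(-dt/tau),
# the low-pass above is y[n] = (1-a)*x[n] + a*y[n-1] and the high-pass is
# y[n] = a*(x[n] - x[n-1]) + a*y[n-1]; chained, they band-pass the trace between
# roughly 1/(2*pi*high_tau) and 1/(2*pi*low_tau) Hz.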
# Calculates the average and standard deviation of the filtered trace before bubble formation
filtered_sampling = filtered_both[:100000]
filtered_mean = np.mean(filtered_sampling, dtype = np.float64)
filtered_stdev = np.std(filtered_sampling, dtype = np.float64)
# Normalizes mean to zero
filtered_both -= filtered_mean
# Scales both the filtered and unfiltered traces by their respective standard deviations -- Y-axis is now in units of sigma
filtered_both = (filtered_both/filtered_stdev)
event_dict['fastDAQ']['Piezo1'] = (event_dict['fastDAQ']['Piezo1']/base_stdev)
# Declaration of loop variables
bubble = False
low_res_start = None
spike = False
index = 0
# This loop starts from the start of the trace and steps forward until it finds a region where the trace exceeds a certain
# absolute value standard deviation threshold. If the trace remains above this threshold for a certain duration, it records
# the index where the trace first crossed the threshold as a bubble.
while (not bubble) and (index < time_zero):
value = abs(filtered_both[index])
# If the value is less than 2 sigma, there is no bubble
if value < 2 and low_res_start is not None:
spike = False
low_res_start = None
# Else, a bubble start is labelled
elif value >= 2 and low_res_start is None:
low_res_start = index
spike = True
# If the bubble label persists, it is confirmed and the loop ends
if spike and (abs(event_dict['fastDAQ']['time'][index]-event_dict['fastDAQ']['time'][low_res_start]) > .0001):
bubble = True
index += 1
# Declaration of loop variables; bail out if no candidate bubble start was found
if low_res_start is None:
    return np.float64(np.NaN), np.float64(np.NaN)
index = low_res_start
high_res_start = None
found = False
# This loop starts from where the previous loop labelled the bubble formation and searches for the start of the bubble with
# finer resolution. It then steps BACKWARDS, searching for a point at which the trace has a standard deviation of essentially
# zero, and if it remains within that range for a certain duration, it stores that value as the t0 of the bubble formation.
while not found and (index > 0):
x1 = index
x2 = index - 100
y1 = filtered_both[x1]
y2 = filtered_both[x2]
slope = GetAbsSlope(x1, x2, y1, y2)
if (slope < .5) and (filtered_both[x2] < 3):
high_res_start = x2
found = True
index -= 1
if not found:
return np.float64(np.NaN), np.float64(np.NaN)
# Optional function argument for plotting tools
if show_figures_t0:
plt.plot(event_dict['fastDAQ']['time'], event_dict['fastDAQ']['Piezo1'], 'b-',event_dict['fastDAQ']['time'], filtered_both, 'r-', event_dict['fastDAQ']['time'][int(high_res_start)], 0, 'r^', markersize = 10.0)
plt.axis([-.2,.2,-100,100])
plt.show()
return high_res_start, event_dict['fastDAQ']['time'][high_res_start]
def FindLoudness(event_dict, bubble_t0_index, window_width_ms, offset_from_t0_ms, bin_edges, show_figures_loudness):
"""
"""
# Checks that the event dictionary was properly loaded and passed
if not event_dict['fastDAQ']['loaded']:
print "Failed to load fastDAQ dictionary, process terminated."
return np.float64(np.NaN), np.float64(np.NaN), np.float64(np.NaN)
if np.isnan(bubble_t0_index):
return np.float64(np.NaN), np.float64(np.NaN), np.float64(np.NaN)
# Reads and stores information determined by the DAQ
time_step = event_dict['fastDAQ']['caldata']['dt'][0]
# Converts function inputs from milliseconds to seconds
window_width_sec = window_width_ms*1e-3
offset_from_t0_sec = offset_from_t0_ms*1e-3
# Gets the indices of those times
window_width_index = int(window_width_sec/time_step)
offset_from_t0_index = int(offset_from_t0_sec/time_step)
# Generates an n-by-2 Ndarray, where n is the number of time windows (NOT IMPLEMENTED): axis 0 is the start of each window, axis 1 the end
times_array_sec = np.array([(event_dict['fastDAQ']['time'][bubble_t0_index-(2*offset_from_t0_index)-window_width_index],
event_dict['fastDAQ']['time'][bubble_t0_index-(2*offset_from_t0_index)]), (event_dict['fastDAQ']['time'][bubble_t0_index-offset_from_t0_index],
event_dict['fastDAQ']['time'][bubble_t0_index-offset_from_t0_index+window_width_index])], dtype=np.float64)
# Converts all the times in the times_array to milliseconds
times_array_ms = times_array_sec*1000
try:
# Performs a Fast Fourier Transform on the bubble and non-bubble parts of the trace, then calculates the power
fft_bubble_amp = np.fft.rfft(event_dict['fastDAQ']['Piezo1']
[bubble_t0_index-offset_from_t0_index:bubble_t0_index-offset_from_t0_index+window_width_index], axis=0)
fft_bubble_power = (abs(fft_bubble_amp))**2
fft_sample_amp = np.fft.rfft(event_dict['fastDAQ']['Piezo1']
[bubble_t0_index-(2*offset_from_t0_index)-window_width_index:bubble_t0_index-(2*offset_from_t0_index)], axis=0)
fft_sample_power = (abs(fft_sample_amp))**2
except IndexError:
print "Index error encountered with the time windows. Process Terminated."
return np.float64(np.NaN), np.float64(np.NaN), np.float64(np.NaN)
# Finds the df of the Fourier Transform
freq_step = 1/window_width_sec
# Uses the df to generate the range of Hertz which the FFT spans
freq_scale = np.linspace(0, freq_step*len(fft_bubble_power), num=len(fft_bubble_power))
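# Worked example with the defaults: window_width_ms = 10 gives
# window_width_sec = 0.01 s, so freq_step = 100 Hz per FFT bin, and the default
# bin edges (500, 10000) Hz land near indices 5 and 100 of freq_scale.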
# Creates an empty array to store the loudness of the bubble and non-bubble time windows
loudness_array = np.zeros((2, len(bin_edges)-1), dtype=np.float64)
# Finds the corresponding indices of the frequency bin edges and stores them in an array
bin_edges_indices = np.zeros(len(bin_edges), dtype=np.int64)
for ii in range(len(bin_edges)):
masked = freq_scale < bin_edges[ii]
index = np.nonzero(masked)[0]
try:
bin_edges_indices[ii] = index[-1]
except IndexError:
print "Index error encountered in finding bin edge indices. Process terminated."
return np.float64(np.NaN), np.float64(np.NaN), np.float64(np.NaN)
# Uses the bin edge indices to calculate the loudness of each frequency bin -- the loudness is the sum of all points times df squared
for ii in range(len(bin_edges_indices)-1):
bubble_loudness = np.sum((fft_bubble_power*(freq_step**2))[bin_edges_indices[ii]:bin_edges_indices[ii+1]], dtype=np.float64)
sample_loudness = np.sum((fft_sample_power*(freq_step**2))[bin_edges_indices[ii]:bin_edges_indices[ii+1]], dtype=np.float64)
loudness_array[0][ii] = sample_loudness
loudness_array[1][ii] = bubble_loudness
# Optional function argument for plotting tools
if show_figures_loudness:
plt.plot(freq_scale, fft_bubble_power*(freq_scale**2), 'b-', freq_scale, fft_sample_power*(freq_scale**2), 'r--')
plt.loglog()
plt.show()
return loudness_array, times_array_ms, bin_edges
def GetAbsSlope(x1, x2, y1, y2):
return abs((y1-y2)/(x1-x2))
| 47.779221
| 212
| 0.747305
|
68ae3e772e663412dddd5ebbb13fceb4e1750e2a
| 8,719
|
py
|
Python
|
utilities/autoware_camera_lidar_calibrator/nodes/cameracalibrator.py
|
alanjclark/autoware.ai
|
ba97edbbffb6f22e78912bf96400a59ef6a13daf
|
[
"Apache-2.0"
] | 2
|
2020-11-13T11:11:16.000Z
|
2022-03-09T20:24:54.000Z
|
utilities/autoware_camera_lidar_calibrator/nodes/cameracalibrator.py
|
alanjclark/autoware.ai
|
ba97edbbffb6f22e78912bf96400a59ef6a13daf
|
[
"Apache-2.0"
] | 40
|
2019-06-24T16:56:15.000Z
|
2022-02-28T13:41:58.000Z
|
utilities/autoware_camera_lidar_calibrator/nodes/cameracalibrator.py
|
alanjclark/autoware.ai
|
ba97edbbffb6f22e78912bf96400a59ef6a13daf
|
[
"Apache-2.0"
] | 8
|
2019-08-20T18:54:00.000Z
|
2022-02-09T13:54:41.000Z
|
#!/usr/bin/python3
#
# Copyright 2015-2019 Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# v1.0 Jacob Lambert 2018-03-05
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cv2
import functools
import message_filters
import os
import rospy
from autoware_camera_calibration.camera_calibrator import OpenCVCalibrationNode
from autoware_camera_calibration.calibrator import ChessboardInfo, Patterns
from message_filters import ApproximateTimeSynchronizer
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]",
description=None)
parser.add_option("-c", "--camera_name",
type="string", default='autoware_camera_calibration',
help="name of the camera to appear in the calibration file")
parser.add_option("-o", "--output",
type="string", default="yaml",
help="type of output - 'yaml' or 'tar'")
parser.add_option("-d", "--detection",
type="string", default="cv2",
help="Chessboard detection algorithm, OpenCV2 or Matlab (python matlab engine) - 'cv2', 'matlab'")
group = OptionGroup(parser, "Chessboard Options",
"You must specify one or more chessboards as pairs of --size and --square options.")
group.add_option("-p", "--pattern",
type="string", default="chessboard",
help="calibration pattern to detect - 'chessboard', 'circles', 'acircles'")
group.add_option("-s", "--size",
action="append", default=[],
help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)")
group.add_option("-q", "--square",
action="append", default=[],
help="chessboard square size in meters")
group.add_option("--min_samples",
type="int", default=40,
help="defines the minimum number of samples before allowing to calibrate regardless of the status")
parser.add_option_group(group)
group = OptionGroup(parser, "ROS Communication Options")
group.add_option("--approximate",
type="float", default=0.0,
help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras")
group.add_option("--no-service-check",
action="store_false", dest="service_check", default=True,
help="disable check for set_camera_info services at startup")
parser.add_option_group(group)
group = OptionGroup(parser, "Calibration Optimizer Options")
group.add_option("--fix-principal-point",
action="store_true", default=False,
help="fix the principal point at the image center")
group.add_option("--fix-aspect-ratio",
action="store_true", default=False,
help="enforce focal lengths (fx, fy) are equal")
group.add_option("--zero-tangent-dist",
action="store_true", default=False,
help="set tangential distortion coefficients (p1, p2) to zero")
group.add_option("-k", "--k-coefficients",
type="int", default=3, metavar="NUM_COEFFS",
help="number of radial distortion coefficients to use (up to 6, default %default)")
group.add_option("--disable_calib_cb_fast_check", action='store_true', default=False,
help="uses the CALIB_CB_FAST_CHECK flag for findChessboardCorners")
parser.add_option_group(group)
options, args = parser.parse_args()
if len(options.size) != len(options.square):
parser.error("Number of size and square inputs must be the same!")
if options.detection == "cv2":
print('Using OpenCV 2 for chessboard corner detection')
elif options.detection == "matlab":
print('Using matlab for chessboard corner detection')
else:
print('Unrecognized detection method %s, defaulting to OpenCV 2' % options.detection)
options.detection = "cv2"
if options.output == "yaml":
print('Saving as autoware yaml')
elif options.output == "tar":
print('Saving as tar')
else:
print('Unrecognized output method %s, defaulting to Autoware Yaml' % options.output)
options.output = "yaml"
if not options.square:
options.square.append("0.108")
options.size.append("8x6")
boards = []
for (sz, sq) in zip(options.size, options.square):
size = tuple([int(c) for c in sz.split('x')])
boards.append(ChessboardInfo(size[0], size[1], float(sq)))
if options.approximate == 0.0:
sync = message_filters.TimeSynchronizer
else:
sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate)
num_ks = options.k_coefficients
calib_flags = 0
if options.fix_principal_point:
calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
if options.fix_aspect_ratio:
calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO
if options.zero_tangent_dist:
calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST
if (num_ks > 3):
calib_flags |= cv2.CALIB_RATIONAL_MODEL
if (num_ks < 6):
calib_flags |= cv2.CALIB_FIX_K6
if (num_ks < 5):
calib_flags |= cv2.CALIB_FIX_K5
if (num_ks < 4):
calib_flags |= cv2.CALIB_FIX_K4
if (num_ks < 3):
calib_flags |= cv2.CALIB_FIX_K3
if (num_ks < 2):
calib_flags |= cv2.CALIB_FIX_K2
if (num_ks < 1):
calib_flags |= cv2.CALIB_FIX_K1
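# Worked example: with "-k 2" the cascade above sets CALIB_FIX_K3..K6, so only
# k1 and k2 (plus the tangential terms, unless --zero-tangent-dist is given)
# are estimated; with "-k 4" or more, CALIB_RATIONAL_MODEL is enabled as well.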
pattern = Patterns.Chessboard
if options.pattern == 'circles':
pattern = Patterns.Circles
elif options.pattern == 'acircles':
pattern = Patterns.ACircles
elif options.pattern != 'chessboard':
print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern)
if options.disable_calib_cb_fast_check:
checkerboard_flags = 0
else:
checkerboard_flags = cv2.CALIB_CB_FAST_CHECK
rospy.init_node('cameracalibrator')
node = OpenCVCalibrationNode(boards, options.service_check, sync, calib_flags, pattern, options.camera_name,
options.detection, options.output, min_good_enough=options.min_samples, checkerboard_flags=checkerboard_flags)
rospy.spin()
if __name__ == "__main__":
try:
main()
except Exception as e:
import traceback
traceback.print_exc()
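# Example invocation (an assumption following the usage string above; topic
# remapping as in the upstream ROS camera_calibration package):
#     rosrun autoware_camera_lidar_calibrator cameracalibrator.py --size 8x6 --square 0.108 image:=/image_raw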
| 44.258883
| 143
| 0.667393
|
ac425cb78d9ca3ff1da3f63606e0d7fc2e31a43e
| 256
|
py
|
Python
|
source/test_filter.py
|
alex-turantsev/Misoi_kontr1
|
874a04b609eb7cfd5b7717ec4b7dd2321362855f
|
[
"MIT"
] | null | null | null |
source/test_filter.py
|
alex-turantsev/Misoi_kontr1
|
874a04b609eb7cfd5b7717ec4b7dd2321362855f
|
[
"MIT"
] | null | null | null |
source/test_filter.py
|
alex-turantsev/Misoi_kontr1
|
874a04b609eb7cfd5b7717ec4b7dd2321362855f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
from PIL import Image
from operator_filter import operator_filter
image_path = '/Users/alex/Projects/images.jpeg'
img = Image.open(image_path)
filt = operator_filter()
filt.apply_filter(image=img)
| 21.333333
| 47
| 0.753906
|
374977122ed3012ed6fc11f5ad952107f8ffb561
| 5,119
|
py
|
Python
|
test/functional/wallet-accounts.py
|
farsider350/AUTX-Core
|
6d00d1e027a5a6dffb3b0815a155e4515ced007b
|
[
"MIT"
] | null | null | null |
test/functional/wallet-accounts.py
|
farsider350/AUTX-Core
|
6d00d1e027a5a6dffb3b0815a155e4515ced007b
|
[
"MIT"
] | null | null | null |
test/functional/wallet-accounts.py
|
farsider350/AUTX-Core
|
6d00d1e027a5a6dffb3b0815a155e4515ced007b
|
[
"MIT"
] | 1
|
2021-01-03T02:35:54.000Z
|
2021-01-03T02:35:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-paytxfee=0.0001"]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/500 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 1000)
# there should be 2 address groups
# each with 1 address with a balance of 500 autx
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 500)
linked_addresses.add(address_group[0][0])
# send 500 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "yd5KMREs3GLMe6mTJYr3YrH1juwNwrFCfB"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 1000},
minconf=1,
addlocked=False,
comment="",
subtractfeefrom=[common_address],
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.sendfrom("", address, amount_to_send)
node.generate(1)
for i in range(len(accounts)):
from_account = accounts[i]
to_account = accounts[(i+1) % len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(101)
expected_account_balances = {"": 52000}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 52000)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
| 37.639706
| 78
| 0.622387
|
e398e9da1cb1fad29425dc4770d94d951187d450
| 7,287
|
py
|
Python
|
python/pyarrow/serialization.py
|
jbapple-cloudera/arrow
|
e4f27131cf0b70fc1d166fda0db25362e1b187d1
|
[
"Apache-2.0"
] | 3
|
2018-11-19T13:38:21.000Z
|
2019-08-28T14:56:37.000Z
|
python/pyarrow/serialization.py
|
jbapple-cloudera/arrow
|
e4f27131cf0b70fc1d166fda0db25362e1b187d1
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/serialization.py
|
jbapple-cloudera/arrow
|
e4f27131cf0b70fc1d166fda0db25362e1b187d1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict, defaultdict
import six
import sys
import numpy as np
from pyarrow.compat import builtin_pickle
from pyarrow.lib import (SerializationContext, _default_serialization_context,
py_buffer)
try:
import cloudpickle
except ImportError:
cloudpickle = builtin_pickle
# ----------------------------------------------------------------------
# Set up serialization for numpy with dtype object (primitive types are
# handled efficiently with Arrow's Tensor facilities, see
# python_to_arrow.cc)
def _serialize_numpy_array_list(obj):
if obj.dtype.str != '|O':
# Make the array c_contiguous if necessary so that we can change the view.
if not obj.flags.c_contiguous:
obj = np.ascontiguousarray(obj)
return obj.view('uint8'), obj.dtype.str
else:
return obj.tolist(), obj.dtype.str
def _deserialize_numpy_array_list(data):
if data[1] != '|O':
assert data[0].dtype == np.uint8
return data[0].view(data[1])
else:
return np.array(data[0], dtype=np.dtype(data[1]))
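# Worked example of the round trip above (sketch): a C-contiguous int64 array
# of n elements is shipped as a uint8 view of 8*n bytes plus its dtype string
# '<i8'; _deserialize_numpy_array_list then restores it with a zero-copy view.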
def _pickle_to_buffer(x):
pickled = builtin_pickle.dumps(x, protocol=builtin_pickle.HIGHEST_PROTOCOL)
return py_buffer(pickled)
def _load_pickle_from_buffer(data):
as_memoryview = memoryview(data)
if six.PY2:
return builtin_pickle.loads(as_memoryview.tobytes())
else:
return builtin_pickle.loads(as_memoryview)
# ----------------------------------------------------------------------
# pandas-specific serialization matters
def _register_custom_pandas_handlers(context):
# ARROW-1784, faster path for pandas-only visibility
try:
import pandas as pd
except ImportError:
return
import pyarrow.pandas_compat as pdcompat
sparse_type_error_msg = (
'{0} serialization is not supported.\n'
'Note that {0} is planned to be deprecated '
'in pandas future releases.\n'
'See https://github.com/pandas-dev/pandas/issues/19239 '
'for more information.'
)
def _serialize_pandas_dataframe(obj):
if isinstance(obj, pd.SparseDataFrame):
raise NotImplementedError(
sparse_type_error_msg.format('SparseDataFrame')
)
return pdcompat.dataframe_to_serialized_dict(obj)
def _deserialize_pandas_dataframe(data):
return pdcompat.serialized_dict_to_dataframe(data)
def _serialize_pandas_series(obj):
if isinstance(obj, pd.SparseSeries):
raise NotImplementedError(
sparse_type_error_msg.format('SparseSeries')
)
return _serialize_pandas_dataframe(pd.DataFrame({obj.name: obj}))
def _deserialize_pandas_series(data):
deserialized = _deserialize_pandas_dataframe(data)
return deserialized[deserialized.columns[0]]
context.register_type(
pd.Series, 'pd.Series',
custom_serializer=_serialize_pandas_series,
custom_deserializer=_deserialize_pandas_series)
context.register_type(
pd.Index, 'pd.Index',
custom_serializer=_pickle_to_buffer,
custom_deserializer=_load_pickle_from_buffer)
context.register_type(
pd.DataFrame, 'pd.DataFrame',
custom_serializer=_serialize_pandas_dataframe,
custom_deserializer=_deserialize_pandas_dataframe)
def register_torch_serialization_handlers(serialization_context):
# ----------------------------------------------------------------------
# Set up serialization for pytorch tensors
try:
import torch
def _serialize_torch_tensor(obj):
return obj.detach().numpy()
def _deserialize_torch_tensor(data):
return torch.from_numpy(data)
for t in [torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor,
torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.Tensor]:
serialization_context.register_type(
t, "torch." + t.__name__,
custom_serializer=_serialize_torch_tensor,
custom_deserializer=_deserialize_torch_tensor)
except ImportError:
# no torch
pass
def register_default_serialization_handlers(serialization_context):
# ----------------------------------------------------------------------
# Set up serialization for primitive datatypes
# TODO(pcm): This is currently a workaround until arrow supports
# arbitrary precision integers. This is only called on long integers,
# see the associated case in the append method in python_to_arrow.cc
serialization_context.register_type(
int, "int",
custom_serializer=lambda obj: str(obj),
custom_deserializer=lambda data: int(data))
if (sys.version_info < (3, 0)):
serialization_context.register_type(
long, "long", # noqa: F821
custom_serializer=lambda obj: str(obj),
custom_deserializer=lambda data: long(data)) # noqa: F821
def _serialize_ordered_dict(obj):
return list(obj.keys()), list(obj.values())
def _deserialize_ordered_dict(data):
return OrderedDict(zip(data[0], data[1]))
serialization_context.register_type(
OrderedDict, "OrderedDict",
custom_serializer=_serialize_ordered_dict,
custom_deserializer=_deserialize_ordered_dict)
def _serialize_default_dict(obj):
return list(obj.keys()), list(obj.values()), obj.default_factory
def _deserialize_default_dict(data):
return defaultdict(data[2], zip(data[0], data[1]))
serialization_context.register_type(
defaultdict, "defaultdict",
custom_serializer=_serialize_default_dict,
custom_deserializer=_deserialize_default_dict)
serialization_context.register_type(
type(lambda: 0), "function",
pickle=True)
serialization_context.register_type(type, "type", pickle=True)
serialization_context.register_type(
np.ndarray, 'np.array',
custom_serializer=_serialize_numpy_array_list,
custom_deserializer=_deserialize_numpy_array_list)
_register_custom_pandas_handlers(serialization_context)
def default_serialization_context():
context = SerializationContext()
register_default_serialization_handlers(context)
return context
register_default_serialization_handlers(_default_serialization_context)
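# Usage sketch (assumption: the serialize/deserialize API of this pyarrow era):
#     import pyarrow as pa
#     context = default_serialization_context()
#     buf = pa.serialize({'a': np.arange(3)}, context=context).to_buffer()
#     restored = pa.deserialize(buf, context=context)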
| 33.426606
| 79
| 0.678743
|
2833e7fa3fec7ba8cee5bc45c32b791d378f3bc4
| 11,508
|
py
|
Python
|
configurations/je_os_fixedaggr_relloc_filtered.py
|
ShuaiW/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 182
|
2016-03-15T01:51:29.000Z
|
2021-04-21T09:49:05.000Z
|
configurations/je_os_fixedaggr_relloc_filtered.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 1
|
2018-06-22T16:46:12.000Z
|
2018-06-22T21:08:09.000Z
|
configurations/je_os_fixedaggr_relloc_filtered.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 61
|
2016-03-15T00:58:28.000Z
|
2020-03-06T22:00:41.000Z
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 200
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 8
sunny_batch_size = 4
batches_per_chunk = 8
num_epochs_train = 400
# - learning rate and method
base_lr = 0.0001
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
19*num_epochs_train/20: base_lr/100,
}
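# Worked out for num_epochs_train = 400: the learning rate starts at 1e-4,
# drops to 1e-5 at epoch 360 (9*400/10) and to 1e-6 at epoch 380 (19*400/20).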
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(64,64)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
# don't use patients who don't have more than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 22
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100  # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
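# Example: lb_softplus(.1) maps any real x to softplus(x) + 0.1 > 0.1, which is
# how the sigma outputs below are kept strictly positive, bounded away from zero.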
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
# PREPROCESS SLICES SEPERATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
relative_slice_locations = layers.RelativeLocationLayer(lin_slice_locations)
relative_slice_locations_slices = nn.layers.ReshapeLayer(relative_slice_locations, (-1, 1,))
# relloc_slices_repeated = nn.layers.ConcatLayer([relative_slice_locations_slices]*image_size, axis=2)
# relloc_slices_repeated = nn.layers.ConcatLayer([relloc_slices_repeated]*image_size, axis=3)
# relloc_slices = nn.layers.ReshapeLayer(relloc_slices_repeated, (-1, 1, image_size, image_size))
# l0_slices_enhanced = nn.layers.ConcatLayer([l0_slices, relloc_slices], axis=1)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0_slices, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
l5_flat = nn.layers.FlattenLayer(l5, 2)
    l5_flat_enhanced = nn.layers.ConcatLayer([l5_flat, ], axis=1)  # single-input concat is a no-op, kept as a hook for the commented-out location features above
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5_flat_enhanced, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
l_sys_mu = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(20.0), nonlinearity=None)
l_sys_sigma = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(3.), nonlinearity=lb_softplus(.1))
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5_flat_enhanced, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
l_dia_mu = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(20.0), nonlinearity=None)
l_dia_sigma = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(3.), nonlinearity=lb_softplus(.1))
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
l_sys_mu: l2_weight_out,
l_sys_sigma: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
l_dia_mu: l2_weight_out,
l_dia_sigma: l2_weight_out,
},
}
| 44.261538
| 181
| 0.734359
|
5e9b120f14eeeb06dbec8778bfa8628498b0c451
| 6,220
|
py
|
Python
|
tests/python/gpu/test_kvstore_gpu.py
|
noob0/incubator-mxnet
|
5a8b873343495c2d54a6c005d006b58be3978cb6
|
[
"Apache-2.0"
] | 28
|
2018-02-02T05:00:16.000Z
|
2022-01-02T18:27:39.000Z
|
tests/python/gpu/test_kvstore_gpu.py
|
noob0/incubator-mxnet
|
5a8b873343495c2d54a6c005d006b58be3978cb6
|
[
"Apache-2.0"
] | 27
|
2017-07-04T17:45:51.000Z
|
2019-09-12T06:56:27.000Z
|
tests/python/gpu/test_kvstore_gpu.py
|
noob0/incubator-mxnet
|
5a8b873343495c2d54a6c005d006b58be3978cb6
|
[
"Apache-2.0"
] | 7
|
2015-11-20T18:09:30.000Z
|
2017-11-24T16:52:25.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import unittest
from mxnet.test_utils import assert_almost_equal, default_context
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
class EnvManager:
def __init__(self, key, val):
self._key = key
self._next_val = val
self._prev_val = None
def __enter__(self):
try:
self._prev_val = os.environ[self._key]
except KeyError:
self._prev_val = ''
os.environ[self._key] = self._next_val
def __exit__(self, ptype, value, trace):
os.environ[self._key] = self._prev_val
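# Usage sketch (added for illustration): EnvManager temporarily overrides one
# environment variable and restores the previous value on exit, e.g.:
#   with EnvManager("MXNET_KVSTORE_USETREE", "1"):
#       ...  # code in this block sees the tree-based kvstore layout enabled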
def init_kv_with_str(stype='default', kv_type='local'):
"""init kv """
kv = mx.kv.create(kv_type)
# single
kv.init('a', mx.nd.zeros(shape, stype=stype))
# list
kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
return kv
# Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# Not reproducible, so this test is back on random seeds.
@with_seed()
def test_rsp_push_pull():
def check_rsp_push_pull(kv_type, sparse_pull, is_push_cpu=True):
kv = init_kv_with_str('row_sparse', kv_type)
kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
def check_rsp_pull(kv, count, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
num_rows = shape[0]
row_ids = []
all_row_ids = np.arange(num_rows)
vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
if is_same_rowid:
row_id = np.random.randint(num_rows, size=num_rows)
row_ids = [mx.nd.array(row_id)] * count
elif use_slice:
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
for i in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
vals_to_pull = vals[0] if len(vals) == 1 else vals
kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
for val, row_id in zip(vals, row_ids):
retained = val.asnumpy()
excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
for row in range(num_rows):
expected_val = np.zeros_like(retained[row])
expected_val += 0 if row in excluded_row_ids else 2
assert_almost_equal(retained[row], expected_val)
        if sparse_pull:
kv.pull('e', out=vals_to_pull, ignore_sparse=False)
for val in vals:
retained = val.asnumpy()
expected_val = np.zeros_like(retained)
expected_val[:] = 2
assert_almost_equal(retained, expected_val)
check_rsp_pull(kv, 1, [mx.gpu(0)], sparse_pull)
check_rsp_pull(kv, 1, [mx.cpu(0)], sparse_pull)
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], sparse_pull)
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], sparse_pull)
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], sparse_pull, use_slice=True)
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], sparse_pull, use_slice=True)
envs = ["","1"]
key = "MXNET_KVSTORE_USETREE"
for val in envs:
with EnvManager(key, val):
if val is "1":
sparse_pull = False
else:
sparse_pull = True
check_rsp_push_pull('local', sparse_pull)
check_rsp_push_pull('device', sparse_pull)
check_rsp_push_pull('device', sparse_pull, is_push_cpu=False)
def test_row_sparse_pull_single_device():
kvstore = mx.kv.create('device')
copy = mx.nd.random_normal(shape=(4,4), ctx=mx.gpu(0))
grad = copy.tostype("row_sparse")
key = 0
kvstore.init(key, grad)
idx = grad.indices
kvstore.push(key, grad)
kvstore.row_sparse_pull(key, out=grad, row_ids=idx)
assert_almost_equal(grad.asnumpy(), copy.asnumpy())
def test_rsp_push_pull_large_rowid():
num_rows = 793470
val = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
kv = mx.kv.create('device')
kv.init('a', val)
out = mx.nd.zeros((num_rows,1), stype='row_sparse').copyto(mx.gpu())
kv.push('a', val)
kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
assert(out.indices.shape[0] == num_rows)
if __name__ == '__main__':
import nose
nose.runmodule()
| 41.192053
| 107
| 0.637781
|
edc212c1cfa362ba4419a47aa787fb8635f04d45
| 10,165
|
py
|
Python
|
trainer.py
|
stefanosantaris/DMTKG
|
f5cba46b055e7c7aa3048ace0d24fce1a5300313
|
[
"Apache-2.0"
] | 1
|
2020-11-13T19:07:28.000Z
|
2020-11-13T19:07:28.000Z
|
trainer.py
|
stefanosantaris/DMTKG
|
f5cba46b055e7c7aa3048ace0d24fce1a5300313
|
[
"Apache-2.0"
] | 5
|
2020-07-27T10:41:47.000Z
|
2022-02-10T01:40:29.000Z
|
trainer.py
|
stefanosantaris/DMTKG
|
f5cba46b055e7c7aa3048ace0d24fce1a5300313
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from utils.dataset.GraphLoader import GraphLoader
from utils.utils import sparse_to_tuple
import scipy.sparse as sps
import numpy as np
import random
import time
class Trainer():
def __init__(self, exp_params):
super(Trainer, self).__init__()
self.graph_loader = GraphLoader(exp_params['path'] + "/" + exp_params['extract_folder'] +"/")
def prepare_test_adj(self, input_graph, ground_truth_adj):
coords, values, shape = sparse_to_tuple(input_graph)
ground_truth_adj = (ground_truth_adj[:input_graph.shape[0], :input_graph.shape[1]]).todense()
for coord in coords:
ground_truth_adj[coord[0], coord[1]] = 0.
ground_truth_adj[coord[1], coord[0]] = 0.
return sps.triu(sps.csr_matrix(ground_truth_adj, dtype=float))
def normalize(self, adj):
adj_with_diag = adj + sps.identity(adj.shape[0], dtype=np.float32).tocsr()
rowsum = np.array(adj_with_diag.sum(1))
degree_mat_inv_sqrt = sps.diags(np.power(rowsum, -0.5).flatten())
adj_normalized = adj_with_diag.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo().astype(np.float32)
return adj_normalized
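    # Added note: `normalize` computes the symmetric GCN normalization
    # D^{-1/2} (A + I) D^{-1/2}, where D is the degree matrix of A + I.
    # Dense equivalent for a tiny graph (illustration only, not used above):
    @staticmethod
    def _normalize_dense_sketch(adj_dense):
        a_hat = adj_dense + np.eye(adj_dense.shape[0])
        d_inv_sqrt = np.diag(np.power(a_hat.sum(1), -0.5))
        return d_inv_sqrt @ a_hat @ d_inv_sqrt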
def construct_dataset(self, graph, window_size, negative_sample):
start_graph = max(0, graph - window_size + 1)
max_id = 0
for i in range(start_graph, graph + 1):
adj = self.graph_loader.read_adjacency(i, max_id)
max_id = adj.shape[0] - 1
train_adj_sps = []
total_train_edges = np.zeros((max_id + 1, max_id + 1))
for i in range(start_graph, graph + 1):
adj = self.graph_loader.read_adjacency(i, max_id)
tmp_train_adj_dense = adj.todense()
tmp_train_adj_dense = np.where(tmp_train_adj_dense > 0.2, tmp_train_adj_dense, 0)
tmp_train_adj_sparse = sps.csr_matrix(tmp_train_adj_dense)
coords, values, shape = sparse_to_tuple(tmp_train_adj_sparse)
for coord in coords:
total_train_edges[coord[0], coord[1]] = 1
train_adj_sps.append(tmp_train_adj_sparse)
# Construct a full matrix with ones to generate negative sample tuples
train_ns = np.ones_like(total_train_edges) - total_train_edges - sps.identity(total_train_edges.shape[0])
ns_coord, ns_values, ns_shape = sparse_to_tuple(train_ns)
train_adj_norm = []
features = []
train_adj_labels = []
train_adj_inds = []
features_tuples = sparse_to_tuple(sps.identity(adj.shape[0], dtype=np.float32, format='coo'))
for i, adj in enumerate(train_adj_sps):
adj_norm_coord, adj_norm_values, adj_norm_shape = sparse_to_tuple(self.normalize(adj))
train_adj_norm.append(tf.SparseTensor(indices=adj_norm_coord,
values=np.array(adj_norm_values, dtype='float32'),
dense_shape=[adj_norm_shape[0], adj_norm_shape[1]]))
features.append(tf.SparseTensor(indices=features_tuples[0], values=features_tuples[1],
dense_shape=[features_tuples[2][0], features_tuples[2][1]]))
tmp_train_adj_dense = adj.todense()
train_coord, train_values, train_shape = sparse_to_tuple(adj)
tmp_train_adj_ind = np.zeros_like(tmp_train_adj_dense)
sequence = [i for i in range(len(ns_coord))]
random_coords = set(random.sample(sequence, negative_sample * len(train_coord)))
for coord in train_coord:
tmp_train_adj_ind[coord[0], coord[1]] = 1
for coord in random_coords:
tmp_train_adj_ind[ns_coord[coord][0], ns_coord[coord][1]] = 1
nnz_ind = np.nonzero(tmp_train_adj_ind)
tmp_train_label_val = tmp_train_adj_dense[nnz_ind]
train_adj_label_tensor = tf.convert_to_tensor(tmp_train_label_val, dtype=tf.float32)
train_adj_labels.append(train_adj_label_tensor)
ind_list = []
for i in range(len(nnz_ind[0])):
ind_list.append([nnz_ind[0][i], nnz_ind[1][i]])
train_adj_inds.append(tf.convert_to_tensor(ind_list, dtype=tf.int32))
test_adj_dense = self.prepare_test_adj(sps.csr_matrix(total_train_edges), self.graph_loader.read_adjacency(graph + 1, max_id)).todense()
test_adj_high = np.where(test_adj_dense > 0.2, test_adj_dense, 0)
test_adj_ind = np.where(test_adj_high > 0., 1, 0)
nnz_ind = np.nonzero(test_adj_ind)
ind_list = []
for i in range(len(nnz_ind[0])):
ind_list.append([nnz_ind[0][i], nnz_ind[1][i]])
test_adj = tf.convert_to_tensor(test_adj_high[nnz_ind], dtype=tf.float32)
test_adj_ind = tf.convert_to_tensor(ind_list, dtype=tf.int32)
return train_adj_norm, train_adj_labels, train_adj_inds, features, test_adj, test_adj_ind
def count_parameters(self,model):
return np.sum([np.prod(v.get_shape().as_list()) for v in model.trainable_variables])
def get_edge_embeddings(self, embeddings, indices):
src_embeddings = tf.gather(embeddings, indices[:,0])
dst_embeddings = tf.gather(embeddings, indices[:,1])
return tf.multiply(src_embeddings, dst_embeddings)
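    # Added note: the edge representation above is the Hadamard (element-wise)
    # product of the two endpoint embeddings, a common binary operator for
    # link prediction / edge regression.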
def evaluate_model(self, emb_size, train_embeddings, train_values, test_embeddings, test_values):
evaluation_model = tf.keras.models.Sequential([
tf.keras.layers.Dense(emb_size, activation=tf.nn.relu),
tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])
evaluation_model.compile(loss=tf.keras.losses.MSE, optimizer='adam')
evaluation_model.fit(train_embeddings, train_values,
epochs=10, verbose=0, batch_size=512)
test_res = evaluation_model(test_embeddings)
m = tf.keras.metrics.RootMeanSquaredError()
m.update_state(test_values, test_res)
rmse_score = m.result().numpy()
m = tf.keras.metrics.MeanAbsoluteError()
m.update_state(test_values, test_res)
mae_score = m.result().numpy()
return mae_score, rmse_score
def train_model(self, args):
num_exp = args.num_exp
start_graph = args.start_graph
end_graph = args.end_graph
dropout = args.dropout
negative_sample = args.ns
emb = args.emb
window_size = args.window
learning_rate = args.learning_rate
results = {}
print("Start training")
for graph in range(start_graph, end_graph + 1):
results[graph] = {'num_params': 0, 'mae': 0., 'rmse': 0.}
mae = []
rmse = []
number_of_params = []
print("Construct Dataset")
train_adj_norm, train_adj_label, train_adj_ind, features, test_adj, test_adj_ind = self.construct_dataset(graph, window_size, negative_sample)
print("Start experimentation")
for i in range(num_exp):
print("Experiment {} for GRAPH {}".format(i, graph))
device = "/GPU:0"
if args.cuda == 0:
device = "/CPU:0"
with tf.device(device):
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
                model = DeepGraph(eigvecs=emb)  # DeepGraph is assumed to be imported elsewhere in the project; it is not defined or imported in this module
for epoch in range(100):
with tf.GradientTape() as tape:
z, z_mean, z_std, reconstruction = model(features, train_adj_norm)
total_loss = 0
previous_kls = []
for k in range(len(z)):
reconstruct_val = tf.gather_nd(reconstruction[k], train_adj_ind[k])
                        # RMSE via differentiable tf ops; the original used a
                        # Keras metric plus .numpy(), which detaches the value
                        # from the tape and blocks gradients for this term
                        reconstruction_loss = tf.sqrt(
                            tf.reduce_mean(tf.square(reconstruct_val - train_adj_label[k])))
# KL Divergence
kl = (0.5 / train_adj_norm[k].shape[0]) * tf.reduce_mean(
tf.reduce_sum(1 + 2 * z_std[k] - tf.square(z_mean[k]) - tf.square(tf.exp(z_std[k])), 1))
previous_kls.append(kl)
final_kl = tf.reduce_mean(previous_kls[-len(train_adj_norm):])
total_loss += reconstruction_loss - final_kl
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
number_of_params.append(self.count_parameters(model))
z, z_mean,z_std, reconstruction = model(features, train_adj_norm)
train_edge_embeddings = tf.convert_to_tensor(self.get_edge_embeddings(z[-1], train_adj_ind[-1]), dtype=tf.float32)
train_values = tf.reshape(train_adj_label[-1],[train_adj_label[-1].shape[1], 1])
test_edge_embeddings = tf.convert_to_tensor(self.get_edge_embeddings(z[-1], test_adj_ind), dtype=tf.float32)
test_values = test_adj
mae_score, rmse_score = self.evaluate_model(emb, train_edge_embeddings, train_values, test_edge_embeddings, test_values)
mae.append(mae_score)
rmse.append(rmse_score)
tf.keras.backend.clear_session()
del train_adj_norm, train_adj_label, train_adj_ind, features, test_adj, test_adj_ind
results[graph]['num_params'] = np.mean(number_of_params)
results[graph]['mae'] = np.mean(mae)
results[graph]['rmse'] = np.mean(rmse)
print(
"Graph {} : N_PARAMS {} : MAE {} : RMSE {}".format(
graph,
results[graph]['num_params'],
results[graph]['mae'],
results[graph]['rmse']
))
return results
| 46.415525
| 154
| 0.605509
|
d0ed864e9bec6eb7da7982919bac786ce3b6ece9
| 781
|
py
|
Python
|
factory/dataset_factory.py
|
LZX-0201/L3-Net-
|
fdb8398e7ff87a0a148f82f320cab391c2c1ff8d
|
[
"MIT"
] | 7
|
2021-11-19T13:16:25.000Z
|
2022-02-11T03:24:30.000Z
|
factory/dataset_factory.py
|
LZX-0201/L3-Net-
|
fdb8398e7ff87a0a148f82f320cab391c2c1ff8d
|
[
"MIT"
] | null | null | null |
factory/dataset_factory.py
|
LZX-0201/L3-Net-
|
fdb8398e7ff87a0a148f82f320cab391c2c1ff8d
|
[
"MIT"
] | 1
|
2022-01-02T08:32:13.000Z
|
2022-01-02T08:32:13.000Z
|
from dataset import *
class DatasetFactory:
singleton_dataset = None
def __init__(self):
pass
@staticmethod
def get_dataset(data_config):
class_name = data_config['dataset_class']
all_classes = DatasetBase.__subclasses__()
for cls in all_classes:
if cls.__name__ == class_name:
return cls(data_config['config_file']['expanded'])
raise TypeError(f'no class named \'{class_name}\' found in dataset folder')
@classmethod
def get_singleton_dataset(cls, data_config=None):
if data_config is None:
return cls.singleton_dataset
if cls.singleton_dataset is None:
cls.singleton_dataset = cls.get_dataset(data_config)
return cls.singleton_dataset
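# Usage sketch (added; key names follow the lookups above, the class name and
# path are placeholders):
#   data_config = {"dataset_class": "MyDataset",
#                  "config_file": {"expanded": "/path/to/config.yaml"}}
#   dataset = DatasetFactory.get_singleton_dataset(data_config)
#   same_dataset = DatasetFactory.get_singleton_dataset()  # cached instance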
| 31.24
| 83
| 0.663252
|
99d34052319eb0b9f7d7a9f8b0e04c6e8623cae8
| 7,676
|
py
|
Python
|
src/demo/fast_abs_rl-master/decoding.py
|
nilax97/nlp_project
|
b366060728342fc177a24a2ba8db2d97e860b959
|
[
"MIT"
] | 8
|
2020-04-04T06:15:43.000Z
|
2021-11-17T11:19:48.000Z
|
src/demo/fast_abs_rl-master/decoding.py
|
nilax97/nlp_project
|
b366060728342fc177a24a2ba8db2d97e860b959
|
[
"MIT"
] | 3
|
2021-06-08T21:05:01.000Z
|
2022-03-12T00:19:12.000Z
|
src/demo/fast_abs_rl-master/decoding.py
|
nilax97/nlp_project
|
b366060728342fc177a24a2ba8db2d97e860b959
|
[
"MIT"
] | null | null | null |
""" decoding utilities"""
import json
import re
import os
from os.path import join
import pickle as pkl
from itertools import starmap
from cytoolz import curry
import torch
from utils import PAD, UNK, START, END
from model.copy_summ import CopySumm
from model.extract import ExtractSumm, PtrExtractSumm
from model.rl import ActorCritic
from data.batcher import conver2id, pad_batch_tensorize
from data.data import CnnDmDataset
try:
DATASET_DIR = os.environ['DATA']
except KeyError:
    DATASET_DIR = None  # avoid a NameError later; decoding will fail with the message below
    print('please use environment variable to specify data directories')
class DecodeDataset(CnnDmDataset):
""" get the article sentences only (for decoding use)"""
def __init__(self, split):
assert split in ['val', 'test']
super().__init__(split, DATASET_DIR)
def __getitem__(self, i):
js_data = super().__getitem__(i)
art_sents = js_data['article']
return art_sents
def make_html_safe(s):
"""Rouge use html, has to make output html safe"""
return s.replace("<", "<").replace(">", ">")
def load_best_ckpt(model_dir, reverse=False):
""" reverse=False->loss, reverse=True->reward/score"""
ckpts = os.listdir(join(model_dir, 'ckpt'))
ckpt_matcher = re.compile('^ckpt-.*-[0-9]*')
ckpts = sorted([c for c in ckpts if ckpt_matcher.match(c)],
key=lambda c: float(c.split('-')[1]), reverse=reverse)
print('loading checkpoint {}...'.format(ckpts[0]))
ckpt = torch.load(
join(model_dir, 'ckpt/{}'.format(ckpts[0])), map_location='cpu'
)['state_dict']
return ckpt
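# Added note: checkpoint files appear to be named "ckpt-<metric>-<step>", so
# sorting on float(c.split('-')[1]) picks the best one: the lowest loss with
# reverse=False, or the highest reward/score with reverse=True.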
class Abstractor(object):
def __init__(self, abs_dir, max_len=30, cuda=True):
abs_meta = json.load(open(join(abs_dir, 'meta.json')))
assert abs_meta['net'] == 'base_abstractor'
abs_args = abs_meta['net_args']
abs_ckpt = load_best_ckpt(abs_dir)
word2id = pkl.load(open(join(abs_dir, 'vocab.pkl'), 'rb'))
abstractor = CopySumm(**abs_args)
abstractor.load_state_dict(abs_ckpt)
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = abstractor.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
self._max_len = max_len
def _prepro(self, raw_article_sents):
ext_word2id = dict(self._word2id)
ext_id2word = dict(self._id2word)
for raw_words in raw_article_sents:
for w in raw_words:
                if w not in ext_word2id:
ext_word2id[w] = len(ext_word2id)
ext_id2word[len(ext_id2word)] = w
articles = conver2id(UNK, self._word2id, raw_article_sents)
art_lens = [len(art) for art in articles]
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)
extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False
).to(self._device)
extend_vsize = len(ext_word2id)
dec_args = (article, art_lens, extend_art, extend_vsize,
START, END, UNK, self._max_len)
return dec_args, ext_id2word
def __call__(self, raw_article_sents):
self._net.eval()
dec_args, id2word = self._prepro(raw_article_sents)
decs, attns = self._net.batch_decode(*dec_args)
def argmax(arr, keys):
return arr[max(range(len(arr)), key=lambda i: keys[i].item())]
dec_sents = []
for i, raw_words in enumerate(raw_article_sents):
dec = []
for id_, attn in zip(decs, attns):
if id_[i] == END:
break
elif id_[i] == UNK:
dec.append(argmax(raw_words, attn[i]))
else:
dec.append(id2word[id_[i].item()])
dec_sents.append(dec)
return dec_sents
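# Added note: when the decoder emits UNK, the source word with the highest
# attention weight is copied instead (the `argmax(raw_words, attn[i])` call
# above), a simple copy-mechanism fallback for out-of-vocabulary tokens.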
class BeamAbstractor(Abstractor):
def __call__(self, raw_article_sents, beam_size=5, diverse=1.0):
self._net.eval()
dec_args, id2word = self._prepro(raw_article_sents)
dec_args = (*dec_args, beam_size, diverse)
all_beams = self._net.batched_beamsearch(*dec_args)
all_beams = list(starmap(_process_beam(id2word),
zip(all_beams, raw_article_sents)))
return all_beams
@curry
def _process_beam(id2word, beam, art_sent):
def process_hyp(hyp):
seq = []
for i, attn in zip(hyp.sequence[1:], hyp.attns[:-1]):
if i == UNK:
copy_word = art_sent[max(range(len(art_sent)),
key=lambda j: attn[j].item())]
seq.append(copy_word)
else:
seq.append(id2word[i])
hyp.sequence = seq
del hyp.hists
del hyp.attns
return hyp
return list(map(process_hyp, beam))
class Extractor(object):
def __init__(self, ext_dir, max_ext=5, cuda=True):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
if ext_meta['net'] == 'ml_ff_extractor':
ext_cls = ExtractSumm
elif ext_meta['net'] == 'ml_rnn_extractor':
ext_cls = PtrExtractSumm
else:
raise ValueError()
ext_ckpt = load_best_ckpt(ext_dir)
ext_args = ext_meta['net_args']
extractor = ext_cls(**ext_args)
extractor.load_state_dict(ext_ckpt)
word2id = pkl.load(open(join(ext_dir, 'vocab.pkl'), 'rb'))
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = extractor.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
self._max_ext = max_ext
def __call__(self, raw_article_sents):
self._net.eval()
n_art = len(raw_article_sents)
articles = conver2id(UNK, self._word2id, raw_article_sents)
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
indices = self._net.extract([article], k=min(n_art, self._max_ext))
return indices
class ArticleBatcher(object):
def __init__(self, word2id, cuda=True):
        self._device = torch.device('cuda' if cuda else 'cpu')
        self._word2id = word2id
def __call__(self, raw_article_sents):
articles = conver2id(UNK, self._word2id, raw_article_sents)
article = pad_batch_tensorize(articles, PAD, cuda=False
).to(self._device)
return article
class RLExtractor(object):
def __init__(self, ext_dir, cuda=True):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
assert ext_meta['net'] == 'rnn-ext_abs_rl'
ext_args = ext_meta['net_args']['extractor']['net_args']
word2id = pkl.load(open(join(ext_dir, 'agent_vocab.pkl'), 'rb'))
extractor = PtrExtractSumm(**ext_args)
agent = ActorCritic(extractor._sent_enc,
extractor._art_enc,
extractor._extractor,
ArticleBatcher(word2id, cuda))
ext_ckpt = load_best_ckpt(ext_dir, reverse=True)
agent.load_state_dict(ext_ckpt)
self._device = torch.device('cuda' if cuda else 'cpu')
self._net = agent.to(self._device)
self._word2id = word2id
self._id2word = {i: w for w, i in word2id.items()}
def __call__(self, raw_article_sents):
self._net.eval()
indices = self._net(raw_article_sents)
return indices
| 37.812808
| 75
| 0.607217
|
685d7709366660be79d98f26d27411b98b337119
| 1,642
|
py
|
Python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01/models/virtual_network_rule_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01/models/virtual_network_rule_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01/models/virtual_network_rule_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualNetworkRule(Model):
"""Virtual network rule.
All required parameters must be populated in order to send to Azure.
:param action: The action of virtual network rule. Possible values
include: 'Allow'. Default value: "Allow" .
:type action: str or
~azure.mgmt.containerregistry.v2019_05_01.models.Action
:param virtual_network_resource_id: Required. Resource ID of a subnet, for
example:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
:type virtual_network_resource_id: str
"""
_validation = {
'virtual_network_resource_id': {'required': True},
}
_attribute_map = {
'action': {'key': 'action', 'type': 'str'},
'virtual_network_resource_id': {'key': 'id', 'type': 'str'},
}
def __init__(self, *, virtual_network_resource_id: str, action="Allow", **kwargs) -> None:
super(VirtualNetworkRule, self).__init__(**kwargs)
self.action = action
self.virtual_network_resource_id = virtual_network_resource_id
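# Usage sketch (added; the subnet ID is a placeholder, not a real resource):
#   rule = VirtualNetworkRule(
#       virtual_network_resource_id=(
#           "/subscriptions/<sub>/resourceGroups/<rg>/providers"
#           "/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"))
#   # `action` defaults to "Allow"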
| 38.186047
| 148
| 0.646163
|
145ff7cd59bf75144b6b899ecb7383e46371273c
| 133
|
py
|
Python
|
GeneticSimPython/GeneticSimPython/Genetic/constants.py
|
Scimson/GeneticSim
|
7175e0ead69185bcf3221a91b5bac40b4cc42495
|
[
"Apache-2.0"
] | null | null | null |
GeneticSimPython/GeneticSimPython/Genetic/constants.py
|
Scimson/GeneticSim
|
7175e0ead69185bcf3221a91b5bac40b4cc42495
|
[
"Apache-2.0"
] | null | null | null |
GeneticSimPython/GeneticSimPython/Genetic/constants.py
|
Scimson/GeneticSim
|
7175e0ead69185bcf3221a91b5bac40b4cc42495
|
[
"Apache-2.0"
] | null | null | null |
INITIAL_GENE_VALUE_LIMIT = 1
MUTATION_PROBABILITY = 0.3
MUTATION_SIZE = 0.2
GENERATION_SIZE = 8
VERBOSE = False
DECIMAL_PRECISION = 3
| 22.166667
| 28
| 0.81203
|
c3a5468296dd87b66588816b6535751cd40807c6
| 23,881
|
py
|
Python
|
test/functional/test_runner.py
|
purplefox81/bitcoin-testnet-for-fun
|
c3fa891b93e02901ce6b4ee1b10e8a3cfa9c151e
|
[
"MIT"
] | 20
|
2019-04-03T06:30:39.000Z
|
2019-11-07T08:57:50.000Z
|
test/functional/test_runner.py
|
purplefox81/bitcoin-testnet-for-fun
|
c3fa891b93e02901ce6b4ee1b10e8a3cfa9c151e
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
purplefox81/bitcoin-testnet-for-fun
|
c3fa891b93e02901ce6b4ee1b10e8a3cfa9c151e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
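# Added note: each color above is a (reset, start) escape-code pair, so
# "COLOR[1] + text + COLOR[0]" wraps `text` in the color and then resets it.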
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_zmq.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'p2p_feefilter.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub("\.py$", "", test) + ".py" for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
)
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| 39.085106
| 195
| 0.634102
|
b79b99c01655b3acb52a59fe5a1e033a5054d447
| 615
|
py
|
Python
|
todo_app/forms.py
|
edarakchiev/todo_project
|
d546c959b999d2dff3a366087c05ff691e95bb6b
|
[
"MIT"
] | null | null | null |
todo_app/forms.py
|
edarakchiev/todo_project
|
d546c959b999d2dff3a366087c05ff691e95bb6b
|
[
"MIT"
] | null | null | null |
todo_app/forms.py
|
edarakchiev/todo_project
|
d546c959b999d2dff3a366087c05ff691e95bb6b
|
[
"MIT"
] | null | null | null |
from django import forms
class CreateTodoForm(forms.Form):
text = forms.CharField(
max_length=30,
)
description = forms.CharField(
widget=forms.Textarea(
attrs={
'rows': 3,
'cols': 40,
}
),
)
is_done = forms.BooleanField(
required=False
)
bots_catcher = forms.CharField(
widget=forms.HiddenInput(),
required=False
)
    def clean_bots_catcher(self):
        value = self.cleaned_data['bots_catcher']
        if value:
            raise forms.ValidationError('You are a bot')
        return value  # clean_<field> methods must return the cleaned value
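# Added note: `bots_catcher` is a honeypot field: it is rendered hidden, so a
# non-empty submitted value indicates an automated client. A view would use
# the form as usual (sketch):
#   form = CreateTodoForm(request.POST)
#   if form.is_valid():
#       ...  # create the todo from form.cleaned_data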
| 21.206897
| 56
| 0.547967
|
8814531d79cb690c3b678aa71b09d097f5dea4cf
| 1,575
|
py
|
Python
|
shops/fashionnova.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | 3
|
2019-12-04T07:08:55.000Z
|
2020-12-08T01:38:46.000Z
|
shops/fashionnova.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | null | null | null |
shops/fashionnova.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | null | null | null |
from shops.shop_base import ShopBase
class Fashionnova(ShopBase):
name = "FASHIONNOVA"
def start_requests(self):
# uuid_v = uuid.uuid4()
uuid_v = "8fb37bd6-aef1-4d7c-be3f-88bafef01308"
shop_url = self.shop_url.format(keyword=self._search_keyword, uuid=uuid_v)
yield self.get_request(shop_url, self.get_best_link)
def get_best_link(self, response):
items = self.safe_grab(self.safe_json(response.text), ["items"])
for item in items:
item_url = self.safe_grab(item, ["u"])
yield self.get_request(
url=item_url,
callback=self.parse_data,
domain_url="https://www.fashionnova.com/",
)
def parse_data(self, response):
image_url = response.css("#large-thumb ::attr(src)").extract_first()
title = self.extract_items(
response.css("#product-info .title ::text").extract()
)
description = "\n".join(
list(
set(
response.css(
".description .group .group-body ul li::text"
).extract()
)
)
)
price = response.css(".deal spanclass ::text").extract_first()
yield self.generate_result_meta(
shop_link=response.url,
image_url=image_url,
shop_name=self.name,
price=price,
title=title,
searched_keyword=self._search_keyword,
content_description=description,
)
| 32.8125
| 82
| 0.556825
|
b7f22b13b32ec40d0e5f338babf26b31f20d2417
| 4,056
|
py
|
Python
|
kb++.py
|
happymonkey1/kbl
|
b254bde2d809829bde9cd1f906169bbddfcb28c5
|
[
"MIT"
] | 1
|
2021-10-12T02:08:13.000Z
|
2021-10-12T02:08:13.000Z
|
kb++.py
|
happymonkey1/kbl
|
b254bde2d809829bde9cd1f906169bbddfcb28c5
|
[
"MIT"
] | null | null | null |
kb++.py
|
happymonkey1/kbl
|
b254bde2d809829bde9cd1f906169bbddfcb28c5
|
[
"MIT"
] | null | null | null |
import sys
LANG_NAME = "kb++"
COUNTER = 0
def iota(reset = False) -> int:
global COUNTER
if reset:
COUNTER = 0
i = COUNTER
COUNTER += 1
return i
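# Added note: iota() mimics Go's iota to hand out sequential enum values;
# iota(True) restarts the counter, so each block below starts again at 0:
#   iota(True) -> 0, iota() -> 1, iota() -> 2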
''' Define Operations '''
OP_ASSIGN = iota(True)
OP_ADD = iota()
OP_PRINT = iota()
COUNT_OPS = iota()
''' Define Types '''
TYPE_i32 = iota(True)
COUNT_TYPES = iota()
''' Operation Implementations '''
def assign():
assert False, "assign not implemented"
def add():
assert False, "add not implemented"
def pprint():
assert False, "print not implemented"
def unpack(packed) -> tuple:
return (packed[0], packed[1:] if len(packed) > 1 else None)
def find_col(line : str, start : int, predicate):
while start < len(line) and predicate(line[start]):
start += 1
return start
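# Added example: find_col scans forward while `predicate` holds, so lex_line
# below alternates between skipping whitespace and consuming a token:
#   lex_line("a <- 1") -> [("a", 0), ("<-", 2), ("1", 5)]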
def lex_line(line : str) -> tuple:
# TODO(Sean) implement semi colon line endings?
    tokens: list[tuple[str, int]] = []
col: int = find_col(line, 0, lambda x: x.isspace())
while col < len(line):
token_end = find_col(line, col, lambda x: not x.isspace())
token_str = line[col:token_end]
tokens.append((token_str, col))
col = token_end
col = find_col(line, col, lambda x: x.isspace())
return tokens
def lex_file(file) -> list[list[tuple[str, int]]]:
    lines: list[list[tuple[str, int]]] = []
with open(file, 'r') as f:
for row_index, line in enumerate(f.readlines()):
line_tokens = lex_line(line)
lines.append(line_tokens)
return lines
def token_to_op(token : tuple) -> int:
assert COUNT_OPS == 3, "Not all operations are implmented in token_to_op."
token_repr = token[0]
if token_repr == "<-":
return OP_ASSIGN
elif token_repr == "+":
return OP_ADD
elif token_repr == "print":
return OP_PRINT
else:
return -1
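# Added example: token_to_op maps a lexed token to an opcode and uses -1 as
# the "not an operator" sentinel:
#   token_to_op(("<-", 0)) -> OP_ASSIGN;  token_to_op(("foo", 0)) -> -1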
def parse_line(line_tokens : list[tuple[str, int]], row : int) -> list[tuple[int, int]]:
# TODO(Sean) kb++ only supports one operation per line
operations = []
i = 0
while i < len(line_tokens):
token = line_tokens[i]
if (op := token_to_op(token)) != -1:
operations.append((op, row))
break
i += 1
#stack += [token for index, token in enumerate(line_tokens) if index != i ]
return operations
def build_program(file) -> list[tuple[int, int]]:
lines = lex_file(file)
ops: list[tuple[int, int]] = []
for row, tokens in enumerate(lines):
op_token: tuple[int, int] = parse_line(tokens, row)
ops.append(op_token)
return ops
def interpret_program(program : list[tuple[int, int]]):
# TODO: more types
stack: dict[str, int] = {}
for line in program:
for op in line:
assert COUNT_OPS == 3, "Exhaustive list of implemented ops in interpret_program"
print(op)
if op[0] == OP_ASSIGN:
                assign()  # operands are not parsed yet; the original referenced undefined var_name/var_data
elif op[0] == OP_ADD:
add()
elif op[0] == OP_PRINT:
pprint()
else:
assert False, "Unreachable code in interpret_program"
def compile_program(program):
assert False, "compilation not implemented"
def usage(file):
print(f"[USAGE]: {file} [COMMAND] -[OPTIONAL ARGS] [FILE]")
print(f" int interpret the {LANG_NAME} program")
print(f" com compiles the {LANG_NAME} program to assembly")
if __name__ == "__main__":
argv = sys.argv
assert len(argv) >= 1
compiler, *argv = argv
    if len(argv) < 2:
        usage(compiler)  # argv no longer contains the program name after unpacking
        print("[ERROR]: no command specified")
        sys.exit(1)
    assert len(argv) == 2, f"{compiler} only supports [COMMAND] [FILE]"
command, program_path = argv
program = build_program(program_path)
if command == "int":
interpret_program(program)
elif command == "com":
compile_program(program)
else:
usage(compiler)
print(f"[ERROR]: unknown command '{command}' specified")
| 27.972414
| 92
| 0.586292
|
a338a86c55af8de5a2ba8c4e62b5a4ea7fd7c2fd
| 3,934
|
py
|
Python
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/encoded_task_run_request.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/encoded_task_run_request.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/encoded_task_run_request.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .run_request import RunRequest
class EncodedTaskRunRequest(RunRequest):
"""The parameters for a quick task run request.
All required parameters must be populated in order to send to Azure.
:param is_archive_enabled: The value that indicates whether archiving is
enabled for the run or not. Default value: False .
:type is_archive_enabled: bool
:param type: Required. Constant filled by server.
:type type: str
:param encoded_task_content: Required. Base64 encoded value of the
template/definition file content.
:type encoded_task_content: str
:param encoded_values_content: Base64 encoded value of the
parameters/values file content.
:type encoded_values_content: str
:param values: The collection of overridable values that can be passed
when running a task.
:type values:
list[~azure.mgmt.containerregistry.v2018_09_01.models.SetValue]
:param timeout: Run timeout in seconds. Default value: 3600 .
:type timeout: int
:param platform: Required. The platform properties against which the run
has to happen.
:type platform:
~azure.mgmt.containerregistry.v2018_09_01.models.PlatformProperties
:param agent_configuration: The machine configuration of the run agent.
:type agent_configuration:
~azure.mgmt.containerregistry.v2018_09_01.models.AgentProperties
    :param source_location: The URL (absolute or relative) of the source
     context. It can be a URL to a tar or git repository.
     If it is a relative URL, the relative path should be obtained from
     calling the listBuildSourceUploadUrl API.
    :type source_location: str
    :param credentials: The properties that describe a set of credentials
     that will be used when this run is invoked.
:type credentials:
~azure.mgmt.containerregistry.v2018_09_01.models.Credentials
"""
_validation = {
'type': {'required': True},
'encoded_task_content': {'required': True},
'timeout': {'maximum': 28800, 'minimum': 300},
'platform': {'required': True},
}
_attribute_map = {
'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'encoded_task_content': {'key': 'encodedTaskContent', 'type': 'str'},
'encoded_values_content': {'key': 'encodedValuesContent', 'type': 'str'},
'values': {'key': 'values', 'type': '[SetValue]'},
'timeout': {'key': 'timeout', 'type': 'int'},
'platform': {'key': 'platform', 'type': 'PlatformProperties'},
'agent_configuration': {'key': 'agentConfiguration', 'type': 'AgentProperties'},
'source_location': {'key': 'sourceLocation', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'Credentials'},
}
def __init__(self, **kwargs):
super(EncodedTaskRunRequest, self).__init__(**kwargs)
self.encoded_task_content = kwargs.get('encoded_task_content', None)
self.encoded_values_content = kwargs.get('encoded_values_content', None)
self.values = kwargs.get('values', None)
self.timeout = kwargs.get('timeout', 3600)
self.platform = kwargs.get('platform', None)
self.agent_configuration = kwargs.get('agent_configuration', None)
self.source_location = kwargs.get('source_location', None)
self.credentials = kwargs.get('credentials', None)
self.type = 'EncodedTaskRunRequest'
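As a usage sketch, this model can be populated from a local task definition; the client wiring is omitted and the task content below is made up:

import base64

# Hypothetical multi-step task definition; platform may also be passed as a
# PlatformProperties instance from the same models package.
task_yaml = b"version: v1.0.0\nsteps:\n  - build: -t sample ."
request = EncodedTaskRunRequest(
    encoded_task_content=base64.b64encode(task_yaml).decode(),
    platform={'os': 'Linux'},
    timeout=3600,
)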
| 45.744186
| 88
| 0.663447
|
0900224a01ccfb7e9e2455901e528d4e255d2ad3
| 21,511
|
py
|
Python
|
darts/models/forecasting/rnn_model.py
|
bruecks2/darts
|
4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5
|
[
"Apache-2.0"
] | null | null | null |
darts/models/forecasting/rnn_model.py
|
bruecks2/darts
|
4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5
|
[
"Apache-2.0"
] | null | null | null |
darts/models/forecasting/rnn_model.py
|
bruecks2/darts
|
4cf51f7cfffdc53049bdca6f9eb54eaf5810eaa5
|
[
"Apache-2.0"
] | 1
|
2022-02-15T11:06:29.000Z
|
2022-02-15T11:06:29.000Z
|
"""
Recurrent Neural Networks
-------------------------
"""
from typing import Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from darts.logging import get_logger, raise_if_not
from darts.models.forecasting.pl_forecasting_module import PLDualCovariatesModule
from darts.models.forecasting.torch_forecasting_model import DualCovariatesTorchModel
from darts.timeseries import TimeSeries
from darts.utils.data import DualCovariatesShiftedDataset, TrainingDataset
logger = get_logger(__name__)
# TODO add batch norm
class _RNNModule(PLDualCovariatesModule):
def __init__(
self,
name: str,
input_size: int,
hidden_dim: int,
num_layers: int,
target_size: int,
nr_params: int,
dropout: float = 0.0,
**kwargs
):
"""PyTorch module implementing an RNN to be used in `RNNModel`.
PyTorch module implementing a simple RNN with the specified `name` type.
This module combines a PyTorch RNN module, together with one fully connected layer which
maps the hidden state of the RNN at each step to the output value of the model at that
time step.
Parameters
----------
name
The name of the specific PyTorch RNN module ("RNN", "GRU" or "LSTM").
input_size
The dimensionality of the input time series.
hidden_dim
The number of features in the hidden state `h` of the RNN module.
num_layers
The number of recurrent layers.
target_size
The dimensionality of the output time series.
nr_params
The number of parameters of the likelihood (or 1 if no likelihood is used).
dropout
The fraction of neurons that are dropped in all-but-last RNN layers.
**kwargs
all parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
Inputs
------
x of shape `(batch_size, input_length, input_size)`
Tensor containing the features of the input sequence. The `input_length` is not fixed.
Outputs
-------
y of shape `(batch_size, output_chunk_length, target_size, nr_params)`
Tensor containing the outputs of the RNN at every time step of the input sequence.
During training the whole tensor is used as output, whereas during prediction we only use y[:, -1, :].
However, this module always returns the whole Tensor.
"""
# RNNModule doesn't really need input and output_chunk_length for PLModule
super().__init__(**kwargs)
# Defining parameters
self.target_size = target_size
self.nr_params = nr_params
self.name = name
# Defining the RNN module
self.rnn = getattr(nn, name)(
input_size, hidden_dim, num_layers, batch_first=True, dropout=dropout
)
# The RNN module needs a linear layer V that transforms hidden states into outputs, individually
self.V = nn.Linear(hidden_dim, target_size * nr_params)
def forward(self, x_in: Tuple, h=None):
x, _ = x_in
# data is of size (batch_size, input_length, input_size)
batch_size = x.shape[0]
# out is of size (batch_size, input_length, hidden_dim)
out, last_hidden_state = self.rnn(x) if h is None else self.rnn(x, h)
# Here, we apply the V matrix to every hidden state to produce the outputs
predictions = self.V(out)
# predictions is of size (batch_size, input_length, target_size)
predictions = predictions.view(batch_size, -1, self.target_size, self.nr_params)
# returns outputs for all inputs, only the last one is needed for prediction time
return predictions, last_hidden_state
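    # A shape walk-through of forward() (illustrative numbers, assuming
    # batch_size=16, input_length=10, input_size=3, hidden_dim=25,
    # target_size=1, nr_params=1):
    #   x                      (16, 10, 3)
    #   out = self.rnn(x)[0]   (16, 10, 25)
    #   self.V(out)            (16, 10, 1)   # hidden_dim -> target_size * nr_params
    #   after .view(...)       (16, 10, 1, 1)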
def _produce_train_output(self, input_batch: Tuple):
(
past_target,
historic_future_covariates,
future_covariates,
static_covariates,
) = input_batch
# For the RNN we concatenate the past_target with the future_covariates
# (they have the same length because we enforce a Shift dataset for RNNs)
model_input = (
torch.cat([past_target, future_covariates], dim=2)
if future_covariates is not None
else past_target,
static_covariates,
)
return self(model_input)[0]
def _produce_predict_output(self, x: Tuple, last_hidden_state=None):
"""overwrite parent classes `_produce_predict_output` method"""
output, hidden = self(x, last_hidden_state)
if self.likelihood:
return self.likelihood.sample(output), hidden
else:
return output.squeeze(dim=-1), hidden
def _get_batch_prediction(
self, n: int, input_batch: Tuple, roll_size: int
) -> torch.Tensor:
"""
This model is recurrent, so we have to write a specific way to obtain the time series forecasts of length n.
"""
(
past_target,
historic_future_covariates,
future_covariates,
static_covariates,
) = input_batch
if historic_future_covariates is not None:
# RNNs need as inputs (target[t] and covariates[t+1]) so here we shift the covariates
all_covariates = torch.cat(
[historic_future_covariates[:, 1:, :], future_covariates], dim=1
)
cov_past, cov_future = (
all_covariates[:, : past_target.shape[1], :],
all_covariates[:, past_target.shape[1] :, :],
)
input_series = torch.cat([past_target, cov_past], dim=2)
else:
input_series = past_target
cov_future = None
batch_prediction = []
out, last_hidden_state = self._produce_predict_output(
(input_series, static_covariates)
)
batch_prediction.append(out[:, -1:, :])
prediction_length = 1
while prediction_length < n:
# create new input to model from last prediction and current covariates, if available
new_input = (
torch.cat(
[
out[:, -1:, :],
cov_future[:, prediction_length - 1 : prediction_length, :],
],
dim=2,
)
if cov_future is not None
else out[:, -1:, :]
)
# feed new input to model, including the last hidden state from the previous iteration
out, last_hidden_state = self._produce_predict_output(
(new_input, static_covariates), last_hidden_state
)
# append prediction to batch prediction array, increase counter
batch_prediction.append(out[:, -1:, :])
prediction_length += 1
# bring predictions into desired format and drop unnecessary values
batch_prediction = torch.cat(batch_prediction, dim=1)
batch_prediction = batch_prediction[:, :n, :]
return batch_prediction
class RNNModel(DualCovariatesTorchModel):
def __init__(
self,
input_chunk_length: int,
model: Union[str, nn.Module] = "RNN",
hidden_dim: int = 25,
n_rnn_layers: int = 1,
dropout: float = 0.0,
training_length: int = 24,
**kwargs
):
"""Recurrent Neural Network Model (RNNs).
This class provides three variants of RNNs:
* Vanilla RNN
* LSTM
* GRU
RNNModel is fully recurrent in the sense that, at prediction time, an output is computed using these inputs:
- previous target value, which will be set to the last known target value for the first prediction,
and for all other predictions it will be set to the previous prediction (in an auto-regressive fashion),
- the previous hidden state,
- the covariates at time `t` for forecasting the target at time `t` (if the model was trained with covariates),
This model supports future covariates; and it requires these covariates to extend far enough in the past
and the future (it's a so-called "dual covariates" model as the future covariates have to be provided both
in the past and the future). The model will complain if the provided `future_covariates` series doesn't have
an appropriate time span.
For a block version using an RNN model as an encoder only and supporting past
        covariates, check out `BlockRNNModel`.
Parameters
----------
input_chunk_length
Number of past time steps that are fed to the forecasting module at prediction time.
model
Either a string specifying the RNN module type ("RNN", "LSTM" or "GRU"),
or a PyTorch module with the same specifications as
`darts.models.rnn_model._RNNModule`.
hidden_dim
Size for feature maps for each hidden RNN layer (:math:`h_n`).
n_rnn_layers
The number of recurrent layers.
dropout
            Fraction of neurons affected by dropout.
training_length
The length of both input (target and covariates) and output (target) time series used during
training. Generally speaking, `training_length` should have a higher value than `input_chunk_length`
because otherwise during training the RNN is never run for as many iterations as it will during
            inference. For more information on this parameter, please see `darts.utils.data.ShiftedDataset`
**kwargs
Optional arguments to initialize the pytorch_lightning.Module, pytorch_lightning.Trainer, and
Darts' :class:`TorchForecastingModel`.
loss_fn
PyTorch loss function used for training.
This parameter will be ignored for probabilistic models if the ``likelihood`` parameter is specified.
Default: ``torch.nn.MSELoss()``.
torch_metrics
A torch metric or a ``MetricCollection`` used for evaluation. A full list of available metrics can be found
at https://torchmetrics.readthedocs.io/en/latest/. Default: ``None``.
likelihood
One of Darts' :meth:`Likelihood <darts.utils.likelihood_models.Likelihood>` models to be used for
probabilistic forecasts. Default: ``None``.
optimizer_cls
The PyTorch optimizer class to be used. Default: ``torch.optim.Adam``.
optimizer_kwargs
Optionally, some keyword arguments for the PyTorch optimizer (e.g., ``{'lr': 1e-3}``
for specifying a learning rate). Otherwise the default values of the selected ``optimizer_cls``
will be used. Default: ``None``.
lr_scheduler_cls
Optionally, the PyTorch learning rate scheduler class to be used. Specifying ``None`` corresponds
to using a constant learning rate. Default: ``None``.
lr_scheduler_kwargs
Optionally, some keyword arguments for the PyTorch learning rate scheduler. Default: ``None``.
batch_size
Number of time series (input and output sequences) used in each training pass. Default: ``32``.
n_epochs
Number of epochs over which to train the model. Default: ``100``.
model_name
Name of the model. Used for creating checkpoints and saving tensorboard data. If not specified,
defaults to the following string ``"YYYY-mm-dd_HH:MM:SS_torch_model_run_PID"``, where the initial part
            of the name is formatted with the local date and time, while PID is the process ID (preventing models
            spawned at the same time by different processes from sharing the same model_name). E.g.,
``"2021-06-14_09:53:32_torch_model_run_44607"``.
work_dir
Path of the working directory, where to save checkpoints and Tensorboard summaries.
Default: current working directory.
log_tensorboard
If set, use Tensorboard to log the different parameters. The logs will be located in:
``"{work_dir}/darts_logs/{model_name}/logs/"``. Default: ``False``.
nr_epochs_val_period
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``.
torch_device_str
Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None``
which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use
GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only.
.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs``
dict:
- ``{"accelerator": "cpu"}`` for CPU,
- ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer),
- ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS.
For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded). Default: ``False``.
save_checkpoints
Whether or not to automatically save the untrained model and checkpoints from training.
To load the model from checkpoint, call :func:`MyModelClass.load_from_checkpoint()`, where
:class:`MyModelClass` is the :class:`TorchForecastingModel` class that was used (such as :class:`TFTModel`,
:class:`NBEATSModel`, etc.). If set to ``False``, the model can still be manually saved using
:func:`save_model()` and loaded using :func:`load_model()`. Default: ``False``.
add_encoders
A large number of past and future covariates can be automatically generated with `add_encoders`.
This can be done by adding multiple pre-defined index encoders and/or custom user-made functions that
will be used as index encoders. Additionally, a transformer such as Darts' :class:`Scaler` can be added to
transform the generated covariates. This happens all under one hood and only needs to be specified at
model creation.
Read :meth:`SequentialEncoder <darts.utils.data.encoders.SequentialEncoder>` to find out more about
``add_encoders``. Default: ``None``. An example showing some of ``add_encoders`` features:
.. highlight:: python
.. code-block:: python
add_encoders={
'cyclic': {'future': ['month']},
'datetime_attribute': {'future': ['hour', 'dayofweek']},
'position': {'past': ['absolute'], 'future': ['relative']},
'custom': {'past': [lambda idx: (idx.year - 1950) / 50]},
'transformer': Scaler()
}
..
random_state
Control the randomness of the weights initialization. Check this
`link <https://scikit-learn.org/stable/glossary.html#term-random_state>`_ for more details.
Default: ``None``.
pl_trainer_kwargs
By default :class:`TorchForecastingModel` creates a PyTorch Lightning Trainer with several useful presets
that performs the training, validation and prediction processes. These presets include automatic
checkpointing, tensorboard logging, setting the torch device and more.
With ``pl_trainer_kwargs`` you can add additional kwargs to instantiate the PyTorch Lightning trainer
object. Check the `PL Trainer documentation
<https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html>`_ for more information about the
supported kwargs. Default: ``None``.
With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts'
:class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process.
The model will stop training early if the validation loss `val_loss` does not improve beyond
specifications. For more information on callbacks, visit:
`PyTorch Lightning Callbacks
<https://pytorch-lightning.readthedocs.io/en/stable/extensions/callbacks.html>`_
.. highlight:: python
.. code-block:: python
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# stop training when validation loss does not decrease more than 0.05 (`min_delta`) over
# a period of 5 epochs (`patience`)
my_stopper = EarlyStopping(
monitor="val_loss",
patience=5,
min_delta=0.05,
mode='min',
)
pl_trainer_kwargs={"callbacks": [my_stopper]}
..
Note that you can also use a custom PyTorch Lightning Trainer for training and prediction with optional
parameter ``trainer`` in :func:`fit()` and :func:`predict()`.
show_warnings
            Whether to show warnings raised from PyTorch Lightning. Useful for detecting potential issues with
            your forecasting use case. Default: ``False``.
"""
# create copy of model parameters
model_kwargs = {key: val for key, val in self.model_params.items()}
model_kwargs["output_chunk_length"] = 1
super().__init__(**self._extract_torch_model_params(**model_kwargs))
# extract pytorch lightning module kwargs
self.pl_module_params = self._extract_pl_module_params(**model_kwargs)
# check we got right model type specified:
if model not in ["RNN", "LSTM", "GRU"]:
raise_if_not(
isinstance(model, nn.Module),
'{} is not a valid RNN model.\n Please specify "RNN", "LSTM", '
'"GRU", or give your own PyTorch nn.Module'.format(
model.__class__.__name__
),
logger,
)
self.rnn_type_or_module = model
self.dropout = dropout
self.hidden_dim = hidden_dim
self.n_rnn_layers = n_rnn_layers
self.training_length = training_length
def _create_model(self, train_sample: Tuple[torch.Tensor]) -> torch.nn.Module:
# samples are made of (past_target, historic_future_covariates, future_covariates, future_target)
# historic_future_covariates and future_covariates have the same width
input_dim = train_sample[0].shape[1] + (
train_sample[1].shape[1] if train_sample[1] is not None else 0
)
output_dim = train_sample[-1].shape[1]
nr_params = 1 if self.likelihood is None else self.likelihood.num_parameters
if self.rnn_type_or_module in ["RNN", "LSTM", "GRU"]:
model = _RNNModule(
name=self.rnn_type_or_module,
input_size=input_dim,
target_size=output_dim,
nr_params=nr_params,
hidden_dim=self.hidden_dim,
dropout=self.dropout,
num_layers=self.n_rnn_layers,
**self.pl_module_params,
)
else:
model = self.rnn_type_or_module(
name="custom_module",
input_size=input_dim,
target_size=output_dim,
nr_params=nr_params,
hidden_dim=self.hidden_dim,
dropout=self.dropout,
num_layers=self.n_rnn_layers,
**self.pl_module_params,
)
return model
def _build_train_dataset(
self,
target: Sequence[TimeSeries],
past_covariates: Optional[Sequence[TimeSeries]],
future_covariates: Optional[Sequence[TimeSeries]],
max_samples_per_ts: Optional[int],
) -> DualCovariatesShiftedDataset:
return DualCovariatesShiftedDataset(
target_series=target,
covariates=future_covariates,
length=self.training_length,
shift=1,
max_samples_per_ts=max_samples_per_ts,
)
def _verify_train_dataset_type(self, train_dataset: TrainingDataset):
raise_if_not(
isinstance(train_dataset, DualCovariatesShiftedDataset),
"RNNModel requires a training dataset of type DualCovariatesShiftedDataset.",
)
raise_if_not(
train_dataset.ds_past.shift == 1,
"RNNModel requires a shifted training dataset with shift=1.",
)
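A minimal end-to-end sketch of this model on synthetic data (hyperparameters are illustrative; the import paths match this module's package):

import numpy as np
from darts import TimeSeries
from darts.models import RNNModel

# Fit an LSTM on a noisy sine wave and forecast 12 steps ahead.
values = np.sin(np.linspace(0, 20, 200)) + 0.1 * np.random.randn(200)
series = TimeSeries.from_values(values.astype(np.float32))
model = RNNModel(
    input_chunk_length=24,
    model="LSTM",
    hidden_dim=25,
    training_length=36,  # should exceed input_chunk_length, per the docstring
    n_epochs=5,
)
model.fit(series)
forecast = model.predict(n=12)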
| 45.865672
| 119
| 0.625494
|
7f3916cecad2996a44ed47cf29d75d8c6125d9b6
| 2,353
|
py
|
Python
|
fylesdk/apis/reports.py
|
Shwetabhk/fyle-sdk-py
|
7a321820a90afeb6ac9617eb04b5ba939ddbc688
|
[
"MIT"
] | null | null | null |
fylesdk/apis/reports.py
|
Shwetabhk/fyle-sdk-py
|
7a321820a90afeb6ac9617eb04b5ba939ddbc688
|
[
"MIT"
] | null | null | null |
fylesdk/apis/reports.py
|
Shwetabhk/fyle-sdk-py
|
7a321820a90afeb6ac9617eb04b5ba939ddbc688
|
[
"MIT"
] | null | null | null |
from .api_base import ApiBase
class Reports(ApiBase):
"""Class for Reports APIs."""
GET_REPORTS = '/api/tpa/v1/reports'
GET_REPORTS_COUNT = '/api/tpa/v1/reports/count'
def get(self, updated_at=None, settled_at=None, reimbursed_at=None, approved_at=None, state=None, offset=None, limit=None, exported=None):
"""Get a list of Reports.
Parameters:
updated_at (str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern. (optional)
offset (int): A cursor for use in pagination, offset is an object ID that defines your place in the list. (optional)
limit (int): A limit on the number of objects to be returned, between 1 and 1000. (optional)
            exported (bool): If set to true, only Reports that have already been exported are returned. (optional)
settled_at(str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern. (optional)
approved_at(str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern. (optional)
reimbursed_at(str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern. (optional)
state(str): A parameter to filter reports by the state that they're in. (optional)
Returns:
List with dicts in Reports schema.
"""
        return self._get_request({
            'updated_at': updated_at,
            'offset': offset,
            'limit': limit,
            'settled_at': settled_at,
            'reimbursed_at': reimbursed_at,
            'approved_at': approved_at,
            'state': state,
            'exported': exported
        }, Reports.GET_REPORTS)
def count(self, updated_at=None, exported=None):
"""Get the count of Reports that match the parameters.
Parameters:
updated_at (str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern. (optional)
            exported (bool): If set to true, only Reports that have already been exported are counted. (optional)
Returns:
Count of Reports.
"""
return self._get_request({
'updated_at': updated_at,
'exported': exported
}, Reports.GET_REPORTS_COUNT)
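A hedged usage sketch; the credentials are placeholders, and the assumption is that this SDK's FyleSDK client exposes this class as `Reports`:

from fylesdk import FyleSDK

connection = FyleSDK(
    base_url='https://app.fylehq.com',  # placeholder
    client_id='<client_id>',
    client_secret='<client_secret>',
    refresh_token='<refresh_token>',
)
# Date filters use the RHS colon pattern described in the docstrings above.
reports = connection.Reports.get(updated_at='gte:2020-01-01T00:00:00.000Z',
                                 state='APPROVED', limit=100)
count = connection.Reports.count(updated_at='gte:2020-01-01T00:00:00.000Z')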
| 49.020833
| 142
| 0.640884
|
e7d51a27941888335ddd5a81789a7aebddbfbfe8
| 6,456
|
py
|
Python
|
glance/tests/functional/db/test_migrations.py
|
dchavoll/stx-glance
|
1ec64167057e3368f27a1a81aca294b771e79c5e
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/functional/db/test_migrations.py
|
dchavoll/stx-glance
|
1ec64167057e3368f27a1a81aca294b771e79c5e
|
[
"Apache-2.0"
] | 5
|
2018-09-18T15:51:40.000Z
|
2019-01-08T16:38:44.000Z
|
glance/tests/functional/db/test_migrations.py
|
dchavoll/stx-glance
|
1ec64167057e3368f27a1a81aca294b771e79c5e
|
[
"Apache-2.0"
] | 6
|
2018-09-06T14:50:23.000Z
|
2018-11-27T21:32:51.000Z
|
# Copyright 2016 Rackspace
# Copyright 2016 Intel Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import command as alembic_command
from alembic import script as alembic_script
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
import sqlalchemy.types as types
from glance.db import migration as db_migration
from glance.db.sqlalchemy import alembic_migrations
from glance.db.sqlalchemy.alembic_migrations import versions
from glance.db.sqlalchemy import models
from glance.db.sqlalchemy import models_metadef
import glance.tests.utils as test_utils
class AlembicMigrationsMixin(object):
def _get_revisions(self, config, head=None):
head = head or db_migration.LATEST_REVISION
scripts_dir = alembic_script.ScriptDirectory.from_config(config)
revisions = list(scripts_dir.walk_revisions(base='base',
head=head))
revisions = list(reversed(revisions))
revisions = [rev.revision for rev in revisions]
return revisions
def _migrate_up(self, config, engine, revision, with_data=False):
if with_data:
data = None
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
data = pre_upgrade(engine)
alembic_command.upgrade(config, revision)
if with_data:
check = getattr(self, '_check_%s' % revision, None)
if check:
check(engine, data)
def test_walk_versions(self):
alembic_config = alembic_migrations.get_alembic_config(self.engine)
for revision in self._get_revisions(alembic_config):
self._migrate_up(alembic_config, self.engine, revision,
with_data=True)
class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase,
AlembicMigrationsMixin):
def test_mysql_innodb_tables(self):
test_utils.db_sync(engine=self.engine)
total = self.engine.execute(
"SELECT COUNT(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%s'"
% self.engine.url.database)
self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?")
noninnodb = self.engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%s' "
"AND ENGINE!='InnoDB' "
"AND TABLE_NAME!='migrate_version'"
% self.engine.url.database)
count = noninnodb.scalar()
self.assertEqual(0, count, "%d non InnoDB tables created" % count)
class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase,
AlembicMigrationsMixin):
pass
class TestSqliteMigrations(test_base.DbTestCase, AlembicMigrationsMixin):
pass
class TestMigrations(test_base.DbTestCase, test_utils.BaseTestCase):
def test_no_downgrade(self):
migrate_file = versions.__path__[0]
for parent, dirnames, filenames in os.walk(migrate_file):
for filename in filenames:
if filename.split('.')[1] == 'py':
model_name = filename.split('.')[0]
model = __import__(
'glance.db.sqlalchemy.alembic_migrations.versions.' +
model_name)
obj = getattr(getattr(getattr(getattr(getattr(
model, 'db'), 'sqlalchemy'), 'alembic_migrations'),
'versions'), model_name)
func = getattr(obj, 'downgrade', None)
self.assertIsNone(func)
class ModelsMigrationSyncMixin(object):
def get_metadata(self):
for table in models_metadef.BASE_DICT.metadata.sorted_tables:
models.BASE.metadata._add_table(table.name, table.schema, table)
return models.BASE.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
test_utils.db_sync(engine=engine)
# TODO(akamyshikova): remove this method as soon as comparison with Variant
# will be implemented in oslo.db or alembic
def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
if isinstance(meta_type, types.Variant):
meta_orig_type = meta_col.type
insp_orig_type = insp_col.type
meta_col.type = meta_type.impl
insp_col.type = meta_type.impl
try:
return self.compare_type(ctxt, insp_col, meta_col, insp_type,
meta_type.impl)
finally:
meta_col.type = meta_orig_type
insp_col.type = insp_orig_type
else:
ret = super(ModelsMigrationSyncMixin, self).compare_type(
ctxt, insp_col, meta_col, insp_type, meta_type)
if ret is not None:
return ret
return ctxt.impl.compare_type(insp_col, meta_col)
def include_object(self, object_, name, type_, reflected, compare_to):
if name in ['migrate_version'] and type_ == 'table':
return False
return True
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_base.DbTestCase):
pass
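The `_migrate_up` helper above discovers optional per-revision hooks by name; a sketch of such a pair on a mixin subclass (the revision id, table, and columns are made up):

class ExampleDataMigrationTest(AlembicMigrationsMixin):
    # Hooks are looked up as _pre_upgrade_<revision> and _check_<revision>.
    def _pre_upgrade_abc123(self, engine):
        # Seed a row before upgrading to revision 'abc123'.
        engine.execute(
            "INSERT INTO images (id, status) VALUES ('i-1', 'queued')")
        return {'image_id': 'i-1'}

    def _check_abc123(self, engine, data):
        row = engine.execute(
            "SELECT status FROM images WHERE id = '%s'"
            % data['image_id']).fetchone()
        assert row is not None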
| 37.103448
| 79
| 0.636462
|
57fa9b4f2df1644ab17fea6014dbe1d2c39311fd
| 1,431
|
py
|
Python
|
note3/linear_regression.py
|
yeyupiaoling/LearnPaddle2
|
7c53fa82cd6449c6583de79622edc612ec73064d
|
[
"Apache-2.0"
] | 163
|
2019-01-30T04:34:01.000Z
|
2021-12-10T12:19:03.000Z
|
note3/linear_regression.py
|
stonebb/LearnPaddle2
|
c3b6a9f5897e684b6de544cb12c959f7771a6c3c
|
[
"Apache-2.0"
] | 3
|
2019-07-15T07:14:17.000Z
|
2022-03-24T01:14:06.000Z
|
note3/linear_regression.py
|
stonebb/LearnPaddle2
|
c3b6a9f5897e684b6de544cb12c959f7771a6c3c
|
[
"Apache-2.0"
] | 83
|
2018-10-31T02:44:09.000Z
|
2022-03-25T13:40:54.000Z
|
import paddle.fluid as fluid
import numpy as np
# Define a simple linear network
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=x, size=100, act='relu')
net = fluid.layers.fc(input=hidden, size=1, act=None)
# Get the inference program
infer_program = fluid.default_main_program().clone(for_test=True)
# Define the loss function
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=net, label=y)
avg_cost = fluid.layers.mean(cost)
# Clone the main program for later use
test_program = fluid.default_main_program().clone(for_test=True)
# Define the optimization method
optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)
opts = optimizer.minimize(avg_cost)
# Create an executor that runs on the CPU
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Initialize the parameters
exe.run(fluid.default_startup_program())
# Define the training and test data
x_data = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]]).astype('float32')
y_data = np.array([[3.0], [5.0], [7.0], [9.0], [11.0]]).astype('float32')
test_data = np.array([[6.0]]).astype('float32')
# Train for 100 passes
for pass_id in range(100):
    train_cost = exe.run(program=fluid.default_main_program(),
                         feed={'x': x_data, 'y': y_data},
                         fetch_list=[avg_cost])
    print("Pass:%d, Cost:%0.5f" % (pass_id, train_cost[0]))
# Run inference
result = exe.run(program=infer_program,
                 feed={'x': test_data},
                 fetch_list=[net])
print("When x is 6.0, y is %0.5f" % result[0][0][0])
| 30.446809
| 73
| 0.659679
|
16b94b4557973d763a04842cafb36ecd943b9e40
| 3,108
|
py
|
Python
|
TM1py/Services/ElementService.py
|
lotsaram/TM1py
|
71a2fd1e30211e497bb2644f0d11376abd2c29a7
|
[
"MIT"
] | null | null | null |
TM1py/Services/ElementService.py
|
lotsaram/TM1py
|
71a2fd1e30211e497bb2644f0d11376abd2c29a7
|
[
"MIT"
] | null | null | null |
TM1py/Services/ElementService.py
|
lotsaram/TM1py
|
71a2fd1e30211e497bb2644f0d11376abd2c29a7
|
[
"MIT"
] | 1
|
2022-01-17T10:02:44.000Z
|
2022-01-17T10:02:44.000Z
|
# -*- coding: utf-8 -*-
import json
from TM1py.Objects.ElementAttribute import ElementAttribute
from TM1py.Services.ObjectService import ObjectService
class ElementService(ObjectService):
""" Service to handle Object Updates for TM1 Dimension (resp. Hierarchy) Elements
"""
def __init__(self, rest):
super().__init__(rest)
def get_element_attributes(self, dimension_name, hierarchy_name):
""" Get element attributes from hierarchy
:param dimension_name:
:param hierarchy_name:
:return:
"""
request = '/api/v1/Dimensions(\'{}\')/Hierarchies(\'{}\')/ElementAttributes'.format(dimension_name,
hierarchy_name)
response = self._rest.GET(request, '')
element_attributes = [ElementAttribute.from_dict(ea) for ea in json.loads(response)['value']]
return element_attributes
def get_elements_filtered_by_attribute(self, dimension_name, hierarchy_name, attribute_name, attribute_value):
""" Get all elements from a hierarchy with given attribute value
:param dimension_name:
:param hierarchy_name:
:param attribute_name:
:param attribute_value:
:return: List of element names
"""
attribute_name = attribute_name.replace(" ", "")
if isinstance(attribute_value, str):
request = "/api/v1/Dimensions('{}')/Hierarchies('{}')" \
"?$expand=Elements($filter = Attributes/{} eq '{}';$select=Name)" \
.format(dimension_name, hierarchy_name, attribute_name, attribute_value)
else:
request = "/api/v1/Dimensions('{}')/Hierarchies('{}')" \
"?$expand=Elements($filter = Attributes/{} eq {};$select=Name)" \
.format(dimension_name, hierarchy_name, attribute_name, attribute_value)
response = self._rest.GET(request)
response_as_dict = json.loads(response)
return [elem['Name'] for elem in response_as_dict['Elements']]
def create_element_attribute(self, dimension_name, hierarchy_name, element_attribute):
""" like AttrInsert
:param dimension_name:
:param hierarchy_name:
:param element_attribute: instance of TM1py.ElementAttribute
:return:
"""
request = "/api/v1/Dimensions('{}')/Hierarchies('{}')/ElementAttributes" \
.format(dimension_name, hierarchy_name)
return self._rest.POST(request, element_attribute.body)
def delete_element_attribute(self, dimension_name, hierarchy_name, element_attribute):
""" like AttrDelete
:param dimension_name:
:param hierarchy_name:
:param element_attribute: instance of TM1py.ElementAttribute
:return:
"""
request = "/api/v1/Dimensions('}}ElementAttributes_{}')/Hierarchies('}}ElementAttributes_{}')/Elements('{}')" \
.format(dimension_name, hierarchy_name, element_attribute)
return self._rest.DELETE(request, '')
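A usage sketch; in TM1py the service is normally reached through a TM1Service session. The `_tm1_rest` attribute is an internal handle (shown only to wire up this service class), and the dimension/attribute names are placeholders:

from TM1py import TM1Service

with TM1Service(address='localhost', port=8001, user='admin',
                password='', ssl=True) as tm1:
    elements = ElementService(tm1._tm1_rest)
    attrs = elements.get_element_attributes('Region', 'Region')
    usd_regions = elements.get_elements_filtered_by_attribute(
        'Region', 'Region', 'Currency', 'USD')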
| 42
| 119
| 0.630952
|
c97babb55880854648f00e7f636146acdcc5cdd7
| 31,143
|
py
|
Python
|
gen/tools/jnc_plugin/generators/java_method.py
|
xotonic/netconfessor
|
cf825e6c94db6dff77db7c236a2c4412c6f533ce
|
[
"Apache-2.0"
] | 2
|
2019-03-15T05:08:57.000Z
|
2020-09-08T08:13:07.000Z
|
gen/tools/jnc_plugin/generators/java_method.py
|
xotonic/netconfessor
|
cf825e6c94db6dff77db7c236a2c4412c6f533ce
|
[
"Apache-2.0"
] | null | null | null |
gen/tools/jnc_plugin/generators/java_method.py
|
xotonic/netconfessor
|
cf825e6c94db6dff77db7c236a2c4412c6f533ce
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
from .. import util, context
from ..java_value import JavaValue
from ..method import JavaMethod
from ..ordered_set import OrderedSet
class MethodGenerator(object):
"""A generator for JavaMethod objects"""
def __init__(self, stmt, ctx):
"""Sets the attributes of the method generator, depending on stmt"""
self.stmt = stmt
self.n = util.normalize(stmt.arg)
self.n2 = util.camelize(stmt.arg)
self.children = [util.normalize(s.arg) for s in
util.search(stmt, context.yangelement_stmts | context.leaf_stmts)]
self.ctx = ctx
self.module_stmt = util.get_module(stmt)
prefix = util.search_one(self.module_stmt, 'prefix')
self.root = util.schema_class(util.normalize(prefix.arg))
self.pkg = util.get_package(stmt, ctx)
self.basepkg = self.pkg.partition('.')[0]
self.rootpkg = ctx.rootpkg.split(os.sep)
if self.rootpkg[:1] == ['src']:
self.rootpkg = self.rootpkg[1:] # src not part of package
self.rootpkg.append(util.camelize(self.module_stmt.arg))
self.is_container = stmt.keyword in ('container', 'notification')
self.is_list = stmt.keyword == 'list'
self.is_typedef = stmt.keyword == 'typedef'
self.is_leaf = stmt.keyword == 'leaf'
self.is_anyxml = stmt.keyword == 'anyxml'
self.is_leaflist = stmt.keyword == 'leaf-list'
self.is_top_level = util.get_parent(self.stmt) == self.module_stmt
self.is_augmented = self.module_stmt != util.get_module(stmt.parent)
assert (self.is_container or self.is_list or self.is_typedef
or self.is_leaf or self.is_leaflist or self.is_anyxml)
self.gen = self
        if type(self) is MethodGenerator:
            if self.is_typedef:
                from .typedef import TypedefMethodGenerator
                self.gen = TypedefMethodGenerator(stmt, ctx)
            elif self.is_container:
                from .container import ContainerMethodGenerator
                self.gen = ContainerMethodGenerator(stmt, ctx)
            elif self.is_list:
                from .list import ListMethodGenerator
                self.gen = ListMethodGenerator(stmt, ctx)
            elif self.is_leaflist:
                from .leaf_list import LeafListMethodGenerator
                self.gen = LeafListMethodGenerator(stmt, ctx)
            elif self.is_leaf:
                from .leaf import LeafMethodGenerator
                self.gen = LeafMethodGenerator(stmt, ctx)
            elif self.is_anyxml:
                from .anyxml import AnyXmlMethodGenerator
                self.gen = AnyXmlMethodGenerator(stmt, ctx)
def canonical_import(self, import_, child=False):
"""Returns a string representing a class that can be imported in Java.
Does not handle Generics or Array types.
"""
if import_ == self.root:
return '.'.join(self.rootpkg + [import_])
elif import_ in self.children:
type_child = util.search_one(self.stmt, 'type')
if type_child is not None and util.normalize(type_child.arg) == import_:
try:
typedef_pkg = util.get_package(type_child.i_typedef, self.ctx)
except AttributeError:
typedef_pkg = util.get_package(type_child, self.ctx)
return '.'.join([typedef_pkg, import_])
return '.'.join([self.pkg, self.n2, import_])
elif child and import_ == self.n:
return '.'.join([self.pkg, import_])
else:
return util.get_import(import_)
def fix_imports(self, method, child=False):
res = set([])
imports = method.imports
if self.ctx.opts.import_on_demand:
imports = set([])
pkg = self.pkg
if child:
pkg = pkg.rpartition('.')[0]
pkg_classes = context.class_hierarchy.get(pkg, [])
for import_ in method.imports:
if import_.rpartition('.')[2] in pkg_classes:
if (child and not import_.rpartition('.')[1]
and import_ != self.root):
imports.add('.'.join([self.pkg, import_]))
else:
imports.add(import_)
for dependency in imports:
if dependency.startswith(('java.math', 'java.util',
'io.netconfessor', self.basepkg)):
res.add(dependency)
continue
elif dependency.endswith('>'):
for token in [_f for _f in re.findall(r'\w+', dependency) if _f]:
res.add(self.canonical_import(token, child))
elif dependency.endswith(']'):
assert dependency[:-2] and dependency[-2:] == '[]'
res.add(self.canonical_import(dependency[:-2], child))
else:
res.add(self.canonical_import(dependency, child))
method.imports = res
return method
def _root_namespace(self, stmt_arg):
"""Returns '([Root].NAMESPACE, "[stmt.arg]");'"""
return ['(', self.root, '.NAMESPACE, "', stmt_arg, '");']
def _constructor_template(self):
"""Returns a constructor invoking parent constructor, without
parameters and javadoc."""
constructor = JavaMethod(modifiers=['public'], name=self.n)
constructor.set_return_type(None)
if self.is_container or self.is_list or self.is_anyxml or self.is_leaf or self.is_leaflist:
call = ['super']
call.extend(self._root_namespace(self.stmt.arg))
constructor.add_dependency(self.root)
constructor.add_line(''.join(call))
if self.is_top_level or self.is_augmented:
constructor.add_line('setDefaultPrefix();')
setPrefix = ['setPrefix(', self.root, '.PREFIX);']
constructor.add_line(''.join(setPrefix))
elif self.is_typedef:
constructor.add_line('super(value);')
else:
return None
return self.fix_imports(constructor)
def access_methods_comment(self):
"""Returns a JavaValue representing a code structuring Java comment"""
res = [' /* Access methods for']
if hasattr(self.gen, 'is_optional') and self.gen.is_optional:
res.append('optional')
res.extend([self.stmt.keyword, 'child: "' + self.stmt.arg + '". */'])
return JavaValue(exact=[' '.join(res)])
def empty_constructor(self):
"""Returns parameter-free constructor as a JavaMethod object"""
assert not self.is_typedef, "Typedefs don't have empty constructors"
constructor = self._constructor_template()
javadoc = ['Constructor for an empty ']
javadoc.append(self.n)
javadoc.append(' object.')
constructor.add_javadoc(''.join(javadoc))
return self.fix_imports(constructor)
def constructors(self):
"""Returns a list of JavaMethods representing constructors to include
in generated class of self.stmt
"""
assert self.gen is not self, 'Avoid infinite recursion'
return self.gen.constructors()
def cloners(self):
if self.is_typedef:
return [] # Typedefs, leafs and leaflists don't have clone methods
cloners = [JavaMethod(), JavaMethod()]
a = (' an exact ', ' a shallow ')
b = ('', ' Children are not included.')
c = ('', 'Shallow')
keys = ''
if self.is_list:
getter_calls = []
for key_stmt in self.gen.key_stmts:
getter_calls.append(''.join([util.camelize(key_stmt.arg), '.getValue().toString()']))
keys = ', '.join(getter_calls)
for i, cloner in enumerate(cloners):
cloner.add_javadoc('Clones this object, returning' + a[i] + 'copy.')
cloner.add_javadoc('@return A clone of the object.' + b[i])
cloner.return_type = self.n
cloner.set_name('clone' + c[i])
copy = ''.join(['new ', self.n, '(', keys, ')'])
if self.is_list and self.gen.is_config:
cloner.add_line(self.n + ' copy;')
cloner.add_line('try {')
cloner.add_line(' copy = ' + copy + ';')
cloner.add_line('} catch (JNCException e) {')
cloner.add_line(' copy = null;')
cloner.add_line('}')
copy = 'copy'
cloner.add_line(''.join(['return (', self.n, ')clone', c[i],
'Content(', copy, ');']))
cloner = self.fix_imports(cloner)
# Clone method without children
        cloner_wo_children = JavaMethod()
        cloner_wo_children.return_type = self.n
        cloner_wo_children.set_name('cloneWithoutChildren')
        cloner_wo_children.add_line(''.join(['return new ', self.n, '();']))
        cloners.append(cloner_wo_children)
return cloners
def key_names(self):
"""Returns a method that can be used to get the keys of a statement.
The keys are returned by the generated method as a String array
with the identifiers of the keys in the statement of this generator,
which should be a list, container or notification, otherwise None is
returned. If the statement does not have any keys, the generated method
returns null.
"""
if not (self.is_list or self.is_container or self.is_anyxml):
return None
method = JavaMethod(modifiers=['public'], name='keyNames')
method.set_return_type('String[]')
method.add_javadoc('@return An array with the identifiers of any key children')
if self.is_container or self.is_anyxml or not self.gen.is_config:
method.add_line('return null;')
else:
method.add_line('return new String[] {')
for key_stmt in self.gen.key_stmts:
method.add_line('"'.join([' ' * 4,
key_stmt.arg,
',']))
method.add_line('};')
return self.fix_imports(method)
def children_names(self):
"""Returns a method that can be used to get the identifiers of the
children of the statement of this generator, excluding any keys.
"""
if not (self.is_list or self.is_container or self.is_anyxml):
return None
if self.is_anyxml:
method = JavaMethod(modifiers=['public'], name='childrenNames')
method.set_return_type('String[]')
method.add_line('return super.childrenNames();')
else:
method = JavaMethod(modifiers=['public'], name='childrenNames')
method.set_return_type('String[]')
method.add_javadoc('@return An array with the identifiers of any children, in order.')
children = util.search(self.stmt, context.yangelement_stmts | context.leaf_stmts)
method.add_line('return new String[] {')
for child in children:
method.add_line('"'.join([' ' * 4, child.arg, ',']))
method.add_line('};')
return self.fix_imports(method)
def support_method(self, fields=None):
if self.is_typedef:
return None
add_child = JavaMethod(modifiers=['public'],
return_type='void',
name='addChild',
params=[('Element', 'child')])
add_child.add_javadoc('Support method for addChild.')
add_child.add_javadoc('Adds a child to this object.')
add_child.add_javadoc('')
add_child.add_javadoc('@param child The child to add')
add_child.add_line('super.addChild(child);')
if fields is None:
fields = OrderedSet()
cond = ''
for field in fields: # could do reversed(fields) to preserve order
add_child.add_line(''.join([cond, 'if (child instanceof ',
util.normalize(field), ') ', util.camelize(field), ' = (',
util.normalize(field), ')child;']))
add_child.add_dependency(util.normalize(field))
cond = 'else '
return self.fix_imports(add_child)
def setters(self):
"""Returns a list of JavaMethods representing setters to include
in generated class of self.stmt
"""
assert self.gen is not self, 'Avoid infinite recursion'
return self.gen.setters()
def unsetter(self):
"""Returns an 'unset<Identifier>Value' JavaMethod for self.stmt"""
assert self.gen is not self, 'Avoid infinite recursion'
return None
def checker(self):
"""Returns a 'check' JavaMethod for generated class for self.stmt"""
assert self.gen is not self, 'Avoid infinite recursion'
return self.gen.checker() if self.is_typedef else None
def markers(self):
"""Generates methods that enqueues operations to be performed."""
assert self.gen is not self, 'Avoid infinite recursion'
return None if self.is_typedef else self.gen.markers()
def child_field(self):
"""Returns a string representing java code for a field"""
assert self.gen is not self, 'Avoid infinite recursion'
return self.gen.child_field() if self.is_container or self.is_anyxml or self.is_leaf else None
def _parent_template(self, method_type):
"""Returns an access method for the statement of this method generator.
method_type -- prefix of method name
"""
method = JavaMethod()
        if self.is_container or self.is_list or self.is_anyxml:
method.set_return_type(self.n)
method.set_name(method_type + self.n)
method.add_exception('JNCException')
return self.fix_imports(method, child=True)
def adders(self):
"""Returns a list of methods that adds an instance of the class to be
generated from the statement of this method generator to its parent
class.
"""
if self.is_leaflist:
assert self.gen != self
return self.gen.adders()
elif not (self.is_container or self.is_list or self.is_anyxml or self.is_leaf):
return None
number_of_adders = 2
if self.is_list and self.gen.is_config:
number_of_adders = 4
res = [self._parent_template('add') for _ in range(number_of_adders)]
for i, method in enumerate(res):
javadoc1 = ['Adds ', self.stmt.keyword, ' entry "', self.n2, '"']
javadoc2 = []
if i == 0: # Add existing object
javadoc1.append(', using an existing object.')
javadoc2.append(' '.join(['@param', self.n2, 'The object to add.']))
method.add_parameter(self.n, self.n2)
elif self.is_list and i in {1, 2} and len(res) == 4:
# Add child with String or JNC type keys
javadoc1.append(', with specified keys.')
if i == 2:
javadoc2.append('The keys are specified as strings.')
for key_stmt in self.gen.key_stmts:
# print(key_stmt.arg)
key_arg = util.camelize(key_stmt.arg) + 'Value'
javadoc2.append(''.join(['@param ', key_arg,
' Key argument of child.']))
param_type, _ = util.get_types(key_stmt, self.ctx)
if i == 2:
param_type = 'String'
method.add_parameter(param_type, key_arg)
new_child = [self.n, ' ', self.n2, ' = new ', self.n, '(']
keys = [util.camelize(s.arg) + 'Value' for s in self.gen.key_stmts]
new_child.append(', '.join(keys))
new_child.append(');')
method.add_line(''.join(new_child))
else: # Create new, for subtree filter usage
javadoc1.append('.')
javadoc2.append('This method is used for creating subtree filters.')
method.add_line(' '.join([self.n, self.n2, '= new', self.n + '();']))
method.add_javadoc(''.join(javadoc1))
for javadoc in javadoc2:
method.add_javadoc(javadoc)
method.add_javadoc('@return The added child.')
if self.is_container:
method.add_line('this.' + self.n2 + ' = ' + self.n2 + ';')
if self.is_list and i in {1, 2} and len(res) == 4:
method.add_line('return ' + method.name + '(' + self.n2 + ');')
elif self.is_leaf:
method.set_return_type(self.n)
method.add_line('this.' + self.n2 + ' = ' + self.n2 + ';')
method.add_line('insertChild(' + self.n2 + ', childrenNames());')
method.add_line('return ' + self.n2 + ';')
else:
method.add_line('insertChild(' + self.n2 + ', childrenNames());')
method.add_line('return ' + self.n2 + ';')
self.fix_imports(method, child=True)
return res
def getters(self):
"""Returns a list of JavaMethods representing getters to include
in generated class of parent
"""
assert self.gen is not self, 'Avoid infinite recursion'
return self.gen.getters() if self.is_list or self.is_leaf or self.is_anyxml else None
def deleters(self):
"""Returns a list of JavaMethods representing deleters to include
in generated class of parent
"""
assert self.gen is not self, 'Avoid infinite recursion'
if not (self.is_list or self.is_container or self.is_leaflist or self.is_anyxml):
return None
else:
return self.gen.deleters()
def child_iterator(self):
"""Returns a java iterator method"""
if not (self.is_leaflist or self.is_list):
return None
res = JavaMethod(name=(self.n2 + 'Iterator'))
res.add_javadoc(''.join(['Iterator method for the ', self.stmt.keyword,
' "', self.stmt.arg, '".']))
res.add_javadoc(''.join(['@return An iterator for the ',
self.stmt.keyword, '.']))
return_stmt = ['return new Element']
if self.is_leaflist:
res.set_return_type('ElementLeafListValueIterator')
return_stmt.append('LeafListValue')
else: # List
res.set_return_type(''.join(['ElementChildrenIterator', '<', self.n, '>']))
return_stmt.append('Children')
return_stmt.extend(['Iterator(children, "', self.stmt.arg, '");'])
res.add_line(''.join(return_stmt))
return self.fix_imports(res)
def parent_access_methods(self):
assert self.gen is not self, 'Avoid infinite recursion'
if self.is_container or self.is_list or self.is_anyxml or self.is_leaf or self.is_leaflist:
return self.gen.parent_access_methods()
else:
return None
def leaf_value_access_methods(self):
assert self.gen is not self, 'This method must be overriden'
assert self.is_leaf or self.is_leaflist
return self.gen.leaf_value_access_methods()
def create_value_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
static_type_factory = JavaMethod(name='createValue')
dependency = static_type_factory.add_dependency(value_type, this_class_name)
param_name = 'stringRepresentation'
static_type_factory.add_parameter('String', param_name, this_class_name)
static_type_factory.add_exception('JNCException')
static_type_factory.set_return_type(dependency, this_class_name)
return_stmt = 'return '
if self.gen.type_str[0] == 'io.netconfessor.YangEmpty':
static_type_factory.add_parameter('String', param_name)
l = [return_stmt, 'new ', dependency, '();']
static_type_factory.add_line(''.join(l))
else:
line = [return_stmt, 'new ', dependency,
'(', param_name]
if self.gen.type_str[0] == 'io.netconfessor.YangUnion':
line.append(', new String[] {')
static_type_factory.add_line(''.join(line))
for type_stmt in util.search(self.gen.base_type, 'type'):
member_type, _ = util.get_types(type_stmt, self.gen.ctx)
static_type_factory.add_line(' "' + member_type + '",')
line = ['}']
elif self.gen.type_str[0] == 'io.netconfessor.YangEnumeration':
line.append(', new String[] {')
static_type_factory.add_line(''.join(line))
for enum in util.search(self.gen.base_type, 'enum'):
static_type_factory.add_line(' ' + util.capitalize(enum.arg) + ',')
line = ['}']
elif self.gen.type_str[0] == 'io.netconfessor.YangBits':
line.append(',')
static_type_factory.add_line(''.join(line))
mask = 0
smap = [' new String[] {']
imap = [' new int[] {']
position = 0
for bit in util.search(self.gen.base_type, 'bit'):
smap.extend(['"', bit.arg, '", '])
pos_stmt = util.search_one(bit, 'position')
if pos_stmt:
position = int(pos_stmt.arg)
imap.extend([str(position), ', '])
mask += 1 << position
position += 1
smap.append('},')
imap.append('}')
static_type_factory.add_line(''.join([' new BigInteger("',
str(mask), '"),']))
static_type_factory.add_line(''.join(smap))
static_type_factory.add_line(''.join(imap))
line = []
elif self.gen.type_str[0] == 'io.netconfessor.YangDecimal64':
frac_digits = util.search_one(self.gen.base_type, 'fraction-digits')
line.extend([', ', frac_digits.arg])
line.append(');')
static_type_factory.add_line(''.join(line))
self.fix_imports(static_type_factory, child=True)
return static_type_factory
def value_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
setter = JavaMethod(name='value')
setter.add_exception('JNCException')
param_name = self.n2 + 'Value'
setter.add_javadoc('Sets the value ' + self.stmt.keyword +
' "' + self.stmt.arg + '",')
setter.add_line(''.join(['setValue(', param_name, ');']))
setter.add_parameter(value_type, param_name, this_class_name)
setter.add_javadoc(' '.join(['@param', param_name, 'used during instantiation.']))
self.fix_imports(setter, child=True)
return setter
def get_value_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
getter = JavaMethod(name='value')
getter.set_return_type(value_type, this_class_name)
getter.add_line(' '.join(['return',
'(' + getter.add_dependency(value_type, this_class_name) + ')', 'getValue();']))
self.fix_imports(getter, child=True)
return getter
def get_value_class_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
getter = JavaMethod(name='valueClass')
getter.set_return_type_generic('Class', value_type, this_class_name)
getter.add_line(' '.join(['return', getter.add_dependency(value_type, this_class_name) + '.class;']))
self.fix_imports(getter, child=True)
return getter
def default_value_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
default_getter = JavaMethod(name='defaultValue')
default_getter.set_return_type(value_type, this_class_name)
default_getter.add_exception('JNCException')
# Leaves with a default value returns it instead of null
if self.gen.default:
new_value = ['new ', default_getter.return_type, '("',
self.gen.default_value]
if self.gen.type_str[0] == 'io.netconfessor.YangUnion':
new_value.append('", new String[] { \n')
for type_stmt in util.search(self.gen.base_type, 'type'):
member_type, _ = util.get_types(type_stmt, self.gen.ctx)
new_value.append(' ' * 16 + '"' + member_type + '",\n')
new_value.append(' ' * 12 + '})')
elif self.gen.type_str[0] == 'io.netconfessor.YangEnumeration':
new_value.append('", new String[] {\n')
for enum in util.search(self.gen.base_type, 'enum'):
new_value.append(' ' * 16 + '"' + enum.arg + '",\n')
new_value.append(' ' * 12 + '})')
elif self.gen.type_str[0] == 'io.netconfessor.YangBits':
new_value.append('",')
default_getter.add_line(''.join(new_value))
mask = 0
smap = [' new String[] {']
imap = [' new int[] {']
position = 0
for bit in util.search(self.gen.base_type, 'bit'):
smap.extend(['"', bit.arg, '", '])
pos_stmt = util.search_one(bit, 'position')
if pos_stmt:
position = int(pos_stmt.arg)
imap.extend([str(position), ', '])
mask += 1 << position
position += 1
smap.append('},')
imap.append('}')
line = [' ' * 8, 'new BigInteger("', str(mask), '"),']
default_getter.add_line(''.join(line))
default_getter.add_line(''.join(smap))
default_getter.add_line(''.join(imap))
new_value = [' )']
elif self.gen.type_str[0] == 'io.netconfessor.YangDecimal64':
fraction_digits = util.search_one(self.gen.base_type, 'fraction-digits')
new_value.extend(['", ', fraction_digits.arg, ')'])
else:
new_value.append('")')
default_getter.add_line(''.join(['return ', ''.join(new_value), ';']))
else:
default_getter.add_line('return null;')
self.fix_imports(default_getter, child=True)
return default_getter
def is_key_method(self):
assert self.is_leaf
default_getter_predicate = JavaMethod(name='isKey')
default_getter_predicate.set_return_type('boolean')
default_getter_predicate.add_line('return ' + ('true' if self.gen.is_key else 'false') + ";")
self.gen.fix_imports(default_getter_predicate, child=True)
return default_getter_predicate
def has_default_value_method(self):
assert self.is_leaf or self.is_leaflist
default_getter_predicate = JavaMethod(name='hasDefaultValue')
default_getter_predicate.set_return_type('boolean')
default_getter_predicate.add_line('return ' + ('true' if self.gen.default else 'false') + ";")
self.gen.fix_imports(default_getter_predicate, child=True)
return default_getter_predicate
def instantiate_value_method(self, this_class_name, value_type):
assert self.is_leaf or self.is_leaflist
type_factory_method = JavaMethod(name='instantiateValue')
param_name = 'stringRepresentation'
type_factory_method.add_parameter('String', param_name, this_class_name)
type_factory_method.add_exception('JNCException')
setter_name = 'value'
if self.gen.type_str[0] == 'io.netconfessor.YangEmpty':
type_factory_method.add_parameter('String', param_name)
l = [setter_name, '(new ', type_factory_method.add_dependency(value_type, this_class_name), '());']
type_factory_method.add_line(''.join(l))
else:
line = [setter_name, '(new ', type_factory_method.add_dependency(value_type, this_class_name),
'(', param_name]
if self.gen.type_str[0] == 'io.netconfessor.YangUnion':
line.append(', new String[] {')
type_factory_method.add_line(''.join(line))
for type_stmt in util.search(self.gen.base_type, 'type'):
member_type, _ = util.get_types(type_stmt, self.gen.ctx)
type_factory_method.add_line(' "' + member_type + '",')
line = ['}']
elif self.gen.type_str[0] == 'io.netconfessor.YangEnumeration':
line.append(', new String[] {')
type_factory_method.add_line(''.join(line))
for enum in util.search(self.gen.base_type, 'enum'):
type_factory_method.add_line(' "' + enum.arg + '",')
line = ['}']
elif self.gen.type_str[0] == 'io.netconfessor.YangBits':
line.append(',')
type_factory_method.add_line(''.join(line))
mask = 0
smap = [' new String[] {']
imap = [' new int[] {']
position = 0
for bit in util.search(self.gen.base_type, 'bit'):
smap.extend(['"', bit.arg, '", '])
pos_stmt = util.search_one(bit, 'position')
if pos_stmt:
position = int(pos_stmt.arg)
imap.extend([str(position), ', '])
mask += 1 << position
position += 1
smap.append('},')
imap.append('}')
type_factory_method.add_line(''.join([' new BigInteger("',
str(mask), '"),']))
type_factory_method.add_line(''.join(smap))
type_factory_method.add_line(''.join(imap))
line = []
elif self.gen.type_str[0] == 'io.netconfessor.YangDecimal64':
frac_digits = util.search_one(self.gen.base_type, 'fraction-digits')
line.extend([', ', frac_digits.arg])
line.append('));')
type_factory_method.add_line(''.join(line))
self.fix_imports(type_factory_method, child=True)
return type_factory_method
def enums(self):
return self.gen.enums()
def enum_consts(self):
return self.gen.enum_consts()
| 45.664223
| 114
| 0.568218
|
97b8de7b0b9b2435af45d70e232e8e137cbcbf47
| 1,289
|
py
|
Python
|
S202-mongo-crud/db/database.py
|
GuilhermeMarcondesPixel/labS202
|
671dd8ba88e25fdfc70eae517b305367ea8c0a50
|
[
"MIT"
] | null | null | null |
S202-mongo-crud/db/database.py
|
GuilhermeMarcondesPixel/labS202
|
671dd8ba88e25fdfc70eae517b305367ea8c0a50
|
[
"MIT"
] | null | null | null |
S202-mongo-crud/db/database.py
|
GuilhermeMarcondesPixel/labS202
|
671dd8ba88e25fdfc70eae517b305367ea8c0a50
|
[
"MIT"
] | null | null | null |
import pymongo
class Database:
def __init__(self, database, collection, dataset=None):
connectionString = "mongodb://localhost:27017/?readPreference=primary&appname=MongoDB%20Compass&directConnection=true&ssl=false"
self.clusterConnection = pymongo.MongoClient(
connectionString,
            # In case the [SSL_INVALID_CERTIFICATE] error occurs
tlsAllowInvalidCertificates=True
)
self.db = self.clusterConnection[database]
self.collection = self.db[collection]
if dataset:
self.dataset = dataset
def resetDatabase(self):
self.db.drop_collection(self.collection)
self.collection.insert_many(self.dataset)
def create(self, nome, autor, ano, preco):
return self.collection.insert_one({"nome": nome, "autor": autor, "ano": ano, "preco": preco})
def read(self):
return self.collection.find({})
def update(self, nome, preco):
return self.collection.update_one(
{"nome": nome},
            {
                # A dict literal cannot repeat the "$set" key (the first entry
                # would be silently dropped), so both fields share one "$set".
                "$set": {"nome": nome, "preco": preco},
                "$currentDate": {"lastModified": True}
            }
)
def delete(self, nome):
return self.collection.delete_one({"nome": nome})
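# Usage sketch (hypothetical data; assumes a MongoDB server on localhost:27017):
if __name__ == "__main__":
    db = Database("library", "books")
    db.create("Dom Casmurro", "Machado de Assis", 1899, 29.90)
    for book in db.read():
        print(book)
    db.update("Dom Casmurro", 34.90)
    db.delete("Dom Casmurro")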
| 33.051282
| 136
| 0.605896
|
c0e69480886dfb96a16903c170a1062298287263
| 5,755
|
py
|
Python
|
server/app/outputs/dmxfixtures/gobo.py
|
BasementCat/audio-reactive-led-strip
|
acbfd3709ecf3f970c604045bb62da0b47661330
|
[
"MIT"
] | 1
|
2020-05-14T06:27:34.000Z
|
2020-05-14T06:27:34.000Z
|
server/app/outputs/dmxfixtures/gobo.py
|
BasementCat/audio-reactive-led-strip
|
acbfd3709ecf3f970c604045bb62da0b47661330
|
[
"MIT"
] | null | null | null |
server/app/outputs/dmxfixtures/gobo.py
|
BasementCat/audio-reactive-led-strip
|
acbfd3709ecf3f970c604045bb62da0b47661330
|
[
"MIT"
] | null | null | null |
import random
from app.effects import Effect
from app.lib.misc import map_to_range
from . import BasicDMX
from .movinghead import MovingHeadMixin
class GoboMixin:
INVERT = ['speed', 'strobe']
CLEAR_EFFECTS_ON_NEW_STATE = ['pan', 'tilt', 'speed', 'dim']
RESET_ON_NEW_STATE = ['speed', 'dim']
def _get_dead_coasting_effects(self):
color = self.map_color(None, 1, 0)
gobo = self.map_gobo(None, 1, 0)
return {
'color': Effect(color, color, 8),
'gobo': Effect(gobo, gobo, 8),
}
def _map_pan_tilt(self, function, trigger, value, threshold):
if value < threshold:
return
cur_value = self.auto_state[function]
distance = int(map_to_range(value, threshold) * (max(cur_value, 255 - cur_value)))
choices = [
min(cur_value + distance, 255),
max(cur_value - distance, 0),
]
return random.choice(choices)
def map_pan(self, trigger, value, threshold):
return self._map_pan_tilt('pan', trigger, value, threshold)
def map_tilt(self, trigger, value, threshold):
return self._map_pan_tilt('tilt', trigger, value, threshold)
def map_color(self, trigger, value, threshold):
if value >= threshold:
half_color = random.random() > 0.75
if half_color:
return random.randint(57, 127)
return random.randint(0, 56)
def map_gobo(self, trigger, value, threshold):
if value >= threshold:
dither = random.random() > 0.75
if dither:
return random.randint(64, 127)
return random.randint(0, 63)
def map_strobe(self, trigger, value, threshold):
# should set a high threshold for this
if value > threshold:
# TODO: might need a different value for some lights
if 'strobe' not in self.effects:
self.effects['strobe'] = Effect(255, 255, 1, 0)
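# Worked example for _map_pan_tilt (assuming map_to_range rescales `value`
# from [threshold, 1] onto [0, 1]): with auto_state['pan'] == 200,
# value == 1.0 and threshold == 0.5, distance == int(1.0 * max(200, 55)) == 200,
# so the new pan is chosen at random from min(200 + 200, 255) == 255 and
# max(200 - 200, 0) == 0.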
class UKingGobo(GoboMixin, MovingHeadMixin, BasicDMX):
FUNCTIONS = {
'pan': 1,
'pan_fine': 2,
'tilt': 3,
'tilt_fine': 4,
'color': 5,
'gobo': 6,
'strobe': 7,
'dim': 8,
'speed': 9,
'mode': 10,
'dim_mode': 11,
}
INITIALIZE = {
'pan': 0,
'pan_fine': 0,
'tilt': 0,
'tilt_fine': 0,
'color': 0,
'gobo': 0,
'strobe': 0,
'dim': 255,
'speed': 255,
'mode': 0,
'dim_mode': 0
}
RATES = {
'pan': 0.75,
'tilt': 0.75,
'gobo': 0.25,
'color': 0.25,
'strobe': 10,
'dim': 0.125,
}
ENUMS = {
'color': {
'white': (0, 9),
'red': (10, 19),
'green': (20, 29),
'blue': (30, 39),
'yellow': (40, 49),
'orange': (50, 59),
'cyan': (60, 69),
'pink': (70, 79),
'pink_cyan': (80, 89),
'cyan_orange': (90, 99),
'orange_yellow': (100, 109),
'yellow_blue': (110, 119),
'blue_green': (120, 127),
},
'gobo': {
'none': (0, 7),
'broken_circle': (8, 15),
'burst': (16, 23),
'3_spot_circle': (24, 31),
'square_spots': (32, 39),
'droplets': (40, 47),
'swirl': (48, 55),
'stripes': (56, 63),
'dither_none': (64, 71),
'dither_broken_circle': (72, 79),
'dither_burst': (80, 87),
'dither_3_spot_circle': (88, 95),
'dither_square_spots': (96, 103),
'dither_droplets': (104, 111),
'dither_swirl': (112, 119),
'dither_stripes': (120, 127),
}
}
class UnnamedGobo(GoboMixin, MovingHeadMixin, BasicDMX):
FUNCTIONS = {
'pan': 1,
'pan_fine': 2,
'tilt': 3,
'tilt_fine': 4,
'color': 5,
'gobo': 6,
'strobe': 7,
'dim': 8,
'speed': 9,
'mode': 10,
        'dim_mode': 11,  # Actually reset, but renaming it breaks channel linking
}
INITIALIZE = {
'pan': 0,
'pan_fine': 0,
'tilt': 0,
'tilt_fine': 0,
'color': 0,
'gobo': 0,
'strobe': 0,
'dim': 255,
'speed': 255,
'mode': 0,
'dim_mode': 0,
}
RATES = {
'pan': 0.75,
'tilt': 0.75,
'gobo': 0.25,
'color': 0.25,
'strobe': 10,
'dim': 0.125,
}
ENUMS = {
'color': {
'white': (0, 9),
'yellow': (10, 19),
'orange': (20, 29),
'cyan': (30, 39),
'blue': (40, 49),
'green': (50, 59),
'pink': (60, 69),
'red': (70, 79),
'pink_red': (80, 89),
'green_pink': (90, 99),
'blue_green': (100, 109),
'cyan_blue': (110, 119),
'orange_cyan': (120, 129),
'yellow_orange': (130, 139),
},
'gobo': {
'none': (0, 7),
'broken_circle': (8, 15),
'burst': (16, 23),
'3_spot_circle': (24, 31),
'square_spots': (32, 39),
'droplets': (40, 47),
'swirl': (48, 55),
'stripes': (56, 63),
'dither_none': (64, 71),
'dither_broken_circle': (72, 79),
'dither_burst': (80, 87),
'dither_3_spot_circle': (88, 95),
'dither_square_spots': (96, 103),
'dither_droplets': (104, 111),
'dither_swirl': (112, 119),
'dither_stripes': (120, 127),
}
}
| 27.274882
| 90
| 0.45404
|
1d30b7ed836531f3ae4ab21ecca843cff8f9edac
| 5,523
|
py
|
Python
|
configs/representation/archive/rnd_moco/rnd-moco_r18_rnd_video_2x8x1_50e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/archive/rnd_moco/rnd-moco_r18_rnd_video_2x8x1_50e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/archive/rnd_moco/rnd-moco_r18_rnd_video_2x8x1_50e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
# model settings
temperature = 0.01
with_norm = True
query_dim = 128
model = dict(
type='RNDMoCoTracker',
queue_dim=query_dim,
img_queue_size=256 * 48,
patch_queue_size=256 * 144,
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
strides=(1, 2, 1, 1),
norm_eval=False,
zero_init_residual=True),
cls_head=dict(
type='UVCHead',
loss_feat=None,
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=with_norm,
loss_weight=1.),
loss_bbox=dict(type='L1Loss', loss_weight=10.),
in_channels=512,
channels=128,
temperature=temperature,
with_norm=with_norm,
init_std=0.01,
track_type='center'),
img_head=dict(
type='MoCoHead',
loss_feat=dict(type='MultiPairNCE', loss_weight=1.),
in_channels=512,
channels=query_dim,
temperature=temperature,
with_norm=with_norm),
patch_head=dict(
type='MoCoHead',
loss_feat=dict(type='MultiPairNCE', loss_weight=1.),
in_channels=512,
channels=query_dim,
temperature=temperature,
with_norm=with_norm),
)
# model training and testing settings
train_cfg = dict(
patch_size=96,
diff_crop=False,
skip_cycle=True,
strong_aug=True,
center_ratio=0.,
shuffle_bn=True)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
strides=(1, 2, 1, 1),
out_indices=(
2,
3,
),
neighbor_range=40,
with_norm=with_norm,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
# dict(type='Resize', scale=(-1, 256)),
# dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion', p=0.8),
dict(type='RandomGrayScale', p=0.2),
dict(type='RandomGaussianBlur', p=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=48,
workers_per_gpu=4,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
# optimizer = dict(type='SGD', lr=1e-1)
optimizer_config = dict(grad_clip=None)
# learning policy
# lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['rnd-moco'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
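# Loading sketch (mmaction2 configs are plain Python files; this assumes mmcv
# is installed and the file lives at the path used in this repository):
#   from mmcv import Config
#   cfg = Config.fromfile(
#       'configs/representation/archive/rnd_moco/'
#       'rnd-moco_r18_rnd_video_2x8x1_50e_k400_rgb.py')
#   print(cfg.model['type'])  # -> 'RNDMoCoTracker'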
| 30.346154
| 78
| 0.626833
|
86fa28aef5eee3258dec4af4a85c12300ec31810
| 9,003
|
py
|
Python
|
src/mechaclient.py
|
diraven/pipsqueak3
|
5b6ea19dbc22944beaccf41e5f2589bf31fa8bb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/mechaclient.py
|
diraven/pipsqueak3
|
5b6ea19dbc22944beaccf41e5f2589bf31fa8bb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/mechaclient.py
|
diraven/pipsqueak3
|
5b6ea19dbc22944beaccf41e5f2589bf31fa8bb5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
mechaclient.py - Pydle Client component for mechasqueak3
Copyright (c) 2018 The Fuel Rat Mischief,
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md
This module is built on top of the Pydle system.
"""
import functools
from typing import Optional
from loguru import logger
from uuid import uuid4
from pydle import Client
from .config.datamodel import ConfigRoot
from .packages.board import RatBoard
from .packages.commands import trigger
from .packages.fuelrats_api.v3.interface import ApiV300WSS
from .packages.permissions import require_permission, TECHRAT
from .packages.context.context import Context
from .packages.fact_manager.fact_manager import FactManager
from .packages.galaxy import Galaxy
from .packages.graceful_errors import graceful_errors
from .packages.utils import sanitize
from .features.message_history import MessageHistoryClient
from typing import Dict
from datetime import datetime, timezone
import prometheus_client
from prometheus_async.aio import time as aio_time
ON_MESSAGE_TIME = prometheus_client.Histogram(
name="on_message",
namespace="client",
documentation="time in on_message",
unit="seconds"
)
TRACKED_MESSAGES = prometheus_client.Gauge(
namespace="client",
name="tracked_messages",
documentation="number of last messages tracked"
)
IGNORED_MESSAGES = prometheus_client.Counter(
name="ignored_messages",
namespace="client",
documentation="messages ignored by the client."
)
ERRORS = prometheus_client.Counter(
name="errors",
namespace="client",
documentation="errors detected during message handling"
)
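# The collectors above register with prometheus_client's default registry; a
# scrape endpoint can be exposed with the stock exporter (the port here is an
# assumption, not taken from this repo):
#   prometheus_client.start_http_server(9091)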
@require_permission(TECHRAT)
async def _on_invite(ctx: Context):
await ctx.bot.join(ctx.channel)
class MechaClient(Client, MessageHistoryClient):
"""
    MechaSqueak v3
"""
__version__ = "3.0a"
def __init__(self, *args, mecha_config: ConfigRoot, **kwargs):
"""
Custom mechasqueak constructor
Unused arguments are passed through to pydle's constructor
Args:
*args (list): arguments
**kwargs (list): keyword arguments
"""
self._api_handler: Optional[ApiV300WSS] = None
        self._fact_manager = None  # Fact manager is created lazily (see the fact_manager property)
self._last_user_message: Dict[str, str] = {} # Holds last message from user, by irc nick
self._rat_cache = None # TODO: replace with ratcache once it exists
        self._rat_board = None  # Rat board is created lazily (see the board property)
self._config = mecha_config
self._galaxy = None
self._start_time = datetime.now(tz=timezone.utc)
self._on_invite = require_permission(TECHRAT)(functools.partial(self._on_invite))
TRACKED_MESSAGES.set_function(lambda: len(self._last_user_message))
super().__init__(*args, **kwargs)
async def on_connect(self):
"""
Called upon connection to the IRC server
:return:
"""
        logger.debug("Connecting to channels...")
# join a channel
for channel in self._config.irc.channels:
logger.debug(f"Configured channel {channel}")
await self.join(channel)
logger.debug("joined channels.")
# call the super
await super().on_connect()
#
# def on_join(self, channel, user):
# super().on_join(channel, user)
async def on_invite(self, channel, by):
logger.info(f"invited to channel {channel!r} by user {by!r}")
# create context from message, tie the channel to the sender
# (this ensures access-denys get sent to the right place)
ctx = await Context.from_message(self, sender=by, channel=by, message=channel)
logger.debug("invited to channel, context is {}", ctx)
return await self._on_invite(ctx)
async def _on_invite(self, ctx):
await self.join(ctx.words[0])
@aio_time(ON_MESSAGE_TIME)
async def on_message(self, channel, user, message: str):
"""
Triggered when a message is received
:param channel: Channel the message arrived in
:param user: user that triggered the message
:param message: message body
:return:
"""
await super().on_message(channel, user, message)
logger.debug(f"{channel}: <{user}> {message}")
if user == self._config.irc.nickname:
            # without this check, the bot can get into an infinite
            # self-stimulated positive feedback loop.
logger.debug(f"Ignored {message} (anti-loop)")
IGNORED_MESSAGES.inc()
return None
# await command execution
# sanitize input string headed to command executor
sanitized_message = sanitize(message)
logger.debug(f"Sanitized {sanitized_message}, Original: {message}")
try:
self._last_user_message[user.casefold()] = sanitized_message # Store sanitized message
ctx = await Context.from_message(
self,
channel=channel,
sender=user,
message=sanitized_message
)
if not ctx.words:
logger.trace("ignoring empty message")
IGNORED_MESSAGES.inc()
return
await trigger(ctx)
# Disable pylint's complaint here, as a broad catch is exactly what we want.
except Exception as ex: # pylint: disable=broad-except
ERRORS.inc()
ex_uuid = uuid4()
logger.exception(ex_uuid)
error_message = graceful_errors.make_graceful(ex, ex_uuid)
# and report it to the user
await self.message(channel, error_message)
# Vhost Handler
async def on_raw_396(self, message):
"""
Handle an IRC 396 message. This message is sent upon successful application of a host mask
via usermode +x.
"""
logger.info(f"{message.params[0]}@{message.params[1]} {message.params[2]}.")
@property
def rat_cache(self) -> object:
"""
Rat Cache
"""
return self._rat_cache
@property
def fact_manager(self) -> FactManager:
"""
Fact Manager
This is initialized in a lazy way to increase overall startup speed.
"""
if not self._fact_manager:
# Instantiate Global Fact Manager
self._fact_manager = FactManager()
return self._fact_manager
@fact_manager.setter
def fact_manager(self, manager: FactManager):
"""
Fact Manager Setter.
"""
if not isinstance(manager, FactManager):
raise TypeError("fact_manager requires a FactManager.")
logger.warning("Fact manager setter invoked!")
self._fact_manager = manager
@fact_manager.deleter
def fact_manager(self):
"""
Fact Manager Deleter
"""
logger.warning("Fact Manager deleter invoked!")
del self._fact_manager
self._fact_manager = None
@property
def api_handler(self) -> ApiV300WSS:
"""
API Handler property
"""
if self._api_handler is None:
self._api_handler = ApiV300WSS(config=self._config.api)
self.board.api_handler = self._api_handler
return self._api_handler
@property
def board(self) -> RatBoard:
"""
Rat Board property
"""
if self._rat_board is None:
self._rat_board = RatBoard(
api_handler=self._api_handler if self._api_handler else None
) # Create Rat Board Object
return self._rat_board
@board.setter
def board(self, value):
"""
Rat Board Setter
"""
if not isinstance(value, RatBoard):
raise TypeError("board property must be of type RatBoard.")
logger.warning("Board Setter invoked!")
self._rat_board = value
@board.deleter
def board(self):
"""
Rat Board Deleter
"""
logger.warning("Board Deleted!")
del self._rat_board
self._rat_board = None
@property
def galaxy(self) -> Galaxy:
"""
Galaxy property
"""
if not self._galaxy:
self._galaxy = Galaxy()
return self._galaxy
@galaxy.setter
def galaxy(self, value):
"""
Galaxy setter
"""
if not isinstance(value, Galaxy):
raise TypeError("galaxy property must be of type Galaxy.")
logger.warning("Galaxy Setter invoked!")
self._galaxy = value
@galaxy.deleter
def galaxy(self):
"""
Galaxy deleter
"""
logger.warning("Galaxy deleted!")
del self._galaxy
self._galaxy = None
@property
def last_user_message(self) -> Dict[str, str]:
return self._last_user_message
@property
def start_time(self) -> datetime:
return self._start_time
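# Minimal wiring sketch (an assumption about deployment, not code from this
# repo; pydle clients take the nickname as the first positional argument):
#   bot = MechaClient(config.irc.nickname, mecha_config=config)
#   bot.run(hostname=config.irc.server, port=config.irc.port, tls=True)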
| 29.910299
| 99
| 0.632789
|
1ee340ba9d8036fb68a02e137f776b96ac607bb1
| 14,586
|
py
|
Python
|
test/integration/test_auth.py
|
ubragg/endpoints-management-python
|
119c1f1d9a79217194beb0685a5fd11008695c19
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_auth.py
|
ubragg/endpoints-management-python
|
119c1f1d9a79217194beb0685a5fd11008695c19
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_auth.py
|
ubragg/endpoints-management-python
|
119c1f1d9a79217194beb0685a5fd11008695c19
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import object
import copy
import flask
import mock
import os
import ssl
import threading
import time
import unittest
from Crypto import PublicKey
from jwkest import ecc
from jwkest import jwk
from test import token_utils
from endpoints_management import auth
from endpoints_management.auth import suppliers
from endpoints_management.auth import tokens
class IntegrationTest(unittest.TestCase):
_CURRENT_TIME = int(time.time())
_PORT = 8080
_ISSUER = u"https://localhost:%d" % _PORT
_PROVIDER_ID = u"localhost"
_INVALID_X509_PATH = u"invalid-x509"
_JWKS_PATH = u"jwks"
_SERVICE_NAME = u"service@name.com"
_X509_PATH = u"x509"
_JWT_CLAIMS = {
u"aud": [u"https://aud1.local.host", u"https://aud2.local.host"],
u"exp": _CURRENT_TIME + 60,
u"email": u"user@local.host",
u"iss": _ISSUER,
u"sub": u"subject-id"}
_ec_jwk = jwk.ECKey(use=u"sig").load_key(ecc.P256)
_rsa_key = jwk.RSAKey(use=u"sig").load_key(PublicKey.RSA.generate(1024))
_ec_jwk.kid = u"ec-key-id"
_rsa_key.kid = u"rsa-key-id"
_mock_timer = mock.MagicMock()
_jwks = jwk.KEYS()
_jwks._keys.append(_ec_jwk)
_jwks._keys.append(_rsa_key)
_AUTH_TOKEN = token_utils.generate_auth_token(_JWT_CLAIMS, _jwks._keys,
alg=u"RS256", kid=_rsa_key.kid)
@classmethod
def setUpClass(cls):
dirname = os.path.dirname(os.path.realpath(__file__))
cls._cert_file = os.path.join(dirname, u"ssl.cert")
cls._key_file = os.path.join(dirname, u"ssl.key")
os.environ[u"REQUESTS_CA_BUNDLE"] = cls._cert_file
rest_server = cls._RestServer()
rest_server.start()
def setUp(self):
self._provider_ids = {}
self._configs = {}
self._authenticator = auth.create_authenticator(self._provider_ids,
self._configs)
self._auth_info = mock.MagicMock()
self._auth_info.is_provider_allowed.return_value = True
self._auth_info.get_allowed_audiences.return_value = [
u"https://aud1.local.host"]
def test_verify_auth_token_with_jwks(self):
url = get_url(IntegrationTest._JWKS_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
user_info = self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
self._assert_user_info_equals(tokens.UserInfo(IntegrationTest._JWT_CLAIMS),
user_info)
def test_authenticate_auth_token_with_bad_signature(self):
new_rsa_key = jwk.RSAKey(use=u"sig").load_key(PublicKey.RSA.generate(2048))
kid = IntegrationTest._rsa_key.kid
new_rsa_key.kid = kid
new_jwks = jwk.KEYS()
new_jwks._keys.append(new_rsa_key)
auth_token = token_utils.generate_auth_token(IntegrationTest._JWT_CLAIMS,
new_jwks._keys, alg=u"RS256",
kid=kid)
url = get_url(IntegrationTest._JWKS_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
message = u"Signature verification failed"
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(auth_token, self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_verify_auth_token_with_x509(self):
url = get_url(IntegrationTest._X509_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
user_info = self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
self._assert_user_info_equals(tokens.UserInfo(IntegrationTest._JWT_CLAIMS),
user_info)
def test_verify_auth_token_with_invalid_x509(self):
url = get_url(IntegrationTest._INVALID_X509_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
message = u"Cannot load X.509 certificate"
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_openid_discovery(self):
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(True,
None)
user_info = self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
self._assert_user_info_equals(tokens.UserInfo(IntegrationTest._JWT_CLAIMS),
user_info)
def test_openid_discovery_failed(self):
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
None)
message = (u"Cannot find the `jwks_uri` for issuer %s" %
IntegrationTest._ISSUER)
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_authenticate_with_malformed_auth_code(self):
with self.assertRaisesRegexp(suppliers.UnauthenticatedException,
u"Cannot decode the auth token"):
self._authenticator.authenticate(u"invalid-auth-code", self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_authenticate_with_disallowed_issuer(self):
url = get_url(IntegrationTest._JWKS_PATH)
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
message = u"Unknown issuer: " + self._ISSUER
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_authenticate_with_unknown_issuer(self):
message = (u"Cannot find the `jwks_uri` for issuer %s: "
u"either the issuer is unknown") % IntegrationTest._ISSUER
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_authenticate_with_invalid_audience(self):
url = get_url(IntegrationTest._JWKS_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
self._auth_info.get_allowed_audiences.return_value = []
with self.assertRaisesRegexp(suppliers.UnauthenticatedException,
u"Audiences not allowed"):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
@mock.patch(u"time.time", _mock_timer)
def test_authenticate_with_expired_auth_token(self):
url = get_url(IntegrationTest._JWKS_PATH)
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
IntegrationTest._mock_timer.return_value = 0
# Create an auth token that expires in 10 seconds.
jwt_claims = copy.deepcopy(IntegrationTest._JWT_CLAIMS)
jwt_claims[u"exp"] = time.time() + 10
auth_token = token_utils.generate_auth_token(jwt_claims,
IntegrationTest._jwks._keys,
alg=u"RS256",
kid=IntegrationTest._rsa_key.kid)
    # Verify that the freshly created auth token can be authenticated
    # successfully while the mocked clock still reads 0.
    self._authenticator.authenticate(auth_token, self._auth_info,
                                     IntegrationTest._SERVICE_NAME)
# Advance the timer by 20 seconds and make sure the token is expired.
IntegrationTest._mock_timer.return_value += 20
message = u"The auth token has already expired"
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(auth_token, self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_invalid_openid_discovery_url(self):
issuer = u"https://invalid.issuer"
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[issuer] = suppliers.IssuerUriConfig(True, None)
jwt_claims = copy.deepcopy(IntegrationTest._JWT_CLAIMS)
jwt_claims[u"iss"] = issuer
auth_token = token_utils.generate_auth_token(jwt_claims,
IntegrationTest._jwks._keys,
alg=u"RS256",
kid=IntegrationTest._rsa_key.kid)
message = u"Cannot discover the jwks uri"
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(auth_token, self._auth_info,
IntegrationTest._SERVICE_NAME)
def test_invalid_jwks_uri(self):
url = u"https://invalid.jwks.uri"
self._provider_ids[self._ISSUER] = self._PROVIDER_ID
self._configs[IntegrationTest._ISSUER] = suppliers.IssuerUriConfig(False,
url)
message = u"Cannot retrieve valid verification keys from the `jwks_uri`"
with self.assertRaisesRegexp(suppliers.UnauthenticatedException, message):
self._authenticator.authenticate(IntegrationTest._AUTH_TOKEN,
self._auth_info,
IntegrationTest._SERVICE_NAME)
def _assert_user_info_equals(self, expected, actual):
self.assertEqual(expected.audiences, actual.audiences)
self.assertEqual(expected.email, actual.email)
self.assertEqual(expected.subject_id, actual.subject_id)
self.assertEqual(expected.issuer, actual.issuer)
class _RestServer(object):
def __init__(self):
app = flask.Flask(u"integration-test-server")
@app.route(u"/" + IntegrationTest._JWKS_PATH)
def get_json_web_key_set(): # pylint: disable=unused-variable
return IntegrationTest._jwks.dump_jwks()
@app.route(u"/" + IntegrationTest._X509_PATH)
def get_x509_certificates(): # pylint: disable=unused-variable
cert = IntegrationTest._rsa_key.key.publickey().exportKey(u"PEM")
return flask.jsonify({IntegrationTest._rsa_key.kid: cert.decode('ascii')})
@app.route(u"/" + IntegrationTest._INVALID_X509_PATH)
def get_invalid_x509_certificates(): # pylint: disable=unused-variable
return flask.jsonify({IntegrationTest._rsa_key.kid: u"invalid cert"})
@app.route(u"/.well-known/openid-configuration")
def get_openid_configuration(): # pylint: disable=unused-variable
return flask.jsonify({u"jwks_uri": get_url(IntegrationTest._JWKS_PATH)})
self._application = app
def start(self):
def run_app():
ssl_context = (IntegrationTest._cert_file, IntegrationTest._key_file)
self._application.run(port=IntegrationTest._PORT,
ssl_context=ssl_context)
thread = threading.Thread(target=run_app, args=())
thread.daemon = True
thread.start()
def get_url(path):
return u"https://localhost:%d/%s" % (IntegrationTest._PORT, path)
| 48.782609
| 90
| 0.595503
|
0ea0591ca1cf44e1097cc1c75d2be384875f82e6
| 2,203
|
py
|
Python
|
database/migrations/0046_auto_20210128_1632.py
|
ORC-RIS/beiwe-backend
|
af2c43f79350bf0fc1ce8efafab1ac9c40008c40
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T16:25:50.000Z
|
2022-03-12T16:25:50.000Z
|
database/migrations/0046_auto_20210128_1632.py
|
ORC-RIS/beiwe-backend
|
af2c43f79350bf0fc1ce8efafab1ac9c40008c40
|
[
"BSD-3-Clause"
] | null | null | null |
database/migrations/0046_auto_20210128_1632.py
|
ORC-RIS/beiwe-backend
|
af2c43f79350bf0fc1ce8efafab1ac9c40008c40
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.14 on 2021-01-28 16:32
import datetime
import django.core.validators
from django.db import migrations, models
# Due to complex timezone bugs we are simply deleting all absolute schedules
# from the system in this migration. There were no live deployments other than
# Onnela Lab's staging deployment at the time of writing.
def purge_absolute_schedules(apps, schema_editor):
AbsoluteSchedule = apps.get_model('database', 'AbsoluteSchedule')
AbsoluteSchedule.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('database', '0045_auto_20210121_0301'),
]
operations = [
migrations.RemoveField(
model_name='participant',
name='timezone',
),
migrations.RemoveField(
model_name='study',
name='timezone',
),
migrations.AddField(
model_name='absoluteschedule',
name='date',
field=models.DateField(default=datetime.date(1900, 1, 1)),
preserve_default=False,
),
migrations.AddField(
model_name='absoluteschedule',
name='hour',
field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(23)]),
preserve_default=False,
),
migrations.AddField(
model_name='absoluteschedule',
name='minute',
field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(59)]),
preserve_default=False,
),
migrations.AddField(
model_name='participant',
name='timezone_name',
field=models.CharField(default='America/New_York', max_length=256),
),
migrations.AddField(
model_name='study',
name='timezone_name',
field=models.CharField(default='America/New_York', max_length=256),
),
migrations.RemoveField(
model_name='absoluteschedule',
name='scheduled_date',
),
migrations.RunPython(purge_absolute_schedules, migrations.RunPython.noop),
]
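# Applying just this migration follows the standard Django workflow:
#   python manage.py migrate database 0046_auto_20210128_1632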
| 33.378788
| 116
| 0.62778
|
e97cf2ac95834eeb6ee61bd6e0c7d0dbabb63916
| 1,271
|
py
|
Python
|
ffeatools/FFEA_initialise/Surface_tools/scale_netgen_surf.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | null | null | null |
ffeatools/FFEA_initialise/Surface_tools/scale_netgen_surf.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | null | null | null |
ffeatools/FFEA_initialise/Surface_tools/scale_netgen_surf.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | 1
|
2021-04-03T16:08:21.000Z
|
2021-04-03T16:08:21.000Z
|
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
import sys
import FFEA_surf
if len(sys.argv) != 4:
sys.exit("Usage: python scale_netgen_surf.py [INPUT .surf] [OUTPUT .surf] [scale]")
# Get args
insurffname = sys.argv[1]
outsurffname = sys.argv[2]
scale = float(sys.argv[3])
# Make and scale a surf
surf = FFEA_surf.FFEA_surf(insurffname)
surf.scale(scale)
# Write to out_fname
surf.write_to_netgen_surf(outsurffname)
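# Example invocation (hypothetical file names; scale is a float multiplier):
#   python scale_netgen_surf.py molecule.surf molecule_scaled.surf 1e-10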
| 30.261905
| 84
| 0.734854
|
e23e61d59187713e4edbf49c19e7236cc0bb6827
| 5,500
|
py
|
Python
|
test/functional/disconnect_ban.py
|
xraymemory/manna
|
b2f118bdce9b6a128ef171798ab3fac483517afa
|
[
"MIT"
] | null | null | null |
test/functional/disconnect_ban.py
|
xraymemory/manna
|
b2f118bdce9b6a128ef171798ab3fac483517afa
|
[
"MIT"
] | null | null | null |
test/functional/disconnect_ban.py
|
xraymemory/manna
|
b2f118bdce9b6a128ef171798ab3fac483517afa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The manna Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.mininode import wait_until
from test_framework.test_framework import mannaTestFramework
from test_framework.util import (assert_equal,
assert_raises_jsonrpc,
connect_nodes_bi)
class DisconnectBanTest(mannaTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban("127.0.0.1", "add")
assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 second
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.nodes[1] = self.start_node(1, self.options.tmpdir)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['id']
assert_raises_jsonrpc(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_jsonrpc(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
| 50.458716
| 153
| 0.659455
|
c2742cc2a0e91afa9e8ad2a374a6dc121aa1bd9c
| 12,322
|
py
|
Python
|
python/mojo_bindings/messaging.py
|
ttyangf/mojo
|
ca344f878ae23db0289644d78d58aa4b77108e08
|
[
"BSD-3-Clause"
] | 1
|
2020-04-28T14:35:10.000Z
|
2020-04-28T14:35:10.000Z
|
mojo/public/python/mojo_bindings/messaging.py
|
TribeMedia/sky_engine
|
4a3894ed246327931b198a7d64652bd0b615b036
|
[
"BSD-3-Clause"
] | null | null | null |
mojo/public/python/mojo_bindings/messaging.py
|
TribeMedia/sky_engine
|
4a3894ed246327931b198a7d64652bd0b615b036
|
[
"BSD-3-Clause"
] | 1
|
2020-04-28T14:35:11.000Z
|
2020-04-28T14:35:11.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility classes to handle sending and receiving messages."""
import struct
import sys
import weakref
import mojo_bindings.serialization as serialization
# pylint: disable=E0611,F0401
import mojo_system as system
# The flag values for a message header.
NO_FLAG = 0
MESSAGE_EXPECTS_RESPONSE_FLAG = 1 << 0
MESSAGE_IS_RESPONSE_FLAG = 1 << 1
class MessagingException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.__traceback__ = sys.exc_info()[2]
class MessageHeader(object):
"""The header of a mojo message."""
_SIMPLE_MESSAGE_VERSION = 0
_SIMPLE_MESSAGE_STRUCT = struct.Struct("<IIII")
_REQUEST_ID_STRUCT = struct.Struct("<Q")
_REQUEST_ID_OFFSET = _SIMPLE_MESSAGE_STRUCT.size
_MESSAGE_WITH_REQUEST_ID_VERSION = 1
_MESSAGE_WITH_REQUEST_ID_SIZE = (
_SIMPLE_MESSAGE_STRUCT.size + _REQUEST_ID_STRUCT.size)
def __init__(self, message_type, flags, request_id=0, data=None):
self._message_type = message_type
self._flags = flags
self._request_id = request_id
self._data = data
@classmethod
def Deserialize(cls, data):
buf = buffer(data)
if len(data) < cls._SIMPLE_MESSAGE_STRUCT.size:
raise serialization.DeserializationException('Header is too short.')
(size, version, message_type, flags) = (
cls._SIMPLE_MESSAGE_STRUCT.unpack_from(buf))
if (version < cls._SIMPLE_MESSAGE_VERSION):
raise serialization.DeserializationException('Incorrect version.')
request_id = 0
if _HasRequestId(flags):
if version < cls._MESSAGE_WITH_REQUEST_ID_VERSION:
raise serialization.DeserializationException('Incorrect version.')
if (size < cls._MESSAGE_WITH_REQUEST_ID_SIZE or
len(data) < cls._MESSAGE_WITH_REQUEST_ID_SIZE):
raise serialization.DeserializationException('Header is too short.')
(request_id, ) = cls._REQUEST_ID_STRUCT.unpack_from(
buf, cls._REQUEST_ID_OFFSET)
return MessageHeader(message_type, flags, request_id, data)
@property
def message_type(self):
return self._message_type
# pylint: disable=E0202
@property
def request_id(self):
assert self.has_request_id
return self._request_id
# pylint: disable=E0202
@request_id.setter
def request_id(self, request_id):
assert self.has_request_id
self._request_id = request_id
self._REQUEST_ID_STRUCT.pack_into(self._data, self._REQUEST_ID_OFFSET,
request_id)
@property
def has_request_id(self):
return _HasRequestId(self._flags)
@property
def expects_response(self):
return self._HasFlag(MESSAGE_EXPECTS_RESPONSE_FLAG)
@property
def is_response(self):
return self._HasFlag(MESSAGE_IS_RESPONSE_FLAG)
@property
def size(self):
if self.has_request_id:
return self._MESSAGE_WITH_REQUEST_ID_SIZE
return self._SIMPLE_MESSAGE_STRUCT.size
def Serialize(self):
if not self._data:
self._data = bytearray(self.size)
version = self._SIMPLE_MESSAGE_VERSION
size = self._SIMPLE_MESSAGE_STRUCT.size
if self.has_request_id:
version = self._MESSAGE_WITH_REQUEST_ID_VERSION
size = self._MESSAGE_WITH_REQUEST_ID_SIZE
self._SIMPLE_MESSAGE_STRUCT.pack_into(self._data, 0, size, version,
self._message_type, self._flags)
if self.has_request_id:
self._REQUEST_ID_STRUCT.pack_into(self._data, self._REQUEST_ID_OFFSET,
self._request_id)
return self._data
def _HasFlag(self, flag):
return self._flags & flag != 0
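# Round-trip sketch (illustrative values): a header survives
# Serialize/Deserialize, and the flag accessors reflect the packed bits.
#   header = MessageHeader(message_type=2, flags=MESSAGE_EXPECTS_RESPONSE_FLAG)
#   parsed = MessageHeader.Deserialize(header.Serialize())
#   assert parsed.expects_response and not parsed.is_response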
class Message(object):
"""A message for a message pipe. This contains data and handles."""
def __init__(self, data=None, handles=None, header=None):
self.data = data
self.handles = handles
self._header = header
self._payload = None
@property
def header(self):
if self._header is None:
self._header = MessageHeader.Deserialize(self.data)
return self._header
@property
def payload(self):
if self._payload is None:
self._payload = Message(self.data[self.header.size:], self.handles)
return self._payload
def SetRequestId(self, request_id):
header = self.header
header.request_id = request_id
    # Serialize() returns the packed header bytes; splice them over the
    # header region of the message data (the property is `size`, not `Size`).
    data = header.Serialize()
    self.data[:header.size] = data[:header.size]
class MessageReceiver(object):
"""A class which implements this interface can receive Message objects."""
def Accept(self, message):
"""
Receive a Message. The MessageReceiver is allowed to mutate the message.
Args:
message: the received message.
Returns:
True if the message has been handled, False otherwise.
"""
raise NotImplementedError()
class MessageReceiverWithResponder(MessageReceiver):
"""
A MessageReceiver that can also handle the response message generated from the
given message.
"""
def AcceptWithResponder(self, message, responder):
"""
A variant on Accept that registers a MessageReceiver (known as the
responder) to handle the response message generated from the given message.
The responder's Accept method may be called as part of the call to
AcceptWithResponder, or some time after its return.
Args:
message: the received message.
responder: the responder that will receive the response.
Returns:
True if the message has been handled, False otherwise.
"""
raise NotImplementedError()
class ConnectionErrorHandler(object):
"""
A ConnectionErrorHandler is notified of an error happening while using the
bindings over message pipes.
"""
def OnError(self, result):
raise NotImplementedError()
class Connector(MessageReceiver):
"""
A Connector owns a message pipe and will send any received messages to the
registered MessageReceiver. It also acts as a MessageReceiver and will send
any message through the handle.
The method Start must be called before the Connector will start listening to
incoming messages.
"""
def __init__(self, handle):
MessageReceiver.__init__(self)
self._handle = handle
self._cancellable = None
self._incoming_message_receiver = None
self._error_handler = None
def __del__(self):
if self._cancellable:
self._cancellable()
def SetIncomingMessageReceiver(self, message_receiver):
"""
Set the MessageReceiver that will receive message from the owned message
pipe.
"""
self._incoming_message_receiver = message_receiver
def SetErrorHandler(self, error_handler):
"""
Set the ConnectionErrorHandler that will be notified of errors on the owned
message pipe.
"""
self._error_handler = error_handler
def Start(self):
assert not self._cancellable
self._RegisterAsyncWaiterForRead()
def Accept(self, message):
result = self._handle.WriteMessage(message.data, message.handles)
return result == system.RESULT_OK
def Close(self):
if self._cancellable:
self._cancellable()
self._cancellable = None
self._handle.Close()
def PassMessagePipe(self):
if self._cancellable:
self._cancellable()
self._cancellable = None
result = self._handle
self._handle = system.Handle()
return result
def _OnAsyncWaiterResult(self, result):
self._cancellable = None
if result == system.RESULT_OK:
self._ReadOutstandingMessages()
else:
self._OnError(result)
def _OnError(self, result):
assert not self._cancellable
if self._error_handler:
self._error_handler.OnError(result)
self._handle.Close()
  def _RegisterAsyncWaiterForRead(self):
assert not self._cancellable
self._cancellable = self._handle.AsyncWait(
system.HANDLE_SIGNAL_READABLE,
system.DEADLINE_INDEFINITE,
_WeakCallback(self._OnAsyncWaiterResult))
def _ReadOutstandingMessages(self):
result = None
dispatched = True
while dispatched:
result, dispatched = _ReadAndDispatchMessage(
self._handle, self._incoming_message_receiver)
if result == system.RESULT_SHOULD_WAIT:
self._RegisterAsyncWaiterForRead()
return
self._OnError(result)
class Router(MessageReceiverWithResponder):
"""
  A Router handles mojo messages and forwards them to a Connector. It parses
  headers and assigns request ids so that a response can be matched to its
  request.
"""
def __init__(self, handle):
MessageReceiverWithResponder.__init__(self)
self._incoming_message_receiver = None
self._next_request_id = 1
self._responders = {}
self._connector = Connector(handle)
self._connector.SetIncomingMessageReceiver(
ForwardingMessageReceiver(_WeakCallback(self._HandleIncomingMessage)))
def Start(self):
self._connector.Start()
def SetIncomingMessageReceiver(self, message_receiver):
"""
Set the MessageReceiver that will receive message from the owned message
pipe.
"""
self._incoming_message_receiver = message_receiver
def SetErrorHandler(self, error_handler):
"""
Set the ConnectionErrorHandler that will be notified of errors on the owned
message pipe.
"""
self._connector.SetErrorHandler(error_handler)
def Accept(self, message):
# A message without responder is directly forwarded to the connector.
return self._connector.Accept(message)
def AcceptWithResponder(self, message, responder):
# The message must have a header.
header = message.header
assert header.expects_response
request_id = self._NextRequestId()
header.request_id = request_id
if not self._connector.Accept(message):
return False
self._responders[request_id] = responder
return True
def Close(self):
self._connector.Close()
def PassMessagePipe(self):
return self._connector.PassMessagePipe()
def _HandleIncomingMessage(self, message):
header = message.header
if header.expects_response:
if self._incoming_message_receiver:
return self._incoming_message_receiver.AcceptWithResponder(
message, self)
# If we receive a request expecting a response when the client is not
# listening, then we have no choice but to tear down the pipe.
self.Close()
return False
if header.is_response:
request_id = header.request_id
responder = self._responders.pop(request_id, None)
if responder is None:
return False
return responder.Accept(message)
if self._incoming_message_receiver:
return self._incoming_message_receiver.Accept(message)
# Ok to drop the message
return False
def _NextRequestId(self):
request_id = self._next_request_id
while request_id == 0 or request_id in self._responders:
request_id = (request_id + 1) % (1 << 64)
self._next_request_id = (request_id + 1) % (1 << 64)
return request_id
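  # Wraparound example: with _next_request_id == 2**64 - 1 and no outstanding
  # responders, this returns 2**64 - 1; the follow-up call skips 0 and hands
  # out 1, keeping ids inside the wire format's unsigned 64-bit range.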
class ForwardingMessageReceiver(MessageReceiver):
"""A MessageReceiver that forward calls to |Accept| to a callable."""
def __init__(self, callback):
MessageReceiver.__init__(self)
self._callback = callback
def Accept(self, message):
return self._callback(message)
def _WeakCallback(callback):
  """Wrap a bound method so it holds only a weak reference to its instance
  (relies on the Python 2 im_func/im_self attributes); unbound callables are
  returned unchanged."""
  func = callback.im_func
  self = callback.im_self
if not self:
return callback
weak_self = weakref.ref(self)
def Callback(*args, **kwargs):
self = weak_self()
if self:
return func(self, *args, **kwargs)
return Callback
def _ReadAndDispatchMessage(handle, message_receiver):
dispatched = False
(result, _, sizes) = handle.ReadMessage()
if result == system.RESULT_OK and message_receiver:
dispatched = message_receiver.Accept(Message(bytearray(), []))
if result != system.RESULT_RESOURCE_EXHAUSTED:
return (result, dispatched)
(result, data, _) = handle.ReadMessage(bytearray(sizes[0]), sizes[1])
if result == system.RESULT_OK and message_receiver:
dispatched = message_receiver.Accept(Message(data[0], data[1]))
return (result, dispatched)
def _HasRequestId(flags):
return flags & (MESSAGE_EXPECTS_RESPONSE_FLAG|MESSAGE_IS_RESPONSE_FLAG) != 0
| 30.053659
| 80
| 0.718065
|
8dc48238e9aac2702fd911fafa3ecca3e2a8e037
| 8,195
|
py
|
Python
|
solum/tests/api/controllers/v1/test_component.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/api/controllers/v1/test_component.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/api/controllers/v1/test_component.py
|
devdattakulkarni/test-solum
|
4e9ddb82d217116aa2c30a6f2581080cbdfae325
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from solum.api.controllers.v1 import component
from solum.api.controllers.v1.datamodel import component as componentmodel
from solum.common import exception
from solum import objects
from solum.tests import base
from solum.tests import fakes
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.component_handler.ComponentHandler')
class TestComponentController(base.BaseTestCase):
def setUp(self):
super(TestComponentController, self).setUp()
objects.load()
def test_component_get(self, ComponentHandler, resp_mock, request_mock):
hand_get = ComponentHandler.return_value.get
fake_component = fakes.FakeComponent()
hand_get.return_value = fake_component
obj = component.ComponentController('test_id')
resp = obj.get()
self.assertIsNotNone(resp)
self.assertEqual(fake_component.name, resp['result'].name)
self.assertEqual(fake_component.description,
resp['result'].description)
hand_get.assert_called_with('test_id')
self.assertEqual(200, resp_mock.status)
def test_component_get_not_found(self, ComponentHandler,
resp_mock, request_mock):
hand_get = ComponentHandler.return_value.get
hand_get.side_effect = exception.ResourceNotFound(
name='component', component_id='test_id')
cont = component.ComponentController('test_id')
cont.get()
hand_get.assert_called_with('test_id')
self.assertEqual(404, resp_mock.status)
def test_component_put_none(self, ComponentHandler,
resp_mock, request_mock):
request_mock.body = None
request_mock.content_type = 'application/json'
hand_put = ComponentHandler.return_value.put
hand_put.return_value = fakes.FakeComponent()
component.ComponentController('test_id').put()
self.assertEqual(400, resp_mock.status)
def test_component_put_not_found(self, ComponentHandler,
resp_mock, request_mock):
json_update = {'user_id': 'foo', 'name': 'appy'}
request_mock.body = json.dumps(json_update)
request_mock.content_type = 'application/json'
hand_update = ComponentHandler.return_value.update
hand_update.side_effect = exception.ResourceNotFound(
name='component', component_id='test_id')
component.ComponentController('test_id').put()
hand_update.assert_called_with('test_id', json_update)
self.assertEqual(404, resp_mock.status)
def test_component_put_ok(self, ComponentHandler, resp_mock, request_mock):
json_update = {'name': 'update_foo',
'description': 'update_desc_component',
'user_id': 'user_id_test',
'project_id': 'project_id_test'}
request_mock.body = json.dumps(json_update)
request_mock.content_type = 'application/json'
hand_update = ComponentHandler.return_value.update
hand_update.return_value = fakes.FakeComponent()
component.ComponentController('test_id').put()
hand_update.assert_called_with('test_id', json_update)
self.assertEqual(200, resp_mock.status)
def test_component_delete_not_found(self, ComponentHandler,
resp_mock, request_mock):
hand_delete = ComponentHandler.return_value.delete
hand_delete.side_effect = exception.ResourceNotFound(
name='component', component_id='test_id')
obj = component.ComponentController('test_id')
obj.delete()
hand_delete.assert_called_with('test_id')
self.assertEqual(404, resp_mock.status)
def test_component_delete_ok(self, ComponentHandler,
resp_mock, request_mock):
hand_delete = ComponentHandler.return_value.delete
hand_delete.return_value = None
obj = component.ComponentController('test_id')
obj.delete()
hand_delete.assert_called_with('test_id')
self.assertEqual(204, resp_mock.status)
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.component_handler.ComponentHandler')
class TestComponentsController(base.BaseTestCase):
def setUp(self):
super(TestComponentsController, self).setUp()
objects.load()
def test_components_get_all(self, handler_mock, resp_mock, request_mock):
hand_get_all = handler_mock.return_value.get_all
fake_component = fakes.FakeComponent()
hand_get_all.return_value = [fake_component]
obj = component.ComponentsController()
resp = obj.get_all()
hand_get_all.assert_called_with()
self.assertIsNotNone(resp)
self.assertEqual(fake_component.name, resp['result'][0].name)
self.assertEqual(fake_component.description,
resp['result'][0].description)
self.assertEqual(200, resp_mock.status)
def test_components_post(self, handler_mock, resp_mock, request_mock):
json_create = {'name': 'foo',
'description': 'test_desc_component',
'user_id': 'user_id_test',
'project_id': 'project_id_test'}
request_mock.body = json.dumps(json_create)
request_mock.content_type = 'application/json'
handler_create = handler_mock.return_value.create
handler_create.return_value = fakes.FakeComponent()
component.ComponentsController().post()
        handler_create.assert_called_once_with(json_create)
        self.assertEqual(201, resp_mock.status)
def test_components_post_nodata(self, handler_mock,
resp_mock, request_mock):
request_mock.body = ''
request_mock.content_type = 'application/json'
handler_create = handler_mock.return_value.create
handler_create.return_value = fakes.FakeComponent()
ret_val = component.ComponentsController().post()
self.assertEqual("Missing argument: \"data\"",
str(ret_val['faultstring']))
self.assertEqual(400, resp_mock.status)
class TestComponentAsDict(base.BaseTestCase):
scenarios = [
('none', dict(data=None)),
('one', dict(data={'name': 'foo'})),
('full', dict(data={'uri': 'http://example.com/v1/components/x1',
'name': 'Example-component',
'type': 'component',
'component_type': 'heat_stack',
'tags': ['small'],
'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
'user_id': '55f41cf46df74320b9486a35f5d28a11',
'description': 'A component'}))
]
def test_as_dict(self):
objects.load()
if self.data is None:
s = componentmodel.Component()
self.data = {}
else:
s = componentmodel.Component(**self.data)
if 'uri' in self.data:
del self.data['uri']
if 'type' in self.data:
del self.data['type']
self.assertEqual(self.data,
s.as_dict(objects.registry.Component))
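# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): the
# `scenarios` attribute above follows the testscenarios convention, which is
# assumed to be honoured by solum's test base class -- each (name, params)
# pair re-runs test_as_dict with the scenario dict bound to the instance, so
# the 'one' scenario effectively checks:
#
#     componentmodel.Component(name='foo').as_dict(
#         objects.registry.Component) == {'name': 'foo'}
# ---------------------------------------------------------------------------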
| 43.823529
| 79
| 0.656498
|
1db6a5a40a64d6fde1da962e1074aefaec9f11ba
| 3,385
|
py
|
Python
|
zksync_sdk/zksync_provider/transaction.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 22
|
2021-03-05T07:01:05.000Z
|
2022-03-26T19:15:19.000Z
|
zksync_sdk/zksync_provider/transaction.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 23
|
2021-03-01T06:09:26.000Z
|
2022-02-17T21:54:44.000Z
|
zksync_sdk/zksync_provider/transaction.py
|
zksync-sdk/zksync-python
|
740020b6c6b83548cf6cd2ec1b4af94316a74667
|
[
"MIT"
] | 10
|
2021-03-08T13:43:49.000Z
|
2021-08-23T16:18:14.000Z
|
import asyncio
from dataclasses import dataclass
from enum import Enum, auto
from typing import Optional
class TransactionStatus(Enum):
FAILED = auto()
COMMITTED = auto()
VERIFIED = auto()
@dataclass
class TransactionResult:
status: TransactionStatus
fail_reason: str
class Transaction:
@classmethod
def build_transaction(cls, provider, transaction_id: str):
transaction = cls(provider, transaction_id)
return transaction
def __init__(self, provider, transaction_hash: str):
self.provider = provider
self.transaction_hash = transaction_hash
async def await_committed(self, attempts: Optional[int] = None, attempts_timeout: Optional[int] = None) \
-> TransactionResult:
        status = TransactionResult(TransactionStatus.FAILED,
                                   f"Transaction has not been executed within {attempts} attempts "
                                   f"and timeout {attempts_timeout} ms")
while True:
if attempts is not None:
if attempts <= 0:
return status
transaction_details = await self.provider.get_tx_receipt(self.transaction_hash)
if attempts is not None:
attempts -= 1
if "failReason" in transaction_details and transaction_details["failReason"] is not None:
return TransactionResult(TransactionStatus.FAILED, transaction_details['failReason'])
if "block" in transaction_details:
block = transaction_details["block"]
if block is not None and "committed" in block and block["committed"]:
return TransactionResult(TransactionStatus.COMMITTED, "")
if attempts_timeout is not None:
await asyncio.sleep(attempts_timeout / 1000)
async def await_verified(self, attempts: Optional[int] = None, attempts_timeout: Optional[int] = None):
        intermediate_status = TransactionResult(
            TransactionStatus.FAILED,
            f"Transaction has not been executed within {attempts} attempts "
            f"and timeout {attempts_timeout} ms")
while True:
if attempts is not None:
if attempts <= 0:
return intermediate_status
transaction_details = await self.provider.get_tx_receipt(self.transaction_hash)
if attempts is not None:
attempts -= 1
if "failReason" in transaction_details and transaction_details["failReason"] is not None:
return TransactionResult(TransactionStatus.FAILED, transaction_details['failReason'])
if "block" in transaction_details:
block = transaction_details["block"]
if block is not None and "committed" in block and block["committed"]:
intermediate_status = TransactionResult(TransactionStatus.COMMITTED, "")
if "block" in transaction_details:
block = transaction_details["block"]
if block is not None and \
"verified" in block and \
block["verified"]:
return TransactionResult(TransactionStatus.VERIFIED, "")
if attempts_timeout is not None:
await asyncio.sleep(attempts_timeout / 1000)
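# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module.
# It relies only on the interface exercised above -- a provider exposing an
# async get_tx_receipt(tx_hash) that returns a dict. FakeProvider and the
# receipt payload below are hypothetical stand-ins for a real zkSync provider.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class FakeProvider:
        async def get_tx_receipt(self, tx_hash: str) -> dict:
            # Pretend the transaction is already committed.
            return {"failReason": None, "block": {"committed": True, "verified": False}}
    async def demo():
        tx = Transaction.build_transaction(FakeProvider(), "0xdeadbeef")
        result = await tx.await_committed(attempts=3, attempts_timeout=100)
        print(result.status)  # -> TransactionStatus.COMMITTED
    asyncio.run(demo())  # requires Python 3.7+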
| 41.280488
| 109
| 0.625702
|
154a28603b83ee5bcf04b9a808ee3e5536a8c835
| 750
|
py
|
Python
|
tellapart/aurproxy/config/__init__.py
|
thinker0/aurproxy
|
7387bb3ac7decd9d0034f9ca6b4dfea4384ce59d
|
[
"Apache-2.0"
] | 73
|
2015-04-30T23:41:56.000Z
|
2021-04-16T07:11:47.000Z
|
tellapart/aurproxy/config/__init__.py
|
aurora-scheduler/aurproxy
|
73a1e7086cc4dd171456f50724246a9261febaf8
|
[
"Apache-2.0"
] | 42
|
2015-05-21T00:02:42.000Z
|
2018-01-20T20:20:13.000Z
|
tellapart/aurproxy/config/__init__.py
|
amperity/aurproxy
|
985ab0d32211a5ff1e4f47ceb524b13287e90ffe
|
[
"Apache-2.0"
] | 18
|
2015-05-01T12:48:20.000Z
|
2019-09-06T10:04:38.000Z
|
# Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .endpoint import (
ProxyEndpoint,
ShareEndpoint,
SourceEndpoint)
from .route import ProxyRoute
from .server import ProxyServer
from .stream import ProxyStream
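# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): this module
# only re-exports the configuration classes, so callers can write, e.g.:
#
#     from tellapart.aurproxy.config import ProxyServer, ProxyRoute
#
# instead of importing from the individual submodules.
# ---------------------------------------------------------------------------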
| 34.090909
| 74
| 0.770667
|
0710976281c1832a4634717044455bb8987c195c
| 11,420
|
py
|
Python
|
urban_env/envs/multilane_env.py
|
pinakigupta/BehaviorRL
|
ab67d155197c46f3e8551af9763e490b781e8eeb
|
[
"MIT"
] | 6
|
2020-03-09T01:06:37.000Z
|
2020-09-16T04:51:24.000Z
|
urban_env/envs/multilane_env.py
|
pinakigupta/BehaviorRL
|
ab67d155197c46f3e8551af9763e490b781e8eeb
|
[
"MIT"
] | null | null | null |
urban_env/envs/multilane_env.py
|
pinakigupta/BehaviorRL
|
ab67d155197c46f3e8551af9763e490b781e8eeb
|
[
"MIT"
] | null | null | null |
######################################################################
# Deep Reinforcement Learning for Autonomous Driving
# Created/Modified on: February 5, 2019
# Author: Pinaki Gupta
#######################################################################
from __future__ import division, print_function, absolute_import
import pprint
import numpy as np
from gym import logger
from urban_env import utils
from urban_env.envs.abstract import AbstractEnv
from urban_env.road.road import Road, RoadNetwork
from urban_env.vehicle.control import MDPVehicle
from urban_env.envs.graphics import EnvViewer
from handle_model_files import is_predict_only
from urban_env.vehicle.dynamics import Vehicle, Obstacle
class MultilaneEnv(AbstractEnv):
"""
    An urban driving environment.
    The vehicle drives on a straight urban road with several lanes, and is rewarded for reaching a high velocity,
    staying in the rightmost lanes and avoiding collisions.
"""
RIGHT_LANE_REWARD = 0.1
""" The reward received when driving on the right-most lanes, linearly mapped to zero for other lanes."""
LANE_CHANGE_REWARD = -1
AGGRESSIVE_LANE_CHANGE_REWARD = -3
ROAD_LENGTH = 500
ROAD_SPEED = 35
DEFAULT_CONFIG = {**AbstractEnv.DEFAULT_CONFIG,
**{
"observation": {
"type": "Kinematics",
"features": ['x', 'y', 'vx', 'vy', 'psi'],
"relative_features": ['x'],
"vehicles_count": 6
},
"other_vehicles_type": "urban_env.vehicle.control.IDMDPVehicle",
"duration": 250,
"_predict_only": is_predict_only(),
"screen_width": 1600,
"screen_height": 400,
"DIFFICULTY_LEVELS": 2,
"COLLISION_REWARD": -200,
"INVALID_ACTION_REWARD": 0,
"VELOCITY_REWARD": 5,
"GOAL_REWARD": 2000,
"OBS_STACK_SIZE": 1,
"GOAL_LENGTH": 1000,
"initial_spacing": 2,
"centering_position": [0.3, 0.5],
}
}
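    # Editor's note (illustrative, not part of the original file): the
    # double-splat merge above lets this subclass override the base defaults,
    # e.g. {**{'a': 1, 'b': 2}, **{'b': 3}} == {'a': 1, 'b': 3}.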
DIFFICULTY_LEVELS = {
"EASY": {
"lanes_count": 2,
"vehicles_count": 5,
"duration": 20
},
"MEDIUM": {
"lanes_count": 3,
"vehicles_count": 10,
"duration": 30
},
"HARD": {
"lanes_count": 4,
"vehicles_count": 50,
"duration": 40
},
}
def __init__(self, config=DEFAULT_CONFIG):
super(MultilaneEnv, self).__init__(config)
self.config.update(self.DIFFICULTY_LEVELS["MEDIUM"])
if self.config["_predict_only"]:
self.ROAD_LENGTH = 1000
self.steps = 0
self.ego_x0 = None
EnvViewer.SCREEN_HEIGHT = self.config['screen_height']
EnvViewer.SCREEN_WIDTH = self.config['screen_width']
self.reset()
def _on_route(self, veh=None):
return True
def _on_road(self, veh=None):
if veh is None:
veh = self.vehicle
return (veh.position[0] < self.ROAD_LENGTH) and (veh.position[0] > 0)
def _goal_achieved(self, veh=None):
if veh is None:
veh = self.vehicle
return (veh.position[0] > 0.99 * self.ROAD_LENGTH) and \
self._on_route(veh)
def reset(self):
self.steps = 0
self._create_road()
self._create_vehicles()
return super(MultilaneEnv, self).reset()
def step(self, action):
self.steps += 1
self.previous_action = action
obs, rew, done, info = super(MultilaneEnv, self).step(action)
self.episode_travel = self.vehicle.position[0] - self.ego_x0
return (obs, rew, done, info)
def _create_road(self):
"""
Create a road composed of straight adjacent lanes.
"""
self.road = Road(network=RoadNetwork.straight_road_network(lanes=self.config["lanes_count"], length=self.ROAD_LENGTH),
np_random=self.np_random)
def _create_vehicles(self):
"""
        Create some new random vehicles of a given type, and add them to the road.
"""
self.vehicle = MDPVehicle.create_random(road=self.road,
velocity=np.random.randint(low=15,high=35),
spacing=self.config["initial_spacing"],
config=self.config)
self.vehicle.is_ego_vehicle = True
self.road.vehicles.append(self.vehicle)
self.ego_x0 = self.vehicle.position[0]
vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
ahead_vehicles = self.config["vehicles_count"] // 2
behind_vehicles = self.config["vehicles_count"] - ahead_vehicles
for _ in range(ahead_vehicles):
self.road.vehicles.append(vehicles_type.create_random(road=self.road,
ahead=True,
config=self.config)
)
for _ in range(behind_vehicles):
self.road.vehicles.append(vehicles_type.create_random(road=self.road,
ahead=False,
config=self.config)
)
for _ in range(10):
self.road.vehicles.append(Obstacle.create_random(road=self.road,
ahead=False,
config=self.config
)
)
# Add the virtual obstacles
lane = self.road.network.lanes_list()[0]
x0 = lane.length/2
position = lane.position(x0, -3.5)
lane_index = self.road.network.get_closest_lane_index(
position=position,
heading=0
)
virtual_obstacle_left = Obstacle(self.road,
position=position,
heading=lane.heading_at(x0),
velocity=0,
target_velocity=0,
lane_index=lane_index,
target_lane_index=lane_index,
enable_lane_change=False,
config=self.config)
virtual_obstacle_left.LENGTH = lane.length
virtual_obstacle_left.virtual = True
self.road.vehicles.append(virtual_obstacle_left)
self.road.virtual_vehicles.append(virtual_obstacle_left)
lane = self.road.network.lanes_list()[-1]
x0 = lane.length/2
position = lane.position(x0, 3.5)
virtual_obstacle_right = Obstacle(self.road,
position=position,
heading=lane.heading_at(x0),
velocity=0,
target_velocity=0,
lane_index=lane_index,
target_lane_index=lane_index,
enable_lane_change=False,
config=self.config)
virtual_obstacle_right.LENGTH = lane.length
virtual_obstacle_right.virtual = True
self.road.vehicles.append(virtual_obstacle_right)
self.road.virtual_vehicles.append(virtual_obstacle_right)
def _reward(self, action):
"""
        The reward is defined to foster driving at high speed, staying in the rightmost lanes, and avoiding collisions.
:param action: the last action performed
:return: the corresponding reward
"""
action_lookup = dict(map(reversed, AbstractEnv.ACTIONS.items()))
action_reward = {action_lookup['LANE_LEFT']: self.LANE_CHANGE_REWARD,
action_lookup['IDLE']: 0,
action_lookup['LANE_RIGHT']: self.LANE_CHANGE_REWARD,
action_lookup['FASTER']: 0,
action_lookup['SLOWER']: 0,
action_lookup['LANE_LEFT_AGGRESSIVE']: self.AGGRESSIVE_LANE_CHANGE_REWARD,
action_lookup['LANE_RIGHT_AGGRESSIVE']: self.AGGRESSIVE_LANE_CHANGE_REWARD
}
neighbours = self.road.network.all_side_lanes(self.vehicle.lane_index)
collision_reward = self.config["COLLISION_REWARD"] * self.vehicle.crashed
        velocity_reward = self.config["VELOCITY_REWARD"] * (self.vehicle.velocity_index - 1) / (self.vehicle.SPEED_COUNT - 1)
if (velocity_reward > 0):
velocity_reward *= self._on_route()
goal_reward = self.config["GOAL_REWARD"]
if self.vehicle.crashed:
reward = collision_reward + min(0.0, velocity_reward + action_reward[action])
elif self._goal_achieved():
reward = goal_reward + velocity_reward + action_reward[action]
else:
reward = velocity_reward + action_reward[action]
return reward
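    # Editor's note: a worked example of the reward arithmetic above, using
    # hypothetical numbers (SPEED_COUNT is assumed to be 5 here). With
    # VELOCITY_REWARD = 5 and velocity_index = 3, the velocity term is
    # 5 * (3 - 1) / (5 - 1) = 2.5; an ordinary lane change then yields
    # 2.5 + LANE_CHANGE_REWARD = 2.5 - 1 = 1.5, while a crash in the same
    # state gives COLLISION_REWARD + min(0, 1.5) = -200 + 0 = -200.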
def _is_terminal(self):
"""
        The episode is over if the ego vehicle crashed, reached the goal, left the road, or ran out of time.
"""
return self.vehicle.crashed or \
self._goal_achieved() or \
(not self._on_road()) or \
self.steps >= self.config["duration"]
def _constraint(self, action):
"""
        The constraint signal is the occurrence of a collision.
"""
return float(self.vehicle.crashed)
def print_obs_space(self):
print("obs space, step ", self.steps)
#sys.stdout.flush()
pp = pprint.PrettyPrinter(indent=4)
numoffeatures = len(self.config["observation"]["features"])
        numofobs = len(self.obs)
        numofvehicles = numofobs // numoffeatures
close_vehicle_ids = [int(self.vehicle.Id())]
modified_obs = self.obs
for v in self.close_vehicles:
close_vehicle_ids.append(int(v.Id()))
close_vehicle_ids.extend([-1]*(numofvehicles-len(close_vehicle_ids)))
        for Idx, vehicle_id in enumerate(close_vehicle_ids):
            modified_obs = np.insert(modified_obs, Idx * (numoffeatures + 1), vehicle_id)
np.set_printoptions(precision=3, suppress=True)
obs_format = pp.pformat(np.round(np.reshape(modified_obs, (numofvehicles, numoffeatures+1 )), 3))
obs_format = obs_format.rstrip("\n")
print(obs_format)
| 40.640569
| 126
| 0.521804
|
4a1e321d24f9266ba328c1020e565d9f0c014761
| 5,446
|
py
|
Python
|
model.py
|
Adversarial-dropout-rnn/adversarial_dropout_lm
|
e08ff9aa51765fff6cfac4c2576e58bee3fcd173
|
[
"BSD-3-Clause"
] | null | null | null |
model.py
|
Adversarial-dropout-rnn/adversarial_dropout_lm
|
e08ff9aa51765fff6cfac4c2576e58bee3fcd173
|
[
"BSD-3-Clause"
] | null | null | null |
model.py
|
Adversarial-dropout-rnn/adversarial_dropout_lm
|
e08ff9aa51765fff6cfac4c2576e58bee3fcd173
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
from embed_regularize import embedded_dropout_mask
from locked_dropout import LockedDropoutMask
from weight_drop import WeightDropMask
from pytorch_LSTM import LSTM, LSTMCell, BNLSTMCell
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):
super(RNNModel, self).__init__()
self.lockdrop = LockedDropoutMask()
self.idrop = nn.Dropout(dropouti)
self.hdrop = nn.Dropout(dropouth)
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
assert rnn_type in ['LSTM', 'QRNN'], 'RNN type is not supported'
if rnn_type == 'LSTM':
#self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) if l != nlayers-1
# else LSTM(LSTMCell, ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, num_layers=1, dropout=0) for l in range(nlayers)]
self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) for l in range(nlayers)]
if wdrop:
self.rnns = [WeightDropMask(self.rnns[l], ['weight_hh_l0'], dropout=wdrop) for l in range(nlayers)]
elif rnn_type == 'QRNN':
from torchqrnn import QRNNLayer
self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid, hidden_size=nhid if l != nlayers - 1 else ninp, save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(nlayers)]
for rnn in self.rnns:
rnn.linear = WeightDropMask(rnn.linear, ['weight'], dropout=wdrop)
print(self.rnns)
self.rnns = torch.nn.ModuleList(self.rnns)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
#if nhid != ninp:
# raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
self.dropout = dropout
self.dropouti = dropouti
self.dropouth = dropouth
self.dropoute = dropoute
    def reset(self):
        if self.rnn_type == 'QRNN':
            for rnn in self.rnns:
                rnn.reset()
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden, return_h=False, maske=None, maski=None, maskh=None, maskw=None, masko=None):
emb = embedded_dropout_mask(self.encoder, input, maske, dropout=self.dropoute, training=self.training)
emb = self.lockdrop(emb, maski, self.dropouti)
raw_output = emb
new_hidden = []
raw_outputs = []
outputs = []
if maskh is None: maskh = [None for l in range(0, self.nlayers - 1)]
if maskw is None: maskw = [None for l in range(0, self.nlayers)]
for l, rnn in enumerate(self.rnns):
current_input = raw_output
#print(l)
#print(rnn)
raw_output, new_h = rnn(maskw[l], raw_output, hidden[l])
new_hidden.append(new_h)
raw_outputs.append(raw_output)
if l != self.nlayers - 1:
#self.hdrop(raw_output)
raw_output = self.lockdrop(raw_output, maskh[l], self.dropouth)
outputs.append(raw_output)
hidden = new_hidden
output = self.lockdrop(raw_output, masko, self.dropout)
outputs.append(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
result = decoded.view(output.size(0), output.size(1), decoded.size(1))
if return_h:
return result, hidden, raw_outputs, outputs
return result, hidden
def zero_grads(self):
"""Sets gradients of all model parameters to zero."""
for p in self.parameters():
if p.grad is not None:
p.grad.data.zero_()
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()),
Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()))
for l in range(self.nlayers)]
elif self.rnn_type == 'QRNN':
return [Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_())
for l in range(self.nlayers)]
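# ---------------------------------------------------------------------------
# Editor's note: a construction sketch, not part of the original module.
# The hyperparameters are illustrative (common AWD-LSTM-style settings), and
# the forward call assumes the companion mask-aware helpers treat the default
# None masks as plain dropout, as forward()'s signature suggests:
#
#     model = RNNModel('LSTM', ntoken=10000, ninp=400, nhid=1150, nlayers=3,
#                      dropout=0.4, dropouth=0.25, dropouti=0.4, dropoute=0.1,
#                      wdrop=0.5, tie_weights=True)
#     hidden = model.init_hidden(bsz=20)
#     inp = Variable(torch.LongTensor(70, 20).random_(0, 10000))
#     output, hidden = model(inp, hidden)  # output: (70, 20, ntoken)
# ---------------------------------------------------------------------------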
| 46.547009
| 219
| 0.598972
|
3763b249834ab0c5ca6a25a5045ffb48844645d4
| 30,579
|
py
|
Python
|
sympy/integrals/tests/test_transforms.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 7
|
2015-01-14T06:55:33.000Z
|
2018-08-11T14:43:52.000Z
|
sympy/integrals/tests/test_transforms.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2018-02-19T04:56:04.000Z
|
2018-02-19T04:56:04.000Z
|
sympy/integrals/tests/test_transforms.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2016-04-24T14:39:22.000Z
|
2016-04-24T14:39:22.000Z
|
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform, InverseSineTransform, InverseCosineTransform,
HankelTransform, InverseHankelTransform)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi,
cos, S, And, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, besselj, bessely, besseli, besselk,
exp_polar, polar_lift, unpolarify, Function, expint, expand_mul)
from sympy.utilities.pytest import XFAIL, slow, skip
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == set([s])
assert mellin_transform(f(x)*a, x, s).free_symbols == set([s, a])
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S(1)/2), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S(1)/2)/(2*sqrt(pi)),
(-1, -S(1)/2), True)
def test_mellin_transform():
from sympy import Max, Min, Ne
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(1/(-nu - s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(-beta) < 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), re(-beta) < 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified
assert MT(abs(1 - x)**(-rho), x, s) == (
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho - s)/(cos(pi*rho/2)*gamma(rho)),
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
    assert mt[1:] == ((0, -re(beta) + 1), True)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S(1)/2), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S(1)/2)/(sqrt(pi)*s), (-S(1)/2, 0), True)
def test_mellin_transform_bessel():
from sympy import Max, Min, hyper, meijerg
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, S(3)/4), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(S(1)/2 - 2*s)*gamma((a + 1)/2 + s)
/ (gamma(1 - s - a/2)*gamma(1 + a - 2*s)),
(-(re(a) + 1)/2, S(1)/4), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S(1)/2)/(gamma(-a/2 - s + S(1)/2)*
gamma(a - 2*s + 1)), (-re(a)/2, S(1)/4), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S(1)/2), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S(1)/2)
/ (sqrt(pi)*gamma(S(3)/2 - s)*gamma(a - s + S(1)/2)),
(S(1)/2 - re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S(1)/2), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S(1)/2), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), S(3)/4), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S(1)/2 - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), S(1)/4), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S(1)/2 - 2*s)
/ (sqrt(pi)*gamma(S(1)/2 - s - a/2)*gamma(S(1)/2 - s + a/2)),
(Max(-re(a)/2, re(a)/2), S(1)/4), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S(1)/2 - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S(1)/2), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S(1)/2), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(a, 2*sqrt(2*sqrt(x))), x, s) == \
(4**(-s)*gamma(2*s)*gamma(a/2 + s)/gamma(a/2 - s + 1)/2,
(Max(-re(a)/2, 0), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(
gamma(s)*gamma(
a + s)*gamma(-s + S(1)/2)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(4**s*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)*gamma(a/2 + b/2 + s)/
(2*gamma(-a/2 + b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max(-re(a)/2 - re(b)/2, re(a)/2 - re(b)/2), S(1)/2), True)
# TODO products of besselk are a mess
# TODO this can be simplified considerably (although I have no idea how)
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
assert not mt[0].has(meijerg, hyper)
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
# XXX IMT has hickups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma((s + 1)/2)/(2*s*gamma(-s/2 + 1)
), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*polar_lift(-1), 1, a), 0, S(0) < re(a))
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
def test_inverse_mellin_transform():
    from sympy import (sin, simplify, expand_func, powsimp, Max, Min, expand,
                       powdenest, exp_polar, combsimp, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (-S(1)/2, 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S(1)/2)/(sqrt(pi)*s), s, x, (-S(1)/2, 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, S(3)/4))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S(1)/2 - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, S(1)/4))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S(1)/2 - 2*s)
/ (gamma(S(1)/2 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, S(1)/4))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S(1)/2))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S(1)/2))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S(1)/2))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S(1)/2))) == \
(-cos(pi*b)*besselj(b, sqrt(x)) + besselj(-b, sqrt(x))) * \
besselj(a, sqrt(x))/sin(pi*b)*(-1)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S(1)/2)) == sqrt(x)/(x + 1)
def test_laplace_transform():
from sympy import (fresnels, fresnelc, hyper)
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), True)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == ((log(a) + log(s) + EulerGamma)/(-s), 0, True)
assert LT(erf(t), t, s) == ((-erf(s/2) + 1)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(
exp(-a*t)*sin(b*t), t, s) == (1/b/(1 + (a + s)**2/b**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
(1/(s + a)/(1 + b**2/(a + s)**2), -a, True)
# TODO sinh, cosh have delicate cancellation
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == \
(sqrt(2)*(sqrt(2)*sin(s**2/(2*pi))*fresnelc(s/pi) -
sqrt(2)*cos(s**2/(2*pi))*fresnels(s/pi) + cos(s**2/(2*pi) +
pi/4))/(2*s), 0, True)
def test_inverse_laplace_transform():
from sympy import (expand, sinh, cosh, besselj, besseli, exp_polar,
unpolarify, simplify)
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
def simp_hyp(expr):
return expand(expand(expr).rewrite(sin))
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
1/b/(1 + a**2*(1 + 2*pi*I*k/a)**2/b**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import sinh, cosh, EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform(
(1/sqrt(t))**3, t, w) == sqrt(w)*gamma(S(1)/4)/(2*gamma(S(5)/4))
assert sine_transform(t**(-a), t, w) == 2**(
-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S(1)/2), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == -sinh(a*t) + cosh(a*t)
assert sine_transform(
log(t)/t, t, w) == sqrt(2)*sqrt(pi)*(-log(w**2) - 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import sinh, cosh, Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == -sqrt(2)*sqrt(pi)*(sinh(a*w) - cosh(a*w))/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S(1)/2)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S(1)/2)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == -sinh(a*t) + cosh(a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == -a*(sinh(a**2/(2*w)) - cosh(a**2/(2*w)))/(2*w**(S(3)/2))
assert cosine_transform(1/(a + t), t, w) == -sqrt(
2)*((2*Si(a*w) - pi)*sin(a*w) + 2*cos(a*w)*Ci(a*w))/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2, 0), ()), (
(S(1)/2, 0, 0), (S(1)/2,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import sinh, cosh, gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == 2**(
-m + 1)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2)
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + S(3)/2)/sqrt(pi)
assert inverse_hankel_transform(2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(3)/2)*gamma(nu + S(3)/2)/sqrt(pi), k, r, nu) == \
r**nu*(-sinh(a*r) + cosh(a*r))
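# ---------------------------------------------------------------------------
# Editor's note: a standalone sketch, not part of the test suite, showing the
# result shape the assertions above rely on -- each transform returns a tuple
# (expression, region of convergence, auxiliary convergence condition).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(mellin_transform(exp(-x), x, s))        # (gamma(s), (0, oo), True)
    print(laplace_transform(Heaviside(x), x, s))  # (1/s, 0, True)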
| 43.008439
| 151
| 0.502731
|
9517e7cb1aa8210accd6ce0912b1a10e68082f09
| 5,973
|
py
|
Python
|
descarteslabs/scenes/tests/test_search.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
descarteslabs/scenes/tests/test_search.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
descarteslabs/scenes/tests/test_search.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import datetime
from descarteslabs.scenes import geocontext, search
from shapely.geometry import shape
import mock
from .mock_data import _metadata_search, _metadata_get_bands_by_product
class TestScenesSearch(unittest.TestCase):
geom = {
"coordinates": (
(
(-95.836498, 39.278486),
(-92.068696, 39.278486),
(-92.068696, 42.799988),
(-95.836498, 42.799988),
(-95.836498, 39.278486),
),
),
"type": "Polygon",
}
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_geom(self):
sc, ctx = search(self.geom, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert isinstance(ctx, geocontext.AOI)
assert ctx.__geo_interface__ == self.geom
assert ctx.resolution == 15
assert ctx.crs == "EPSG:32615"
for scene in sc:
# allow for changes in publicly available data
assert abs(len(scene.properties.bands) - 24) < 4
assert "derived:ndvi" in scene.properties.bands
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_shapely(self):
sc, ctx = search(shape(self.geom), products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) == 2
assert isinstance(ctx, geocontext.AOI)
assert ctx.__geo_interface__ == self.geom
assert ctx.resolution == 15
assert ctx.crs == "EPSG:32615"
for scene in sc:
# allow for changes in publicly available data
assert abs(len(scene.properties.bands) - 24) < 4
assert "derived:ndvi" in scene.properties.bands
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_AOI(self):
aoi = geocontext.AOI(self.geom, resolution=5)
sc, ctx = search(aoi, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx.resolution == 5
assert ctx.crs == "EPSG:32615"
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_AOI_with_shape(self):
aoi = geocontext.AOI(self.geom, shape=(100, 100))
sc, ctx = search(aoi, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx.resolution is None
assert ctx.shape == aoi.shape
assert ctx.crs == "EPSG:32615"
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_dltile(self):
tile = geocontext.DLTile(
{
"geometry": {
"coordinates": [
[
[-94.50970627780103, 40.460817879515986],
[-93.75494640538922, 40.468212507270195],
[-93.76149667591069, 41.04471363474632],
[-94.5228005945451, 41.03716803374444],
[-94.50970627780103, 40.460817879515986],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32615",
"key": "64:0:1000.0:15:-2:70",
"outputBounds": [372000.0, 4480000.0, 436000.0, 4544000.0],
"pad": 0,
"resolution": 1000.0,
"ti": -2,
"tilesize": 64,
"tj": 70,
"zone": 15,
},
}
)
sc, ctx = search(tile, products="landsat:LC08:PRE:TOAR", limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
assert ctx == tile
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_no_products(self):
sc, ctx = search(self.geom, limit=4)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
@mock.patch("descarteslabs.scenes._search.Metadata.search", _metadata_search)
@mock.patch(
"descarteslabs.scenes._search.Metadata.get_bands_by_product",
_metadata_get_bands_by_product,
)
def test_search_datetime(self):
start_datetime = datetime.datetime(2016, 7, 6)
end_datetime = datetime.datetime(2016, 7, 15)
sc, ctx = search(
self.geom,
products="landsat:LC08:PRE:TOAR",
start_datetime=start_datetime,
end_datetime=end_datetime,
limit=4,
)
assert len(sc) > 0
assert len(sc) <= 4 # test client only has 2 scenes available
for scene in sc:
assert scene.properties["date"] >= start_datetime
assert scene.properties["date"] <= end_datetime
| 36.644172
| 85
| 0.584966
|
c84e1cde75281900b2c21b9af02a09b745f5213e
| 4,053
|
py
|
Python
|
tensorflow/python/data/util/traverse_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/util/traverse_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/util/traverse_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities for traversing the dataset construction graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import traverse
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class _TestDataset(dataset_ops.UnaryUnchangedStructureDataset):
def __init__(self, input_dataset):
self._input_dataset = input_dataset
temp_variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor,
buffer_size=1,
**self._flat_structure)
variant_tensor = gen_dataset_ops.model_dataset(
temp_variant_tensor, **self._flat_structure)
super(_TestDataset, self).__init__(input_dataset, variant_tensor)
class TraverseTest(test.TestCase):
@test_util.run_deprecated_v1
def testOnlySource(self):
ds = dataset_ops.Dataset.range(10)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertAllEqual(["RangeDataset"], [x.name for x in variant_tensor_ops])
@test_util.run_deprecated_v1
def testSimplePipeline(self):
ds = dataset_ops.Dataset.range(10).map(math_ops.square)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["MapDataset", "RangeDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testConcat(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = ds1.concatenate(ds2)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ConcatenateDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testZip(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = dataset_ops.Dataset.zip((ds1, ds2))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ZipDataset", "RangeDataset", "RangeDataset_1"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testMultipleVariantTensors(self):
ds = dataset_ops.Dataset.range(10)
ds = _TestDataset(ds)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["RangeDataset", "ModelDataset", "PrefetchDataset"]),
set([x.name for x in variant_tensor_ops]))
@test_util.run_deprecated_v1
def testFlatMap(self):
ds1 = dataset_ops.Dataset.range(10).repeat(10)
def map_fn(ds):
def _map(x):
return ds.batch(x)
return _map
ds2 = dataset_ops.Dataset.range(20).prefetch(1)
ds2 = ds2.flat_map(map_fn(ds1))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds2)
self.assertSetEqual(
set([
"FlatMapDataset", "PrefetchDataset", "RepeatDataset",
"RangeDataset", "RangeDataset_1"
]), set([x.name for x in variant_tensor_ops]))
if __name__ == "__main__":
test.main()
| 36.845455
| 81
| 0.705403
|
f3c2fbcd5ce1eee3395175c89c6e1eebaf2f0bf9
| 5,902
|
py
|
Python
|
se_leg_ra/views/ra.py
|
SUNET/se-leg-ra
|
ac30e700dda4fceb7a9205b4b2790478cf3ba5b4
|
[
"BSD-3-Clause"
] | null | null | null |
se_leg_ra/views/ra.py
|
SUNET/se-leg-ra
|
ac30e700dda4fceb7a9205b4b2790478cf3ba5b4
|
[
"BSD-3-Clause"
] | null | null | null |
se_leg_ra/views/ra.py
|
SUNET/se-leg-ra
|
ac30e700dda4fceb7a9205b4b2790478cf3ba5b4
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint, current_app, render_template, url_for, request, redirect
from se_leg_ra.forms import DriversLicenseForm, IdCardForm, PassportForm, NationalIDCardForm
from se_leg_ra.decorators import require_eppn
from se_leg_ra.db import IdCardProofing, DriversLicenseProofing, PassportProofing, NationalIdCardProofing
from se_leg_ra.utils import log_and_send_proofing
__author__ = 'lundberg'
se_leg_ra_views = Blueprint('se_leg_ra', __name__, url_prefix='', template_folder='templates')
def get_view_context(form, user):
view_context = {
'form': form,
'action_url': request.path,
'user': user,
'success_message': None,
'error_message': None
}
return view_context
@se_leg_ra_views.route('/', methods=['GET'])
@require_eppn
def index(user):
current_app.logger.debug('GET index')
# Set up the default form
view_context = get_view_context(DriversLicenseForm(), user)
view_context['action_url'] = url_for('se_leg_ra.drivers_license')
return render_template('drivers_license.jinja2', view_context=view_context)
@se_leg_ra_views.route('/login', methods=['GET'])
def login():
current_app.logger.debug('GET login')
login_dict = current_app.config['LOGIN_ALTERNATIVES']
return render_template('login.jinja2', login_alternatives=login_dict)
@se_leg_ra_views.route('/id-card', methods=['GET', 'POST'])
@require_eppn
def id_card(user):
form = IdCardForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
current_app.logger.debug('id_card form validated')
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'card_number': form.card_number.data,
'expiry_date': form.expiry_date.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = IdCardProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['card_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('id_card.jinja2', view_context=view_context)
@se_leg_ra_views.route('/drivers-license', methods=['GET', 'POST'])
@require_eppn
def drivers_license(user):
form = DriversLicenseForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'reference_number': form.reference_number.data,
'expiry_date': form.expiry_date.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = DriversLicenseProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['reference_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('drivers_license.jinja2', view_context=view_context)
@se_leg_ra_views.route('/passport', methods=['GET', 'POST'])
@require_eppn
def passport(user):
form = PassportForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'expiry_date': form.expiry_date.data,
'passport_number': form.passport_number.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = PassportProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['passport_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('passport.jinja2', view_context=view_context)
@se_leg_ra_views.route('/national-id-card', methods=['GET', 'POST'])
@require_eppn
def national_id_card(user):
form = NationalIDCardForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'expiry_date': form.expiry_date.data,
'card_number': form.card_number.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = NationalIdCardProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['card_number'], data['qr_code'],
data['ocular_validation'], data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('national_id_card.jinja2', view_context=view_context)
@se_leg_ra_views.route('/logout', methods=['GET'])
@require_eppn
def logout(user):
current_app.logger.info('User {} logged out'.format(user['eppn']))
return redirect(current_app.config['LOGOUT_URL'])
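# Hedged usage sketch (added, not part of the original module): wiring this
# blueprint into a Flask application factory; `create_app` is an assumed name.
#   def create_app():
#       app = Flask(__name__)
#       app.register_blueprint(se_leg_ra_views)
#       return app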
| 40.986111
| 119
| 0.64978
|
a5f0070566eeb785dedc80951a37eb250b078c5c
| 664
|
py
|
Python
|
mlld_functions.py
|
mdastro/My_Package
|
80fb7ac90c93da4b41bf90197afa13e1f0b434c7
|
[
"MIT"
] | null | null | null |
mlld_functions.py
|
mdastro/My_Package
|
80fb7ac90c93da4b41bf90197afa13e1f0b434c7
|
[
"MIT"
] | null | null | null |
mlld_functions.py
|
mdastro/My_Package
|
80fb7ac90c93da4b41bf90197afa13e1f0b434c7
|
[
"MIT"
] | null | null | null |
import numpy as np
"""
MLLD_functions: a class collecting several general-purpose helper functions that I use frequently.
"""
class MLLD_functions:
def standardization(self, variable):
"""
:param variable: the array with the variables you wish to standardize
:return: standardized array
"""
var_average = np.average(variable)
var_std = np.std(variable)
new_variable = []
for i in range(variable.size):
new_variable_i = (variable[i] - var_average)/var_std
new_variable.append(new_variable_i)
self.new_variable = np.array(new_variable)
return self.new_variable
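# Hedged usage example (added): standardization is a plain z-score with the
# population standard deviation, so the output has mean 0 and unit variance:
#   >>> MLLD_functions().standardization(np.array([1.0, 2.0, 3.0]))
#   array([-1.22474487,  0.        ,  1.22474487])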
| 30.181818
| 88
| 0.64759
|
c2d65692448ede7e78f2d7c1060268ea6571e8a5
| 5,282
|
py
|
Python
|
lxmert/lxmert/src/lxrt/entry.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 322
|
2021-03-29T20:42:57.000Z
|
2022-03-28T12:26:47.000Z
|
lxmert/lxmert/src/lxrt/entry.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 14
|
2021-04-23T23:45:58.000Z
|
2022-03-15T02:46:01.000Z
|
lxmert/lxmert/src/lxrt/entry.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 51
|
2021-04-05T15:44:52.000Z
|
2022-03-25T02:28:49.000Z
|
# coding=utf-8
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from ..lxrt.tokenization import BertTokenizer
from ..lxrt.modeling import LXRTFeatureExtraction as VisualBertForLXRFeature, VISUAL_CONFIG
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
def convert_sents_to_features(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
        # Keep the segment id, which allows loading BERT weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
return features
def set_visual_config(args):
VISUAL_CONFIG.l_layers = args.llayers
VISUAL_CONFIG.x_layers = args.xlayers
VISUAL_CONFIG.r_layers = args.rlayers
class LXRTEncoder(nn.Module):
def __init__(self, args, max_seq_length, mode='x'):
super().__init__()
self.max_seq_length = max_seq_length
set_visual_config(args)
# Using the bert tokenizer
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build LXRT Model
self.model = VisualBertForLXRFeature.from_pretrained(
"bert-base-uncased",
mode=mode
)
if args.from_scratch:
print("initializing all the weights")
self.model.apply(self.model.init_bert_weights)
def multi_gpu(self):
self.model = nn.DataParallel(self.model)
@property
def dim(self):
return 768
def forward(self, sents, feats, visual_attention_mask=None):
train_features = convert_sents_to_features(
sents, self.max_seq_length, self.tokenizer)
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
output = self.model(input_ids, segment_ids, input_mask,
visual_feats=feats,
visual_attention_mask=visual_attention_mask)
return output
def save(self, path):
torch.save(self.model.state_dict(),
os.path.join("%s_LXRT.pth" % path))
def load(self, path):
# Load state_dict from snapshot file
print("Load LXMERT pre-trained model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
else:
new_state_dict[key] = value
state_dict = new_state_dict
# Print out the differences of pre-trained and model weights.
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Weights in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Weights in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
# Load weights to model
self.model.load_state_dict(state_dict, strict=False)
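# Hedged usage sketch (added): how LXRTEncoder is typically driven; the
# `args` fields (llayers/xlayers/rlayers/from_scratch) follow the code above,
# while the (roi_features, roi_boxes) form of `feats` is an assumption.
#   encoder = LXRTEncoder(args, max_seq_length=20)
#   encoder.multi_gpu()                            # optional DataParallel
#   x = encoder(sents, (roi_features, roi_boxes))  # cross-modality output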
| 33.643312
| 100
| 0.641045
|
6039f96200932bf5edb2f26b76b7b1427e3e1b2c
| 622
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_sips/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_sips/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/payment_sips/__manifest__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright 2015 Eezee-It
{
'name': 'Worldline SIPS',
'version': '1.0',
'author': 'Eezee-It',
'category': 'Accounting',
'description': """
Worldline SIPS Payment Acquirer for online payments
Works with Worldline keys version 2.0 and contains an implementation of
a payment acquirer using Worldline SIPS.""",
'depends': ['payment'],
'data': [
'views/payment_views.xml',
'views/payment_sips_templates.xml',
'data/payment_acquirer_data.xml',
],
'installable': True,
}
| 25.916667
| 74
| 0.651125
|
5d3fcc181ce6af49a1e4650805a2e828daea1ee4
| 12,611
|
py
|
Python
|
tests/components/fritzbox/test_config_flow.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 1
|
2021-04-28T09:51:08.000Z
|
2021-04-28T09:51:08.000Z
|
tests/components/fritzbox/test_config_flow.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
tests/components/fritzbox/test_config_flow.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Tests for AVM Fritz!Box config flow."""
from unittest import mock
from unittest.mock import Mock, patch
from pyfritzhome import LoginError
import pytest
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.helpers.typing import HomeAssistantType
from . import MOCK_CONFIG
from tests.common import MockConfigEntry
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
MOCK_SSDP_DATA = {
ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake_name",
ATTR_UPNP_UDN: "uuid:only-a-test",
}
@pytest.fixture(name="fritz")
def fritz_fixture() -> Mock:
"""Patch libraries."""
with patch("homeassistant.components.fritzbox.config_flow.Fritzhome") as fritz:
yield fritz
async def test_user(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_user_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user with authentication failure."""
fritz().login.side_effect = [LoginError("Boom"), mock.DEFAULT]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"]["base"] == "invalid_auth"
async def test_user_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user but no connection found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
async def test_user_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert not result["result"].unique_id
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_reauth_success(hass: HomeAssistantType, fritz: Mock):
"""Test starting a reauthentication flow."""
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
data=mock_config.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "other_fake_user",
CONF_PASSWORD: "other_fake_password",
},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert mock_config.data[CONF_USERNAME] == "other_fake_user"
assert mock_config.data[CONF_PASSWORD] == "other_fake_password"
async def test_reauth_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a reauthentication flow with authentication failure."""
fritz().login.side_effect = LoginError("Boom")
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
data=mock_config.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "other_fake_user",
CONF_PASSWORD: "other_fake_password",
},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
assert result["errors"]["base"] == "invalid_auth"
async def test_reauth_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a reauthentication flow but no connection found."""
fritz().login.side_effect = OSError("Boom")
mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
data=mock_config.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_USERNAME: "other_fake_user",
CONF_PASSWORD: "other_fake_password",
},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
async def test_import(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by import."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_ssdp(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "fake_name"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_no_friendly_name(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery without friendly name."""
MOCK_NO_NAME = MOCK_SSDP_DATA.copy()
del MOCK_NO_NAME[ATTR_UPNP_FRIENDLY_NAME]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_NO_NAME
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with authentication failure."""
fritz().login.side_effect = LoginError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
assert result["errors"]["base"] == "invalid_auth"
async def test_ssdp_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery but no device found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
async def test_ssdp_not_supported(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with unsupported device."""
fritz().get_device_elements.side_effect = HTTPError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "not_supported"
async def test_ssdp_already_in_progress_unique_id(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_in_progress_host(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
MOCK_NO_UNIQUE_ID = MOCK_SSDP_DATA.copy()
del MOCK_NO_UNIQUE_ID[ATTR_UPNP_UDN]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_NO_UNIQUE_ID
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert not result["result"].unique_id
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
assert result["result"].unique_id == "only-a-test"
| 37.20059
| 88
| 0.702403
|
ac538702fd5b3d2de4d9c2e05771300632288fcb
| 2,041
|
py
|
Python
|
Lib/json/tests/test_speedups.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/json/tests/test_speedups.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/json/tests/test_speedups.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
from json.tests import CTest
class BadBool:
def __nonzero__(self):
1/0
class TestSpeedups(CTest):
def test_scanstring(self):
self.assertEqual(self.json.decoder.scanstring.__module__, "_json")
self.assertIs(self.json.decoder.scanstring, self.json.decoder.c_scanstring)
def test_encode_basestring_ascii(self):
self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__,
"_json")
self.assertIs(self.json.encoder.encode_basestring_ascii,
self.json.encoder.c_encode_basestring_ascii)
class TestDecode(CTest):
def test_make_scanner(self):
self.assertRaises(AttributeError, self.json.scanner.c_make_scanner, 1)
def test_bad_bool_args(self):
def test(value):
self.json.decoder.JSONDecoder(strict=BadBool()).decode(value)
self.assertRaises(ZeroDivisionError, test, '""')
self.assertRaises(ZeroDivisionError, test, '{}')
self.assertRaises(ZeroDivisionError, test, u'""')
self.assertRaises(ZeroDivisionError, test, u'{}')
class TestEncode(CTest):
def test_make_encoder(self):
self.assertRaises(TypeError, self.json.encoder.c_make_encoder,
None,
"\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75",
None)
def test_bad_bool_args(self):
def test(name):
self.json.encoder.JSONEncoder(**{name: BadBool()}).encode({'a': 1})
self.assertRaises(ZeroDivisionError, test, 'skipkeys')
self.assertRaises(ZeroDivisionError, test, 'ensure_ascii')
self.assertRaises(ZeroDivisionError, test, 'check_circular')
self.assertRaises(ZeroDivisionError, test, 'allow_nan')
self.assertRaises(ZeroDivisionError, test, 'sort_keys')
def test_bad_encoding(self):
with self.assertRaises(UnicodeEncodeError):
self.json.encoder.JSONEncoder(encoding=u'\udcff').encode({'key': 123})
| 39.25
| 92
| 0.660461
|
ad40eb4e07101e6a4f711f4277f8b504dbe88b80
| 7,036
|
py
|
Python
|
projector.py
|
nessessence/stylegan2-pytorch
|
8b17ffd1f05cbbc7966bebe3330914d5ebcac188
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
projector.py
|
nessessence/stylegan2-pytorch
|
8b17ffd1f05cbbc7966bebe3330914d5ebcac188
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
projector.py
|
nessessence/stylegan2-pytorch
|
8b17ffd1f05cbbc7966bebe3330914d5ebcac188
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
import argparse
import math
import os
import torch
from torch import optim
from torch.nn import functional as F
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import lpips
from model import Generator
def noise_regularize(noises):
loss = 0
for noise in noises:
size = noise.shape[2]
while True:
loss = (
loss
+ (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)
+ (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)
)
if size <= 8:
break
noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2])
noise = noise.mean([3, 5])
size //= 2
return loss
def noise_normalize_(noises):
for noise in noises:
mean = noise.mean()
std = noise.std()
noise.data.add_(-mean).div_(std)
def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
lr_ramp = min(1, (1 - t) / rampdown)
lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
lr_ramp = lr_ramp * min(1, t / rampup)
return initial_lr * lr_ramp
def latent_noise(latent, strength):
noise = torch.randn_like(latent) * strength
return latent + noise
def make_image(tensor):
return (
tensor.detach()
.clamp_(min=-1, max=1)
.add(1)
.div_(2)
.mul(255)
.type(torch.uint8)
.permute(0, 2, 3, 1)
.to("cpu")
.numpy()
)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(
description="Image projector to the generator latent spaces"
)
parser.add_argument(
"--ckpt", type=str, required=True, help="path to the model checkpoint"
)
parser.add_argument(
"--size", type=int, default=256, help="output image sizes of the generator"
)
parser.add_argument(
"--lr_rampup",
type=float,
default=0.05,
help="duration of the learning rate warmup",
)
parser.add_argument(
"--lr_rampdown",
type=float,
default=0.25,
help="duration of the learning rate decay",
)
parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
parser.add_argument(
"--noise", type=float, default=0.05, help="strength of the noise level"
)
parser.add_argument(
"--noise_ramp",
type=float,
default=0.75,
help="duration of the noise level decay",
)
parser.add_argument("--step", type=int, default=1000, help="optimize iterations")
parser.add_argument(
"--noise_regularize",
type=float,
default=1e5,
help="weight of the noise regularization",
)
parser.add_argument("--mse", type=float, default=0, help="weight of the mse loss")
parser.add_argument(
"--w_plus",
action="store_true",
help="allow to use distinct latent codes to each layers",
)
parser.add_argument(
"files", metavar="FILES", nargs="+", help="path to image files to be projected"
)
args = parser.parse_args()
n_mean_latent = 10000
resize = min(args.size, 256)
transform = transforms.Compose(
[
transforms.Resize(resize),
transforms.CenterCrop(resize),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
]
)
imgs = []
for imgfile in args.files:
img = transform(Image.open(imgfile).convert("RGB"))
imgs.append(img)
imgs = torch.stack(imgs, 0).to(device)
g_ema = Generator(args.size, 512, 8)
g_ema.load_state_dict(torch.load(args.ckpt)["g_ema"], strict=False)
g_ema.eval()
g_ema = g_ema.to(device)
with torch.no_grad():
noise_sample = torch.randn(n_mean_latent, 512, device=device)
latent_out = g_ema.style(noise_sample)
latent_mean = latent_out.mean(0)
latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5
percept = lpips.PerceptualLoss(
model="net-lin", net="vgg", use_gpu=device.startswith("cuda")
)
noises_single = g_ema.make_noise()
noises = []
for noise in noises_single:
noises.append(noise.repeat(imgs.shape[0], 1, 1, 1).normal_())
latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(imgs.shape[0], 1)
if args.w_plus:
latent_in = latent_in.unsqueeze(1).repeat(1, g_ema.n_latent, 1)
latent_in.requires_grad = True
for noise in noises:
noise.requires_grad = True
optimizer = optim.Adam([latent_in] + noises, lr=args.lr)
pbar = tqdm(range(args.step))
latent_path = []
for i in pbar:
t = i / args.step
lr = get_lr(t, args.lr)
optimizer.param_groups[0]["lr"] = lr
noise_strength = latent_std * args.noise * max(0, 1 - t / args.noise_ramp) ** 2
latent_n = latent_noise(latent_in, noise_strength.item())
img_gen, _ = g_ema([latent_n], input_is_latent=True, noise=noises)
batch, channel, height, width = img_gen.shape
if height > 256:
factor = height // 256
img_gen = img_gen.reshape(
batch, channel, height // factor, factor, width // factor, factor
)
img_gen = img_gen.mean([3, 5])
p_loss = percept(img_gen, imgs).sum()
n_loss = noise_regularize(noises)
mse_loss = F.mse_loss(img_gen, imgs)
loss = p_loss + args.noise_regularize * n_loss + args.mse * mse_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
noise_normalize_(noises)
if (i + 1) % 100 == 0:
latent_path.append(latent_in.detach().clone())
pbar.set_description(
(
f"perceptual: {p_loss.item():.4f}; noise regularize: {n_loss.item():.4f};"
f" mse: {mse_loss.item():.4f}; lr: {lr:.4f}"
)
)
img_gen, _ = g_ema([latent_path[-1]], input_is_latent=True, noise=noises)
filename = os.path.splitext(os.path.basename(args.files[0]))[0] + ".pt"
img_ar = make_image(img_gen)
result_file = {}
for i, input_name in enumerate(args.files):
noise_single = []
for noise in noises:
noise_single.append(noise[i : i + 1])
# latent: "w", img: "inversed image"
result_file[input_name] = {
"img": img_gen[i],
"latent": latent_in[i],
"noise": noise_single,
}
img_name = os.path.splitext(os.path.basename(input_name))[0] + "-project.png"
pil_img = Image.fromarray(img_ar[i])
pil_img.save(img_name)
torch.save(result_file, filename)
| 28.257028
| 91
| 0.561967
|
714e330087927c6cb2f5157f597385fcfd93148e
| 1,103
|
py
|
Python
|
rplugin/python3/denite/source/gtags_context.py
|
deramchmzz/denite-gtags
|
21cbdb90c39b6b6c4908167561394cc2fabafdb0
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/gtags_context.py
|
deramchmzz/denite-gtags
|
21cbdb90c39b6b6c4908167561394cc2fabafdb0
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/gtags_context.py
|
deramchmzz/denite-gtags
|
21cbdb90c39b6b6c4908167561394cc2fabafdb0
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.insert(1, os.path.dirname(__file__))
from denite_gtags import TagsBase # pylint: disable=locally-disabled, wrong-import-position
from denite import util
class Source(TagsBase):
def __init__(self, vim):
super().__init__(vim)
self.name = 'gtags_context'
self.kind = 'file'
def get_search_flags(self):
current_line = self.vim.current.window.cursor[0]
file_name = self.vim.current.window.buffer.name
return [[
'--from-here', '{}:{}'.format(current_line, file_name),
'--result=ctags-mod'
]]
def convert_to_candidates(self, tags):
candidates = []
for tag in tags:
path, line, text = self._parse_tag(tag)
            # NOTE (added): text.find(text) is always 0, so col evaluates to
            # -1 here; this looks like a leftover from a search-term lookup.
            col = text.find(text) - 1
candidates.append({
'word': '{0}:{1};{2}'.format(util.abspath(self.vim, path), line, text),
'action__path': path,
'action__line': line,
'action__text': text,
'action__col': col
})
return candidates
| 29.810811
| 92
| 0.56845
|
02ffe31e6ae2431402cfdd5c5f854e29c6810455
| 449,937
|
py
|
Python
|
python3/lib/python3.6/site-packages/tensorflow/python/ops/gen_data_flow_ops.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 3
|
2020-10-12T15:47:01.000Z
|
2022-01-14T19:51:26.000Z
|
python3/lib/python3.6/site-packages/tensorflow/python/ops/gen_data_flow_ops.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | null | null | null |
python3/lib/python3.6/site-packages/tensorflow/python/ops/gen_data_flow_ops.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: data_flow_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.tf_export import kwarg_only as _kwarg_only
from tensorflow.tools.docs import doc_controls as _doc_controls
def accumulator_apply_gradient(handle, local_step, gradient, name=None):
r"""Applies a gradient to a given accumulator.
  Does not add if local_step is less than the accumulator's global_step.
Args:
    handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
local_step: A `Tensor` of type `int64`.
The local_step value at which the gradient was computed.
gradient: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A tensor of the gradient to be accumulated.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulatorApplyGradient", handle=handle, local_step=local_step,
gradient=gradient, name=name)
return _op
_result = None
return _result
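# Hedged graph-mode sketch (added): applying one gradient to an accumulator
# handle created elsewhere (see conditional_accumulator below); the tensors
# are built with _ops.convert_to_tensor purely for brevity.
#   apply_op = accumulator_apply_gradient(
#       handle,
#       local_step=_ops.convert_to_tensor(0, _dtypes.int64),
#       gradient=_ops.convert_to_tensor([0.5], _dtypes.float32))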
def AccumulatorApplyGradient(handle, local_step, gradient, name=None):
return accumulator_apply_gradient(handle=handle, local_step=local_step, gradient=gradient, name=name)
AccumulatorApplyGradient.__doc__ = accumulator_apply_gradient.__doc__
AccumulatorApplyGradient = _doc_controls.do_not_generate_docs(_kwarg_only(AccumulatorApplyGradient))
tf_export("raw_ops.AccumulatorApplyGradient")(AccumulatorApplyGradient)
def accumulator_apply_gradient_eager_fallback(handle, local_step, gradient, name=None, ctx=None):
raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
def accumulator_num_accumulated(handle, name=None):
r"""Returns the number of gradients aggregated in the given accumulators.
Args:
handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulatorNumAccumulated", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"AccumulatorNumAccumulated", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def AccumulatorNumAccumulated(handle, name=None):
return accumulator_num_accumulated(handle=handle, name=name)
AccumulatorNumAccumulated.__doc__ = accumulator_num_accumulated.__doc__
AccumulatorNumAccumulated = _doc_controls.do_not_generate_docs(_kwarg_only(AccumulatorNumAccumulated))
tf_export("raw_ops.AccumulatorNumAccumulated")(AccumulatorNumAccumulated)
def accumulator_num_accumulated_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")
def accumulator_set_global_step(handle, new_global_step, name=None):
r"""Updates the accumulator with a new value for global_step.
Logs warning if the accumulator's value is already higher than
new_global_step.
Args:
handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
new_global_step: A `Tensor` of type `int64`.
The new global_step value to set.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulatorSetGlobalStep", handle=handle,
new_global_step=new_global_step,
name=name)
return _op
_result = None
return _result
def AccumulatorSetGlobalStep(handle, new_global_step, name=None):
return accumulator_set_global_step(handle=handle, new_global_step=new_global_step, name=name)
AccumulatorSetGlobalStep.__doc__ = accumulator_set_global_step.__doc__
AccumulatorSetGlobalStep = _doc_controls.do_not_generate_docs(_kwarg_only(AccumulatorSetGlobalStep))
tf_export("raw_ops.AccumulatorSetGlobalStep")(AccumulatorSetGlobalStep)
def accumulator_set_global_step_eager_fallback(handle, new_global_step, name=None, ctx=None):
raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")
def accumulator_take_gradient(handle, num_required, dtype, name=None):
r"""Extracts the average gradient in the given ConditionalAccumulator.
The op blocks until sufficient (i.e., more than num_required)
gradients have been accumulated. If the accumulator has already
aggregated more than num_required gradients, it returns the average of
the accumulated gradients. Also automatically increments the recorded
global_step in the accumulator by 1, and resets the aggregate to 0.
Args:
handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
num_required: A `Tensor` of type `int32`.
Number of gradients required before we return an aggregate.
dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
The data type of accumulated gradients. Needs to correspond to the type
of the accumulator.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulatorTakeGradient", handle=handle, num_required=num_required,
dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"AccumulatorTakeGradient", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
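# Hedged sketch (added): extracting the running average once num_required
# gradients have landed; in a real graph this tensor is fetched via
# Session.run, which blocks until the accumulator is sufficiently full.
#   avg = accumulator_take_gradient(handle, num_required=2,
#                                   dtype=_dtypes.float32)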
def AccumulatorTakeGradient(handle, num_required, dtype, name=None):
return accumulator_take_gradient(handle=handle, num_required=num_required, dtype=dtype, name=name)
AccumulatorTakeGradient.__doc__ = accumulator_take_gradient.__doc__
AccumulatorTakeGradient = _doc_controls.do_not_generate_docs(_kwarg_only(AccumulatorTakeGradient))
tf_export("raw_ops.AccumulatorTakeGradient")(AccumulatorTakeGradient)
def accumulator_take_gradient_eager_fallback(handle, num_required, dtype, name=None, ctx=None):
raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
def barrier(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
r"""Defines a barrier that persists across different graph executions.
A barrier represents a key-value map, where each key is a string, and
each value is a tuple of tensors.
At runtime, the barrier contains 'complete' and 'incomplete'
elements. A complete element has defined tensors for all components of
its value tuple, and may be accessed using BarrierTakeMany. An
incomplete element has some undefined components in its value tuple,
and may be updated using BarrierInsertMany.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. Each shape must be 1 in the
first dimension. The length of this attr must be the same as the length of
component_types.
capacity: An optional `int`. Defaults to `-1`.
The capacity of the barrier. The default capacity is MAX_INT32,
which is the largest capacity of the underlying queue.
container: An optional `string`. Defaults to `""`.
If non-empty, this barrier is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this barrier will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'barrier' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'barrier' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"Barrier", component_types=component_types, shapes=shapes,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"Barrier", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
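# Hedged sketch (added): a barrier combined with the matching insert/take ops
# defined below; keys_t, vals_t and n_t are assumed placeholder tensors, and
# each value tuple here has a single float32 component of shape [1].
#   b = barrier(component_types=[_dtypes.float32], shapes=[[1]])
#   ins = barrier_insert_many(b, keys=keys_t, values=vals_t, component_index=0)
#   out = barrier_take_many(b, num_elements=n_t,
#                           component_types=[_dtypes.float32])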
def Barrier(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
return barrier(component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name, name=name)
Barrier.__doc__ = barrier.__doc__
Barrier = _doc_controls.do_not_generate_docs(_kwarg_only(Barrier))
tf_export("raw_ops.Barrier")(Barrier)
def barrier_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")
def barrier_close(handle, cancel_pending_enqueues=False, name=None):
r"""Closes the given barrier.
This operation signals that no more new elements will be inserted in the
given barrier. Subsequent InsertMany that try to introduce a new key will fail.
Subsequent InsertMany operations that just add missing components to already
existing elements will continue to succeed. Subsequent TakeMany operations will
continue to succeed if sufficient completed elements remain in the barrier.
Subsequent TakeMany operations that would block will fail immediately.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a barrier.
cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
If true, all pending enqueue requests that are
blocked on the barrier's queue will be canceled. InsertMany will fail, even
if no new key is introduced.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if cancel_pending_enqueues is None:
cancel_pending_enqueues = False
cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
_, _, _op = _op_def_lib._apply_op_helper(
"BarrierClose", handle=handle,
cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
return _op
_result = None
return _result
def BarrierClose(handle, cancel_pending_enqueues=False, name=None):
return barrier_close(handle=handle, cancel_pending_enqueues=cancel_pending_enqueues, name=name)
BarrierClose.__doc__ = barrier_close.__doc__
BarrierClose = _doc_controls.do_not_generate_docs(_kwarg_only(BarrierClose))
tf_export("raw_ops.BarrierClose")(BarrierClose)
def barrier_close_eager_fallback(handle, cancel_pending_enqueues=False, name=None, ctx=None):
raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")
def barrier_incomplete_size(handle, name=None):
r"""Computes the number of incomplete elements in the given barrier.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a barrier.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"BarrierIncompleteSize", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BarrierIncompleteSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def BarrierIncompleteSize(handle, name=None):
return barrier_incomplete_size(handle=handle, name=name)
BarrierIncompleteSize.__doc__ = barrier_incomplete_size.__doc__
BarrierIncompleteSize = _doc_controls.do_not_generate_docs(_kwarg_only(BarrierIncompleteSize))
tf_export("raw_ops.BarrierIncompleteSize")(BarrierIncompleteSize)
def barrier_incomplete_size_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")
def barrier_insert_many(handle, keys, values, component_index, name=None):
r"""For each key, assigns the respective value to the specified component.
If a key is not found in the barrier, this operation will create a new
incomplete element. If a key is found in the barrier, and the element
already has a value at component_index, this operation will fail with
INVALID_ARGUMENT, and leave the barrier in an undefined state.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a barrier.
keys: A `Tensor` of type `string`.
A one-dimensional tensor of keys, with length n.
values: A `Tensor`.
An any-dimensional tensor of values, which are associated with the
respective keys. The 0th dimension must have length n.
component_index: An `int`.
The component of the barrier elements that is being assigned.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
component_index = _execute.make_int(component_index, "component_index")
_, _, _op = _op_def_lib._apply_op_helper(
"BarrierInsertMany", handle=handle, keys=keys, values=values,
component_index=component_index, name=name)
return _op
_result = None
return _result
def BarrierInsertMany(handle, keys, values, component_index, name=None):
return barrier_insert_many(handle=handle, keys=keys, values=values, component_index=component_index, name=name)
BarrierInsertMany.__doc__ = barrier_insert_many.__doc__
BarrierInsertMany = _doc_controls.do_not_generate_docs(_kwarg_only(BarrierInsertMany))
tf_export("raw_ops.BarrierInsertMany")(BarrierInsertMany)
def barrier_insert_many_eager_fallback(handle, keys, values, component_index, name=None, ctx=None):
raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")
def barrier_ready_size(handle, name=None):
r"""Computes the number of complete elements in the given barrier.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a barrier.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"BarrierReadySize", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BarrierReadySize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def BarrierReadySize(handle, name=None):
return barrier_ready_size(handle=handle, name=name)
BarrierReadySize.__doc__ = barrier_ready_size.__doc__
BarrierReadySize = _doc_controls.do_not_generate_docs(_kwarg_only(BarrierReadySize))
tf_export("raw_ops.BarrierReadySize")(BarrierReadySize)
def barrier_ready_size_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")
_barrier_take_many_outputs = ["indices", "keys", "values"]
_BarrierTakeManyOutput = _collections.namedtuple(
"BarrierTakeMany", _barrier_take_many_outputs)
def barrier_take_many(handle, num_elements, component_types, allow_small_batch=False, wait_for_incomplete=False, timeout_ms=-1, name=None):
r"""Takes the given number of completed elements from a barrier.
This operation concatenates completed-element component tensors along
the 0th dimension to make a single component tensor.
Elements come out of the barrier when they are complete, and in the order
in which they were placed into the barrier. The indices output provides
information about the batch in which each element was originally inserted
into the barrier.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a barrier.
num_elements: A `Tensor` of type `int32`.
A single-element tensor containing the number of elements to
take.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
allow_small_batch: An optional `bool`. Defaults to `False`.
Allow to return less than num_elements items if barrier is
already closed.
wait_for_incomplete: An optional `bool`. Defaults to `False`.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is empty, this operation will block for up to
timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (indices, keys, values).
indices: A `Tensor` of type `int64`.
keys: A `Tensor` of type `string`.
values: A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'barrier_take_many' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if allow_small_batch is None:
allow_small_batch = False
allow_small_batch = _execute.make_bool(allow_small_batch, "allow_small_batch")
if wait_for_incomplete is None:
wait_for_incomplete = False
wait_for_incomplete = _execute.make_bool(wait_for_incomplete, "wait_for_incomplete")
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"BarrierTakeMany", handle=handle, num_elements=num_elements,
component_types=component_types,
allow_small_batch=allow_small_batch,
wait_for_incomplete=wait_for_incomplete,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"),
"allow_small_batch", _op.get_attr("allow_small_batch"),
"wait_for_incomplete", _op.get_attr("wait_for_incomplete"),
"timeout_ms", _op.get_attr("timeout_ms"))
_execute.record_gradient(
"BarrierTakeMany", _inputs_flat, _attrs, _result, name)
_result = _result[:2] + [_result[2:]]
_result = _BarrierTakeManyOutput._make(_result)
return _result
def BarrierTakeMany(handle, num_elements, component_types, allow_small_batch=False, wait_for_incomplete=False, timeout_ms=-1, name=None):
return barrier_take_many(handle=handle, num_elements=num_elements, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms, name=name)
BarrierTakeMany.__doc__ = barrier_take_many.__doc__
BarrierTakeMany = _doc_controls.do_not_generate_docs(_kwarg_only(BarrierTakeMany))
tf_export("raw_ops.BarrierTakeMany")(BarrierTakeMany)
def barrier_take_many_eager_fallback(handle, num_elements, component_types, allow_small_batch=False, wait_for_incomplete=False, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")
def conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
r"""A conditional accumulator for aggregating gradients.
The accumulator accepts gradients marked with local_step greater or
equal to the most recent global_step known to the accumulator. The
average can be extracted from the accumulator, provided sufficient
gradients have been accumulated. Extracting the average automatically
resets the aggregate to 0, and increments the global_step recorded by
the accumulator.
Args:
dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
The type of the value being accumulated.
shape: A `tf.TensorShape` or list of `ints`.
The shape of the values, can be [], in which case shape is unknown.
container: An optional `string`. Defaults to `""`.
If non-empty, this accumulator is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this accumulator will be shared under the
given name across multiple sessions.
reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
shape = _execute.make_shape(shape, "shape")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
if reduction_type is None:
reduction_type = "MEAN"
reduction_type = _execute.make_str(reduction_type, "reduction_type")
_, _, _op = _op_def_lib._apply_op_helper(
"ConditionalAccumulator", dtype=dtype, shape=shape,
container=container,
shared_name=shared_name,
reduction_type=reduction_type, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"), "reduction_type",
_op.get_attr("reduction_type"))
_execute.record_gradient(
"ConditionalAccumulator", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def ConditionalAccumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
return conditional_accumulator(dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type, name=name)
ConditionalAccumulator.__doc__ = conditional_accumulator.__doc__
ConditionalAccumulator = _doc_controls.do_not_generate_docs(_kwarg_only(ConditionalAccumulator))
tf_export("raw_ops.ConditionalAccumulator")(ConditionalAccumulator)
def conditional_accumulator_eager_fallback(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None, ctx=None):
raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
def delete_session_tensor(handle, name=None):
r"""Delete the tensor specified by its handle in the session.
Args:
handle: A `Tensor` of type `string`.
The handle for a tensor stored in the session state.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"DeleteSessionTensor", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return delete_session_tensor_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"DeleteSessionTensor", handle=handle, name=name)
return _op
_result = None
return _result
def DeleteSessionTensor(handle, name=None):
return delete_session_tensor(handle=handle, name=name)
DeleteSessionTensor.__doc__ = delete_session_tensor.__doc__
DeleteSessionTensor = _doc_controls.do_not_generate_docs(_kwarg_only(DeleteSessionTensor))
tf_export("raw_ops.DeleteSessionTensor")(DeleteSessionTensor)
def delete_session_tensor_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function delete_session_tensor
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.string)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"DeleteSessionTensor", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
@_dispatch.add_dispatch_list
@tf_export('dynamic_partition')
def dynamic_partition(data, partitions, num_partitions, name=None):
r"""Partitions `data` into `num_partitions` tensors using indices from `partitions`.
For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
are placed in `outputs[i]` in lexicographic order of `js`, and the first
dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
In detail,
```python
outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
```
`data.shape` must start with `partitions.shape`.
For example:
```python
# Scalar partitions.
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = [] # Empty with shape [0, 2]
outputs[1] = [[10, 20]]
# Vector partitions.
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
```
See `dynamic_stitch` for an example on how to merge partitions back.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
</div>
Args:
data: A `Tensor`.
partitions: A `Tensor` of type `int32`.
Any shape. Indices in the range `[0, num_partitions)`.
num_partitions: An `int` that is `>= 1`.
The number of partitions to output.
name: A name for the operation (optional).
Returns:
A list of `num_partitions` `Tensor` objects with the same type as `data`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"DynamicPartition", name, _ctx._post_execution_callbacks, data,
partitions, "num_partitions", num_partitions)
return _result
except _core._FallbackException:
try:
return dynamic_partition_eager_fallback(
data, partitions, num_partitions=num_partitions, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
dynamic_partition, data=data, partitions=partitions,
num_partitions=num_partitions, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
num_partitions = _execute.make_int(num_partitions, "num_partitions")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"DynamicPartition", data=data, partitions=partitions,
num_partitions=num_partitions, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
dynamic_partition, data=data, partitions=partitions,
num_partitions=num_partitions, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("num_partitions", _op.get_attr("num_partitions"), "T",
_op.get_attr("T"))
_execute.record_gradient(
"DynamicPartition", _inputs_flat, _attrs, _result, name)
return _result
def DynamicPartition(data, partitions, num_partitions, name=None):
return dynamic_partition(data=data, partitions=partitions, num_partitions=num_partitions, name=name)
DynamicPartition.__doc__ = dynamic_partition.__doc__
DynamicPartition = _doc_controls.do_not_generate_docs(_kwarg_only(DynamicPartition))
tf_export("raw_ops.DynamicPartition")(DynamicPartition)
def dynamic_partition_eager_fallback(data, partitions, num_partitions, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function dynamic_partition
"""
_ctx = ctx if ctx else _context.context()
num_partitions = _execute.make_int(num_partitions, "num_partitions")
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
partitions = _ops.convert_to_tensor(partitions, _dtypes.int32)
_inputs_flat = [data, partitions]
_attrs = ("num_partitions", num_partitions, "T", _attr_T)
_result = _execute.execute(b"DynamicPartition", num_partitions,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"DynamicPartition", _inputs_flat, _attrs, _result, name)
return _result
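# Example (hedged sketch): `dynamic_partition` is exported as
# `tf.dynamic_partition` above, matching the vector case in the docstring:
#
#   import tensorflow as tf  # eager mode assumed
#   parts = tf.dynamic_partition(
#       data=tf.constant([10, 20, 30, 40, 50]),
#       partitions=tf.constant([0, 0, 1, 1, 0]), num_partitions=2)
#   # parts[0] == [10, 20, 50], parts[1] == [30, 40]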
@_dispatch.add_dispatch_list
@tf_export('dynamic_stitch')
def dynamic_stitch(indices, data, name=None):
r"""Interleave the values from the `data` tensors into a single tensor.
Builds a merged tensor such that
```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```
For example, if each `indices[m]` is scalar or vector, we have
```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]
# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```
Each `data[i].shape` must start with the corresponding `indices[i].shape`,
and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
must have `data[i].shape = indices[i].shape + constant`. In terms of this
`constant`, the output shape is
      merged.shape = [max(indices)] + constant
Values are merged in order, so if an index appears in both `indices[m][i]` and
`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
merged result. If you do not need this guarantee, ParallelDynamicStitch might
perform better on some devices.
For example:
```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
[51, 52], [61, 62]]
```
This method can be used to merge partitions created by `dynamic_partition`
as illustrated on the following example:
```python
# Apply function (increments x_i) on elements for which a certain condition
# apply (x_i != -1 in this example).
x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
condition_mask=tf.not_equal(x,tf.constant(-1.))
partitioned_data = tf.dynamic_partition(
x, tf.cast(condition_mask, tf.int32) , 2)
partitioned_data[1] = partitioned_data[1] + 1.0
condition_indices = tf.dynamic_partition(
tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
x = tf.dynamic_stitch(condition_indices, partitioned_data)
# Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
# unchanged.
```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
</div>
Args:
indices: A list of at least 1 `Tensor` objects with type `int32`.
data: A list with the same length as `indices` of `Tensor` objects with the same type.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"DynamicStitch", name, _ctx._post_execution_callbacks, indices, data)
return _result
except _core._FallbackException:
try:
return dynamic_stitch_eager_fallback(
indices, data, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
dynamic_stitch, indices=indices, data=data, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(indices, (list, tuple)):
raise TypeError(
"Expected list for 'indices' argument to "
"'dynamic_stitch' Op, not %r." % indices)
_attr_N = len(indices)
if not isinstance(data, (list, tuple)):
raise TypeError(
"Expected list for 'data' argument to "
"'dynamic_stitch' Op, not %r." % data)
if len(data) != _attr_N:
raise ValueError(
"List argument 'data' to 'dynamic_stitch' Op with length %d "
"must match length %d of argument 'indices'." %
(len(data), _attr_N))
try:
_, _, _op = _op_def_lib._apply_op_helper(
"DynamicStitch", indices=indices, data=data, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
dynamic_stitch, indices=indices, data=data, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
_execute.record_gradient(
"DynamicStitch", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def DynamicStitch(indices, data, name=None):
return dynamic_stitch(indices=indices, data=data, name=name)
DynamicStitch.__doc__ = dynamic_stitch.__doc__
DynamicStitch = _doc_controls.do_not_generate_docs(_kwarg_only(DynamicStitch))
tf_export("raw_ops.DynamicStitch")(DynamicStitch)
def dynamic_stitch_eager_fallback(indices, data, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function dynamic_stitch
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(indices, (list, tuple)):
raise TypeError(
"Expected list for 'indices' argument to "
"'dynamic_stitch' Op, not %r." % indices)
_attr_N = len(indices)
if not isinstance(data, (list, tuple)):
raise TypeError(
"Expected list for 'data' argument to "
"'dynamic_stitch' Op, not %r." % data)
if len(data) != _attr_N:
raise ValueError(
"List argument 'data' to 'dynamic_stitch' Op with length %d "
"must match length %d of argument 'indices'." %
(len(data), _attr_N))
_attr_T, data = _execute.args_to_matching_eager(list(data), _ctx)
indices = _ops.convert_n_to_tensor(indices, _dtypes.int32)
_inputs_flat = list(indices) + list(data)
_attrs = ("N", _attr_N, "T", _attr_T)
_result = _execute.execute(b"DynamicStitch", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DynamicStitch", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
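# Example (hedged sketch, eager mode assumed): the inverse of
# `dynamic_partition`, interleaving values back by index:
#
#   import tensorflow as tf
#   merged = tf.dynamic_stitch(
#       indices=[tf.constant([0, 2]), tf.constant([1])],
#       data=[tf.constant([10, 30]), tf.constant([20])])
#   # merged == [10, 20, 30]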
def fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements in first-in first-out order.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'fifo_queue' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'fifo_queue' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"FIFOQueue", component_types=component_types, shapes=shapes,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"FIFOQueue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def FIFOQueue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
return fifo_queue(component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name, name=name)
FIFOQueue.__doc__ = fifo_queue.__doc__
FIFOQueue = _doc_controls.do_not_generate_docs(_kwarg_only(FIFOQueue))
tf_export("raw_ops.FIFOQueue")(FIFOQueue)
def fifo_queue_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
raise RuntimeError("fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
def fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements in first-in first-out order.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"FIFOQueueV2", name, _ctx._post_execution_callbacks,
"component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return fifo_queue_v2_eager_fallback(
component_types=component_types, shapes=shapes, capacity=capacity,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'fifo_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'fifo_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"FIFOQueueV2", component_types=component_types, shapes=shapes,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"FIFOQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def FIFOQueueV2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
return fifo_queue_v2(component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name, name=name)
FIFOQueueV2.__doc__ = fifo_queue_v2.__doc__
FIFOQueueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(FIFOQueueV2))
tf_export("raw_ops.FIFOQueueV2")(FIFOQueueV2)
def fifo_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function fifo_queue_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'fifo_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'fifo_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"FIFOQueueV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"FIFOQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
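# Example (hedged sketch): unlike `FIFOQueue`, `FIFOQueueV2` returns a
# resource handle, so it also runs eagerly. Assumes the companion
# `QueueEnqueueV2` / `QueueDequeueV2` ops from this module:
#
#   import tensorflow as tf  # eager mode assumed
#   q = tf.raw_ops.FIFOQueueV2(component_types=[tf.int32], capacity=2)
#   tf.raw_ops.QueueEnqueueV2(handle=q, components=[tf.constant(7)])
#   out = tf.raw_ops.QueueDequeueV2(handle=q, component_types=[tf.int32])
#   # out[0] == 7, dequeued in first-in first-out order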
def fake_queue(resource, name=None):
r"""Deprecated. Do not use.
Args:
resource: A `Tensor` of type `resource`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("fake_queue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"FakeQueue", resource=resource, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"FakeQueue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def FakeQueue(resource, name=None):
return fake_queue(resource=resource, name=name)
FakeQueue.__doc__ = fake_queue.__doc__
FakeQueue = _doc_controls.do_not_generate_docs(_kwarg_only(FakeQueue))
tf_export("raw_ops.FakeQueue")(FakeQueue)
def fake_queue_eager_fallback(resource, name=None, ctx=None):
raise RuntimeError("fake_queue op does not support eager execution. Arg 'handle' is a ref.")
def get_session_handle(value, name=None):
r"""Store the input tensor in the state of the current session.
Args:
value: A `Tensor`. The tensor to be stored.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"GetSessionHandle", name, _ctx._post_execution_callbacks, value)
return _result
except _core._FallbackException:
try:
return get_session_handle_eager_fallback(
value, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"GetSessionHandle", value=value, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"GetSessionHandle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def GetSessionHandle(value, name=None):
return get_session_handle(value=value, name=name)
GetSessionHandle.__doc__ = get_session_handle.__doc__
GetSessionHandle = _doc_controls.do_not_generate_docs(_kwarg_only(GetSessionHandle))
tf_export("raw_ops.GetSessionHandle")(GetSessionHandle)
def get_session_handle_eager_fallback(value, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function get_session_handle
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
_inputs_flat = [value]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"GetSessionHandle", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"GetSessionHandle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def get_session_handle_v2(value, name=None):
r"""Store the input tensor in the state of the current session.
Args:
value: A `Tensor`. The tensor to be stored.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"GetSessionHandleV2", name, _ctx._post_execution_callbacks, value)
return _result
except _core._FallbackException:
try:
return get_session_handle_v2_eager_fallback(
value, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"GetSessionHandleV2", value=value, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"GetSessionHandleV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def GetSessionHandleV2(value, name=None):
return get_session_handle_v2(value=value, name=name)
GetSessionHandleV2.__doc__ = get_session_handle_v2.__doc__
GetSessionHandleV2 = _doc_controls.do_not_generate_docs(_kwarg_only(GetSessionHandleV2))
tf_export("raw_ops.GetSessionHandleV2")(GetSessionHandleV2)
def get_session_handle_v2_eager_fallback(value, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function get_session_handle_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
_inputs_flat = [value]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"GetSessionHandleV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"GetSessionHandleV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def get_session_tensor(handle, dtype, name=None):
r"""Get the value of the tensor specified by its handle.
Args:
handle: A `Tensor` of type `string`.
The handle for a tensor stored in the session state.
dtype: A `tf.DType`. The type of the output value.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"GetSessionTensor", name, _ctx._post_execution_callbacks, handle,
"dtype", dtype)
return _result
except _core._FallbackException:
try:
return get_session_tensor_eager_fallback(
handle, dtype=dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"GetSessionTensor", handle=handle, dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"GetSessionTensor", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def GetSessionTensor(handle, dtype, name=None):
return get_session_tensor(handle=handle, dtype=dtype, name=name)
GetSessionTensor.__doc__ = get_session_tensor.__doc__
GetSessionTensor = _doc_controls.do_not_generate_docs(_kwarg_only(GetSessionTensor))
tf_export("raw_ops.GetSessionTensor")(GetSessionTensor)
def get_session_tensor_eager_fallback(handle, dtype, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function get_session_tensor
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
handle = _ops.convert_to_tensor(handle, _dtypes.string)
_inputs_flat = [handle]
_attrs = ("dtype", dtype)
_result = _execute.execute(b"GetSessionTensor", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"GetSessionTensor", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
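# Example (hedged sketch of the round trip across `GetSessionHandle`,
# `GetSessionTensor`, and `DeleteSessionTensor`; graph mode with a TF 1.x
# session, since the handle is only valid inside the producing session):
#
#   import tensorflow as tf
#   with tf.Graph().as_default():
#     value = tf.constant([1.0, 2.0])
#     handle_op = tf.raw_ops.GetSessionHandle(value=value)
#     h_in = tf.compat.v1.placeholder(tf.string)
#     fetch = tf.raw_ops.GetSessionTensor(handle=h_in, dtype=tf.float32)
#     delete = tf.raw_ops.DeleteSessionTensor(handle=h_in)
#     with tf.compat.v1.Session() as sess:
#       h = sess.run(handle_op)            # store the tensor, get a handle
#       print(sess.run(fetch, {h_in: h}))  # [1. 2.]
#       sess.run(delete, {h_in: h})        # free the stored tensor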
def map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes all elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "MapClear",
name, _ctx._post_execution_callbacks, "capacity", capacity,
"memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_clear_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapClear", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
return _op
_result = None
return _result
def MapClear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_clear(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapClear.__doc__ = map_clear.__doc__
MapClear = _doc_controls.do_not_generate_docs(_kwarg_only(MapClear))
tf_export("raw_ops.MapClear")(MapClear)
def map_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_clear
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapClear", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op returns the number of incomplete elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"MapIncompleteSize", name, _ctx._post_execution_callbacks, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_incomplete_size_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_incomplete_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapIncompleteSize", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"MapIncompleteSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def MapIncompleteSize(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_incomplete_size(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapIncompleteSize.__doc__ = map_incomplete_size.__doc__
MapIncompleteSize = _doc_controls.do_not_generate_docs(_kwarg_only(MapIncompleteSize))
tf_export("raw_ops.MapIncompleteSize")(MapIncompleteSize)
def map_incomplete_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_incomplete_size
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_incomplete_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapIncompleteSize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"MapIncompleteSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op peeks at the values at the specified key. If the
underlying container does not contain this key
this op will block until it does.
Args:
key: A `Tensor` of type `int64`.
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "MapPeek",
name, _ctx._post_execution_callbacks, key, indices, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_peek_eager_fallback(
key, indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapPeek", key=key, indices=indices, dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"MapPeek", _inputs_flat, _attrs, _result, name)
return _result
def MapPeek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_peek(key=key, indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapPeek.__doc__ = map_peek.__doc__
MapPeek = _doc_controls.do_not_generate_docs(_kwarg_only(MapPeek))
tf_export("raw_ops.MapPeek")(MapPeek)
def map_peek_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_peek
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapPeek", len(dtypes), inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"MapPeek", _inputs_flat, _attrs, _result, name)
return _result
def map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op returns the number of elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "MapSize",
name, _ctx._post_execution_callbacks, "capacity", capacity,
"memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_size_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapSize", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"MapSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def MapSize(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_size(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapSize.__doc__ = map_size.__doc__
MapSize = _doc_controls.do_not_generate_docs(_kwarg_only(MapSize))
tf_export("raw_ops.MapSize")(MapSize)
def map_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_size
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapSize", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"MapSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
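# Example (hedged sketch, eager mode assumed): the map addressed by the
# default container/shared_name starts out empty:
#
#   import tensorflow as tf
#   print(tf.raw_ops.MapSize(dtypes=[tf.float32]))  # 0 elements staged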
def map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Stage (key, values) in the underlying container which behaves like a hashtable.
Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors whose data types
      should adhere to `dtypes`.
    dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
Maximum number of elements in the Staging Area. If > 0, inserts
on the container will block when the capacity is reached.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container. Otherwise,
a default container is used.
shared_name: An optional `string`. Defaults to `""`.
It is necessary to match this name to the matching Unstage Op.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "MapStage",
name, _ctx._post_execution_callbacks, key, indices, values,
"capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
"container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_stage_eager_fallback(
key, indices, values, capacity=capacity,
memory_limit=memory_limit, dtypes=dtypes, container=container,
shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_stage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapStage", key=key, indices=indices, values=values, dtypes=dtypes,
capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name)
return _op
_result = None
return _result
def MapStage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_stage(key=key, indices=indices, values=values, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapStage.__doc__ = map_stage.__doc__
MapStage = _doc_controls.do_not_generate_docs(_kwarg_only(MapStage))
tf_export("raw_ops.MapStage")(MapStage)
def map_stage_eager_fallback(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_stage
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_stage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices] + list(values)
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "fake_dtypes", _attr_fake_dtypes, "container", container,
"shared_name", shared_name)
_result = _execute.execute(b"MapStage", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes and returns the values associated with the key
from the underlying container. If the underlying container
does not contain this key, the op will block until it does.
Args:
key: A `Tensor` of type `int64`.
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"MapUnstage", name, _ctx._post_execution_callbacks, key, indices,
"capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
"container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return map_unstage_eager_fallback(
key, indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapUnstage", key=key, indices=indices, dtypes=dtypes,
capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"MapUnstage", _inputs_flat, _attrs, _result, name)
return _result
def MapUnstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_unstage(key=key, indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapUnstage.__doc__ = map_unstage.__doc__
MapUnstage = _doc_controls.do_not_generate_docs(_kwarg_only(MapUnstage))
tf_export("raw_ops.MapUnstage")(MapUnstage)
def map_unstage_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_unstage
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapUnstage", len(dtypes), inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"MapUnstage", _inputs_flat, _attrs, _result, name)
return _result
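
# --- Illustrative usage sketch (editor-added commentary; not part of the
# generated bindings). Shows a MapStage/MapUnstage round trip through a shared
# staging map in TF1-style graph mode. The key/value literals and the
# shared_name "example_map" are hypothetical, and this assumes `map_stage`
# (defined earlier in this module) has the same (key, indices, values, dtypes,
# ...) keyword signature as the other map ops here.
def _example_map_stage_unstage_round_trip():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    key = tf1.constant(1, dtype=tf1.int64)
    value = tf1.constant([1.0, 2.0, 3.0])
    # Stage component 0 of a single-component tuple under key 1.
    stage = map_stage(key=key, indices=[0], values=[value],
                      dtypes=[tf1.float32], shared_name="example_map")
    # Unstage by the same key; this blocks until the key is present.
    unstaged = map_unstage(key=key, indices=[0], dtypes=[tf1.float32],
                           shared_name="example_map")
    with tf1.Session() as sess:
      sess.run(stage)
      print(sess.run(unstaged))  # -> [array([1., 2., 3.], dtype=float32)]
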
_map_unstage_no_key_outputs = ["key", "values"]
_MapUnstageNoKeyOutput = _collections.namedtuple(
"MapUnstageNoKey", _map_unstage_no_key_outputs)
def map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes and returns a random (key, value)
from the underlying container. If the underlying container
does not contain elements, the op will block until it does.
Args:
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (key, values).
key: A `Tensor` of type `int64`.
values: A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"MapUnstageNoKey", name, _ctx._post_execution_callbacks, indices,
"capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
"container", container, "shared_name", shared_name)
_result = _MapUnstageNoKeyOutput._make(_result)
return _result
except _core._FallbackException:
try:
return map_unstage_no_key_eager_fallback(
indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_unstage_no_key' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"MapUnstageNoKey", indices=indices, dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"MapUnstageNoKey", _inputs_flat, _attrs, _result, name)
_result = _result[:1] + [_result[1:]]
_result = _MapUnstageNoKeyOutput._make(_result)
return _result
def MapUnstageNoKey(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return map_unstage_no_key(indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
MapUnstageNoKey.__doc__ = map_unstage_no_key.__doc__
MapUnstageNoKey = _doc_controls.do_not_generate_docs(_kwarg_only(MapUnstageNoKey))
tf_export("raw_ops.MapUnstageNoKey")(MapUnstageNoKey)
def map_unstage_no_key_eager_fallback(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function map_unstage_no_key
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'map_unstage_no_key' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"MapUnstageNoKey", len(dtypes) + 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"MapUnstageNoKey", _inputs_flat, _attrs, _result, name)
_result = _result[:1] + [_result[1:]]
_result = _MapUnstageNoKeyOutput._make(_result)
return _result
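
# --- Illustrative usage sketch (editor-added): MapUnstageNoKey pops an
# arbitrary (key, values) pair and exposes it through the namedtuple defined
# above. Literals and the shared_name "example_map_nokey" are hypothetical.
def _example_map_unstage_no_key():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    stage = map_stage(key=tf1.constant(7, dtype=tf1.int64), indices=[0],
                      values=[tf1.constant(42)], dtypes=[tf1.int32],
                      shared_name="example_map_nokey")
    popped = map_unstage_no_key(indices=[0], dtypes=[tf1.int32],
                                shared_name="example_map_nokey")
    with tf1.Session() as sess:
      sess.run(stage)
      key_val, values_val = sess.run([popped.key, popped.values])
      # key_val == 7, values_val == [42]: the only staged pair was removed.
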
def ordered_map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes all elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapClear", name, _ctx._post_execution_callbacks, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_clear_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapClear", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
return _op
_result = None
return _result
def OrderedMapClear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_clear(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapClear.__doc__ = ordered_map_clear.__doc__
OrderedMapClear = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapClear))
tf_export("raw_ops.OrderedMapClear")(OrderedMapClear)
def ordered_map_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_clear
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapClear", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def ordered_map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op returns the number of incomplete elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapIncompleteSize", name, _ctx._post_execution_callbacks,
"capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
"container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_incomplete_size_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_incomplete_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapIncompleteSize", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit,
container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"OrderedMapIncompleteSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def OrderedMapIncompleteSize(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_incomplete_size(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapIncompleteSize.__doc__ = ordered_map_incomplete_size.__doc__
OrderedMapIncompleteSize = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapIncompleteSize))
tf_export("raw_ops.OrderedMapIncompleteSize")(OrderedMapIncompleteSize)
def ordered_map_incomplete_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_incomplete_size
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_incomplete_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapIncompleteSize", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"OrderedMapIncompleteSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
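
# --- Illustrative usage sketch (editor-added): an element staged with only
# some of its tuple components counts toward OrderedMapIncompleteSize until
# the remaining components arrive. Names and literals are hypothetical.
def _example_ordered_map_incomplete_size():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    two_types = [tf1.float32, tf1.int32]
    # Stage only component 0 of a two-component tuple for key 0.
    stage = ordered_map_stage(key=tf1.constant(0, dtype=tf1.int64),
                              indices=[0], values=[tf1.constant(1.0)],
                              dtypes=two_types, shared_name="example_inc")
    incomplete = ordered_map_incomplete_size(dtypes=two_types,
                                             shared_name="example_inc")
    with tf1.Session() as sess:
      sess.run(stage)
      print(sess.run(incomplete))  # -> 1 (component 1 has not been staged)
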
def ordered_map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op peeks at the values at the specified key. If the
  underlying container does not contain this key,
  this op will block until it does. This Op is optimized for
performance.
Args:
key: A `Tensor` of type `int64`.
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapPeek", name, _ctx._post_execution_callbacks, key, indices,
"capacity", capacity, "memory_limit", memory_limit, "dtypes", dtypes,
"container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_peek_eager_fallback(
key, indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapPeek", key=key, indices=indices, dtypes=dtypes,
capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name,
name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"OrderedMapPeek", _inputs_flat, _attrs, _result, name)
return _result
def OrderedMapPeek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_peek(key=key, indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapPeek.__doc__ = ordered_map_peek.__doc__
OrderedMapPeek = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapPeek))
tf_export("raw_ops.OrderedMapPeek")(OrderedMapPeek)
def ordered_map_peek_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_peek
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapPeek", len(dtypes),
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"OrderedMapPeek", _inputs_flat, _attrs, _result, name)
return _result
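
# --- Illustrative usage sketch (editor-added): OrderedMapPeek reads the
# values for a key without consuming them, unlike OrderedMapUnstage.
# Names and literals are hypothetical.
def _example_ordered_map_peek():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    stage = ordered_map_stage(key=tf1.constant(3, dtype=tf1.int64),
                              indices=[0], values=[tf1.constant(1.5)],
                              dtypes=[tf1.float32],
                              shared_name="example_peek")
    peeked = ordered_map_peek(key=tf1.constant(3, dtype=tf1.int64),
                              indices=[0], dtypes=[tf1.float32],
                              shared_name="example_peek")
    size = ordered_map_size(dtypes=[tf1.float32], shared_name="example_peek")
    with tf1.Session() as sess:
      sess.run(stage)
      sess.run(peeked)       # reads [1.5]
      print(sess.run(size))  # -> 1: peeking did not remove the element
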
def ordered_map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op returns the number of elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapSize", name, _ctx._post_execution_callbacks, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_size_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapSize", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"OrderedMapSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def OrderedMapSize(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_size(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapSize.__doc__ = ordered_map_size.__doc__
OrderedMapSize = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapSize))
tf_export("raw_ops.OrderedMapSize")(OrderedMapSize)
def ordered_map_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_size
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapSize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"OrderedMapSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
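
# --- Illustrative usage sketch (editor-added): OrderedMapSize before and
# after OrderedMapClear. Names and literals are hypothetical.
def _example_ordered_map_size_and_clear():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    stage = ordered_map_stage(key=tf1.constant(1, dtype=tf1.int64),
                              indices=[0], values=[tf1.constant(2.0)],
                              dtypes=[tf1.float32], shared_name="example_sz")
    size = ordered_map_size(dtypes=[tf1.float32], shared_name="example_sz")
    clear = ordered_map_clear(dtypes=[tf1.float32], shared_name="example_sz")
    with tf1.Session() as sess:
      sess.run(stage)
      print(sess.run(size))  # -> 1
      sess.run(clear)        # empties the container
      print(sess.run(size))  # -> 0
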
def ordered_map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Stage (key, values) in the underlying container which behaves like a ordered
associative container. Elements are ordered by key.
Args:
    key: A `Tensor` of type `int64`. The int64 key under which the values
      are staged.
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors whose data types
      should adhere to `dtypes`.
    dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
Maximum number of elements in the Staging Area. If > 0, inserts
on the container will block when the capacity is reached.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container. Otherwise,
a default container is used.
shared_name: An optional `string`. Defaults to `""`.
It is necessary to match this name to the matching Unstage Op.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapStage", name, _ctx._post_execution_callbacks, key, indices,
values, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_stage_eager_fallback(
key, indices, values, capacity=capacity,
memory_limit=memory_limit, dtypes=dtypes, container=container,
shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_stage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapStage", key=key, indices=indices, values=values,
dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
return _op
_result = None
return _result
def OrderedMapStage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_stage(key=key, indices=indices, values=values, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapStage.__doc__ = ordered_map_stage.__doc__
OrderedMapStage = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapStage))
tf_export("raw_ops.OrderedMapStage")(OrderedMapStage)
def ordered_map_stage_eager_fallback(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_stage
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_stage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices] + list(values)
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "fake_dtypes", _attr_fake_dtypes, "container", container,
"shared_name", shared_name)
_result = _execute.execute(b"OrderedMapStage", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def ordered_map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes and returns the values associated with the key
from the underlying container. If the underlying container
does not contain this key, the op will block until it does.
Args:
key: A `Tensor` of type `int64`.
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapUnstage", name, _ctx._post_execution_callbacks, key,
indices, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return ordered_map_unstage_eager_fallback(
key, indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapUnstage", key=key, indices=indices, dtypes=dtypes,
capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name,
name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"OrderedMapUnstage", _inputs_flat, _attrs, _result, name)
return _result
def OrderedMapUnstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_unstage(key=key, indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapUnstage.__doc__ = ordered_map_unstage.__doc__
OrderedMapUnstage = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapUnstage))
tf_export("raw_ops.OrderedMapUnstage")(OrderedMapUnstage)
def ordered_map_unstage_eager_fallback(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_unstage
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
key = _ops.convert_to_tensor(key, _dtypes.int64)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [key, indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapUnstage", len(dtypes),
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"OrderedMapUnstage", _inputs_flat, _attrs, _result, name)
return _result
_ordered_map_unstage_no_key_outputs = ["key", "values"]
_OrderedMapUnstageNoKeyOutput = _collections.namedtuple(
"OrderedMapUnstageNoKey", _ordered_map_unstage_no_key_outputs)
def ordered_map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes and returns the (key, value) element with the smallest
key from the underlying container. If the underlying container
does not contain elements, the op will block until it does.
Args:
indices: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (key, values).
key: A `Tensor` of type `int64`.
values: A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"OrderedMapUnstageNoKey", name, _ctx._post_execution_callbacks,
indices, "capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _OrderedMapUnstageNoKeyOutput._make(_result)
return _result
except _core._FallbackException:
try:
return ordered_map_unstage_no_key_eager_fallback(
indices, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_unstage_no_key' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"OrderedMapUnstageNoKey", indices=indices, dtypes=dtypes,
capacity=capacity,
memory_limit=memory_limit,
container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result, name)
_result = _result[:1] + [_result[1:]]
_result = _OrderedMapUnstageNoKeyOutput._make(_result)
return _result
def OrderedMapUnstageNoKey(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return ordered_map_unstage_no_key(indices=indices, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
OrderedMapUnstageNoKey.__doc__ = ordered_map_unstage_no_key.__doc__
OrderedMapUnstageNoKey = _doc_controls.do_not_generate_docs(_kwarg_only(OrderedMapUnstageNoKey))
tf_export("raw_ops.OrderedMapUnstageNoKey")(OrderedMapUnstageNoKey)
def ordered_map_unstage_no_key_eager_fallback(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ordered_map_unstage_no_key
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'ordered_map_unstage_no_key' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
_inputs_flat = [indices]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"OrderedMapUnstageNoKey", len(dtypes) + 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result, name)
_result = _result[:1] + [_result[1:]]
_result = _OrderedMapUnstageNoKeyOutput._make(_result)
return _result
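
# --- Illustrative usage sketch (editor-added): OrderedMapUnstageNoKey always
# pops the element with the smallest key, in contrast to the unordered
# MapUnstageNoKey earlier in this module. Names and literals are hypothetical.
def _example_ordered_map_unstage_no_key():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    def _stage(k, v):
      return ordered_map_stage(key=tf1.constant(k, dtype=tf1.int64),
                               indices=[0], values=[tf1.constant(v)],
                               dtypes=[tf1.int32], shared_name="example_ord")
    stage_a = _stage(5, 50)
    stage_b = _stage(2, 20)
    pop = ordered_map_unstage_no_key(indices=[0], dtypes=[tf1.int32],
                                     shared_name="example_ord")
    with tf1.Session() as sess:
      sess.run([stage_a, stage_b])
      key_val, values_val = sess.run([pop.key, pop.values])
      # key_val == 2, values_val == [20]: the smallest key is removed first.
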
def padding_fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements in first-in first-out order.
  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to -1 in the `shapes` attr. In this case DequeueMany will pad up to the maximum
size of any given element in the minibatch. See below for details.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types.
Shapes of fixed rank but variable size are allowed by setting
any shape dimension to -1. In this case, the inputs' shape may vary along
the given dimension, and DequeueMany will pad the given dimension with
zeros up to the maximum shape of all elements in the given batch.
If the length of this attr is 0, different queue elements may have
different ranks and shapes, but only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("padding_fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'padding_fifo_queue' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'padding_fifo_queue' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"PaddingFIFOQueue", component_types=component_types, shapes=shapes,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"PaddingFIFOQueue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def PaddingFIFOQueue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
return padding_fifo_queue(component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name, name=name)
PaddingFIFOQueue.__doc__ = padding_fifo_queue.__doc__
PaddingFIFOQueue = _doc_controls.do_not_generate_docs(_kwarg_only(PaddingFIFOQueue))
tf_export("raw_ops.PaddingFIFOQueue")(PaddingFIFOQueue)
def padding_fifo_queue_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
raise RuntimeError("padding_fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
def padding_fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements in first-in first-out order.
  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to -1 in the `shapes` attr. In this case DequeueMany will pad up to the maximum
size of any given element in the minibatch. See below for details.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types.
Shapes of fixed rank but variable size are allowed by setting
any shape dimension to -1. In this case, the inputs' shape may vary along
the given dimension, and DequeueMany will pad the given dimension with
zeros up to the maximum shape of all elements in the given batch.
If the length of this attr is 0, different queue elements may have
different ranks and shapes, but only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"PaddingFIFOQueueV2", name, _ctx._post_execution_callbacks,
"component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return padding_fifo_queue_v2_eager_fallback(
component_types=component_types, shapes=shapes, capacity=capacity,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'padding_fifo_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'padding_fifo_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"PaddingFIFOQueueV2", component_types=component_types, shapes=shapes,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"PaddingFIFOQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def PaddingFIFOQueueV2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
return padding_fifo_queue_v2(component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name, name=name)
PaddingFIFOQueueV2.__doc__ = padding_fifo_queue_v2.__doc__
PaddingFIFOQueueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(PaddingFIFOQueueV2))
tf_export("raw_ops.PaddingFIFOQueueV2")(PaddingFIFOQueueV2)
def padding_fifo_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function padding_fifo_queue_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'padding_fifo_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'padding_fifo_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"PaddingFIFOQueueV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"PaddingFIFOQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
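
# --- Illustrative usage sketch (editor-added): the padding behaviour of
# PaddingFIFOQueueV2, driven through the high-level
# `tf.compat.v1.PaddingFIFOQueue` wrapper (which builds this op under the
# hood). Literals are hypothetical.
def _example_padding_fifo_queue_v2():
  import tensorflow.compat.v1 as tf1  # assumes the TF1 compat API is available
  with tf1.Graph().as_default():
    # One variable-length int32 component: shape [None].
    q = tf1.PaddingFIFOQueue(capacity=10, dtypes=[tf1.int32], shapes=[[None]])
    enq_a = q.enqueue([tf1.constant([1, 2])])
    enq_b = q.enqueue([tf1.constant([3, 4, 5])])
    deq = q.dequeue_many(2)
    with tf1.Session() as sess:
      sess.run(enq_a)
      sess.run(enq_b)
      # DequeueMany pads the shorter element with zeros along the variable
      # dimension: [[1, 2, 0], [3, 4, 5]].
      print(sess.run(deq))
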
def parallel_dynamic_stitch(indices, data, name=None):
r"""Interleave the values from the `data` tensors into a single tensor.
Builds a merged tensor such that
```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```
For example, if each `indices[m]` is scalar or vector, we have
```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]
# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```
Each `data[i].shape` must start with the corresponding `indices[i].shape`,
and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
must have `data[i].shape = indices[i].shape + constant`. In terms of this
`constant`, the output shape is
    merged.shape = [max(indices) + 1] + constant
Values may be merged in parallel, so if an index appears in both `indices[m][i]`
and `indices[n][j]`, the result may be invalid. This differs from the normal
DynamicStitch operator that defines the behavior in that case.
For example:
```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
[51, 52], [61, 62]]
```
  This method can be used to merge partitions created by `dynamic_partition`
  as illustrated in the following example:
```python
  # Apply a function (increment x_i) to elements for which a certain
  # condition applies (x_i != -1 in this example).
  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
  condition_mask = tf.not_equal(x, tf.constant(-1.))
  partitioned_data = tf.dynamic_partition(
      x, tf.cast(condition_mask, tf.int32), 2)
  partitioned_data[1] = partitioned_data[1] + 1.0
  condition_indices = tf.dynamic_partition(
      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
  x = tf.dynamic_stitch(condition_indices, partitioned_data)
  # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
  # unchanged.
```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
</div>
Args:
indices: A list of at least 1 `Tensor` objects with type `int32`.
data: A list with the same length as `indices` of `Tensor` objects with the same type.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"ParallelDynamicStitch", name, _ctx._post_execution_callbacks,
indices, data)
return _result
except _core._FallbackException:
try:
return parallel_dynamic_stitch_eager_fallback(
indices, data, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(indices, (list, tuple)):
raise TypeError(
"Expected list for 'indices' argument to "
"'parallel_dynamic_stitch' Op, not %r." % indices)
_attr_N = len(indices)
if not isinstance(data, (list, tuple)):
raise TypeError(
"Expected list for 'data' argument to "
"'parallel_dynamic_stitch' Op, not %r." % data)
if len(data) != _attr_N:
raise ValueError(
"List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
"must match length %d of argument 'indices'." %
(len(data), _attr_N))
_, _, _op = _op_def_lib._apply_op_helper(
"ParallelDynamicStitch", indices=indices, data=data, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
_execute.record_gradient(
"ParallelDynamicStitch", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def ParallelDynamicStitch(indices, data, name=None):
return parallel_dynamic_stitch(indices=indices, data=data, name=name)
ParallelDynamicStitch.__doc__ = parallel_dynamic_stitch.__doc__
ParallelDynamicStitch = _doc_controls.do_not_generate_docs(_kwarg_only(ParallelDynamicStitch))
tf_export("raw_ops.ParallelDynamicStitch")(ParallelDynamicStitch)
def parallel_dynamic_stitch_eager_fallback(indices, data, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function parallel_dynamic_stitch
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(indices, (list, tuple)):
raise TypeError(
"Expected list for 'indices' argument to "
"'parallel_dynamic_stitch' Op, not %r." % indices)
_attr_N = len(indices)
if not isinstance(data, (list, tuple)):
raise TypeError(
"Expected list for 'data' argument to "
"'parallel_dynamic_stitch' Op, not %r." % data)
if len(data) != _attr_N:
raise ValueError(
"List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
"must match length %d of argument 'indices'." %
(len(data), _attr_N))
_attr_T, data = _execute.args_to_matching_eager(list(data), _ctx)
indices = _ops.convert_n_to_tensor(indices, _dtypes.int32)
_inputs_flat = list(indices) + list(data)
_attrs = ("N", _attr_N, "T", _attr_T)
_result = _execute.execute(b"ParallelDynamicStitch", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ParallelDynamicStitch", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
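
# --- Illustrative usage sketch (editor-added): ParallelDynamicStitch via the
# `tf.raw_ops` endpoint exported above, with disjoint indices so the parallel
# merge is well defined. Assumes eager execution (the TF2 default).
def _example_parallel_dynamic_stitch():
  import tensorflow as tf
  merged = tf.raw_ops.ParallelDynamicStitch(
      indices=[tf.constant([0, 2]), tf.constant([1, 3])],
      data=[tf.constant([10, 30]), tf.constant([20, 40])])
  return merged  # -> [10, 20, 30, 40]
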
def priority_queue(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements sorted by the first component value.
Note that the PriorityQueue requires the first component of any element
to be a scalar int64, in addition to the other elements declared by
component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
entry in their input (resp. output) lists.
Args:
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
The type of each component in a value.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("priority_queue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'priority_queue' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if component_types is None:
component_types = []
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'priority_queue' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"PriorityQueue", shapes=shapes, component_types=component_types,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"PriorityQueue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def PriorityQueue(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
return priority_queue(shapes=shapes, component_types=component_types, capacity=capacity, container=container, shared_name=shared_name, name=name)
PriorityQueue.__doc__ = priority_queue.__doc__
PriorityQueue = _doc_controls.do_not_generate_docs(_kwarg_only(PriorityQueue))
tf_export("raw_ops.PriorityQueue")(PriorityQueue)
def priority_queue_eager_fallback(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
raise RuntimeError("priority_queue op does not support eager execution. Arg 'handle' is a ref.")
def priority_queue_v2(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
r"""A queue that produces elements sorted by the first component value.
Note that the PriorityQueue requires the first component of any element
to be a scalar int64, in addition to the other elements declared by
component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
entry in their input (resp. output) lists.
Args:
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
The type of each component in a value.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"PriorityQueueV2", name, _ctx._post_execution_callbacks,
"component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return priority_queue_v2_eager_fallback(
component_types=component_types, shapes=shapes, capacity=capacity,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'priority_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if component_types is None:
component_types = []
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'priority_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"PriorityQueueV2", shapes=shapes, component_types=component_types,
capacity=capacity, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"PriorityQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def PriorityQueueV2(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
return priority_queue_v2(shapes=shapes, component_types=component_types, capacity=capacity, container=container, shared_name=shared_name, name=name)
PriorityQueueV2.__doc__ = priority_queue_v2.__doc__
PriorityQueueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(PriorityQueueV2))
tf_export("raw_ops.PriorityQueueV2")(PriorityQueueV2)
def priority_queue_v2_eager_fallback(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function priority_queue_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'priority_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if component_types is None:
component_types = []
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'priority_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("component_types", component_types, "shapes", shapes, "capacity",
capacity, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"PriorityQueueV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"PriorityQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
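
# Illustrative sketch (not part of the generated file): the V2 op returns a
# resource handle and, as the fast path above shows, can execute eagerly.
# Elements dequeue in ascending order of the first (scalar int64) component.
# Assumes eager execution; the enqueue and dequeue wrappers used here are
# defined later in this module.
def _example_priority_queue_v2_ordering():
  import tensorflow as tf
  q = priority_queue_v2(shapes=[[], []],
                        component_types=[tf.int64, tf.string])
  queue_enqueue_v2(q, [tf.constant(2, tf.int64), tf.constant(b"second")])
  queue_enqueue_v2(q, [tf.constant(1, tf.int64), tf.constant(b"first")])
  prio, value = queue_dequeue_v2(q, component_types=[tf.int64, tf.string])
  # prio == 1, value == b"first": the smallest priority comes out first.
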
def queue_close(handle, cancel_pending_enqueues=False, name=None):
r"""Closes the given queue.
This operation signals that no more elements will be enqueued in the
given queue. Subsequent Enqueue(Many) operations will fail.
Subsequent Dequeue(Many) operations will continue to succeed if
sufficient elements remain in the queue. Subsequent Dequeue(Many)
operations that would block will fail immediately.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
If true, all pending enqueue requests that are
blocked on the given queue will be canceled.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_close op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if cancel_pending_enqueues is None:
cancel_pending_enqueues = False
cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueClose", handle=handle,
cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
  return _op
def QueueClose(handle, cancel_pending_enqueues=False, name=None):
return queue_close(handle=handle, cancel_pending_enqueues=cancel_pending_enqueues, name=name)
QueueClose.__doc__ = queue_close.__doc__
QueueClose = _doc_controls.do_not_generate_docs(_kwarg_only(QueueClose))
tf_export("raw_ops.QueueClose")(QueueClose)
def queue_close_eager_fallback(handle, cancel_pending_enqueues=False, name=None, ctx=None):
raise RuntimeError("queue_close op does not support eager execution. Arg 'handle' is a ref.")
def queue_close_v2(handle, cancel_pending_enqueues=False, name=None):
r"""Closes the given queue.
This operation signals that no more elements will be enqueued in the
given queue. Subsequent Enqueue(Many) operations will fail.
Subsequent Dequeue(Many) operations will continue to succeed if
sufficient elements remain in the queue. Subsequent Dequeue(Many)
operations that would block will fail immediately.
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
If true, all pending enqueue requests that are
blocked on the given queue will be canceled.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueCloseV2", name, _ctx._post_execution_callbacks, handle,
"cancel_pending_enqueues", cancel_pending_enqueues)
return _result
except _core._FallbackException:
try:
return queue_close_v2_eager_fallback(
handle, cancel_pending_enqueues=cancel_pending_enqueues,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if cancel_pending_enqueues is None:
cancel_pending_enqueues = False
cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueCloseV2", handle=handle,
cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
  return _op
def QueueCloseV2(handle, cancel_pending_enqueues=False, name=None):
return queue_close_v2(handle=handle, cancel_pending_enqueues=cancel_pending_enqueues, name=name)
QueueCloseV2.__doc__ = queue_close_v2.__doc__
QueueCloseV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueCloseV2))
tf_export("raw_ops.QueueCloseV2")(QueueCloseV2)
def queue_close_v2_eager_fallback(handle, cancel_pending_enqueues=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_close_v2
"""
_ctx = ctx if ctx else _context.context()
if cancel_pending_enqueues is None:
cancel_pending_enqueues = False
cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = ("cancel_pending_enqueues", cancel_pending_enqueues)
_result = _execute.execute(b"QueueCloseV2", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
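
# Illustrative sketch (not part of the generated file): closing a queue stops
# new enqueues but lets already-buffered elements drain, as documented above.
# Assumes eager execution; random_shuffle_queue_v2 is defined later in this
# module but resolves at call time.
def _example_queue_close_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32], shapes=[[]])
  queue_enqueue_v2(q, [tf.constant(1)])
  queue_close_v2(q)
  (remaining,) = queue_dequeue_v2(q, component_types=[tf.int32])  # succeeds
  # A further queue_enqueue_v2(q, ...) now fails, and another dequeue raises
  # OutOfRange because the closed queue is empty.
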
def queue_dequeue(handle, component_types, timeout_ms=-1, name=None):
r"""Dequeues a tuple of one or more tensors from the given queue.
  This operation has `k` outputs, where `k` is the number of components
  in the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until an element
has been dequeued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is empty, this operation will block for up to
timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_dequeue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeue", handle=handle, component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeue", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeue(handle, component_types, timeout_ms=-1, name=None):
return queue_dequeue(handle=handle, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeue.__doc__ = queue_dequeue.__doc__
QueueDequeue = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeue))
tf_export("raw_ops.QueueDequeue")(QueueDequeue)
def queue_dequeue_eager_fallback(handle, component_types, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("queue_dequeue op does not support eager execution. Arg 'handle' is a ref.")
def queue_dequeue_many(handle, n, component_types, timeout_ms=-1, name=None):
r"""Dequeues `n` tuples of one or more tensors from the given queue.
If the queue is closed and there are fewer than `n` elements, then an
OutOfRange error is returned.
This operation concatenates queue-element component tensors along the
0th dimension to make a single component tensor. All of the components
in the dequeued tuple will have size `n` in the 0th dimension.
This operation has `k` outputs, where `k` is the number of components in
the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until `n` elements
have been dequeued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
n: A `Tensor` of type `int32`. The number of tuples to dequeue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue has fewer than n elements, this operation
will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_dequeue_many op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_many' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeueMany", handle=handle, n=n,
component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeueMany", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeueMany(handle, n, component_types, timeout_ms=-1, name=None):
return queue_dequeue_many(handle=handle, n=n, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeueMany.__doc__ = queue_dequeue_many.__doc__
QueueDequeueMany = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeueMany))
tf_export("raw_ops.QueueDequeueMany")(QueueDequeueMany)
def queue_dequeue_many_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("queue_dequeue_many op does not support eager execution. Arg 'handle' is a ref.")
def queue_dequeue_many_v2(handle, n, component_types, timeout_ms=-1, name=None):
r"""Dequeues `n` tuples of one or more tensors from the given queue.
If the queue is closed and there are fewer than `n` elements, then an
OutOfRange error is returned.
This operation concatenates queue-element component tensors along the
0th dimension to make a single component tensor. All of the components
in the dequeued tuple will have size `n` in the 0th dimension.
This operation has `k` outputs, where `k` is the number of components in
the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until `n` elements
have been dequeued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
n: A `Tensor` of type `int32`. The number of tuples to dequeue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue has fewer than n elements, this operation
will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueDequeueManyV2", name, _ctx._post_execution_callbacks, handle, n,
"component_types", component_types, "timeout_ms", timeout_ms)
return _result
except _core._FallbackException:
try:
return queue_dequeue_many_v2_eager_fallback(
handle, n, component_types=component_types, timeout_ms=timeout_ms,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_many_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeueManyV2", handle=handle, n=n,
component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeueManyV2", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeueManyV2(handle, n, component_types, timeout_ms=-1, name=None):
return queue_dequeue_many_v2(handle=handle, n=n, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeueManyV2.__doc__ = queue_dequeue_many_v2.__doc__
QueueDequeueManyV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeueManyV2))
tf_export("raw_ops.QueueDequeueManyV2")(QueueDequeueManyV2)
def queue_dequeue_many_v2_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_dequeue_many_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_many_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
n = _ops.convert_to_tensor(n, _dtypes.int32)
_inputs_flat = [handle, n]
_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
_result = _execute.execute(b"QueueDequeueManyV2", len(component_types),
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"QueueDequeueManyV2", _inputs_flat, _attrs, _result, name)
return _result
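
# Illustrative sketch (not part of the generated file): dequeue_many stacks
# `n` queue elements along a new 0th dimension, exactly as the docstring above
# describes. Assumes eager execution; the queue constructor used here is
# defined later in this module.
def _example_queue_dequeue_many_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32], shapes=[[]])
  queue_enqueue_many_v2(q, [tf.constant([10, 20, 30, 40])])
  (batch,) = queue_dequeue_many_v2(q, 2, component_types=[tf.int32])
  # batch is a shape-[2] int32 tensor built by concatenating two scalar
  # elements along dimension 0.
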
def queue_dequeue_up_to(handle, n, component_types, timeout_ms=-1, name=None):
r"""Dequeues `n` tuples of one or more tensors from the given queue.
This operation is not supported by all queues. If a queue does not support
DequeueUpTo, then an Unimplemented error is returned.
  If the queue is closed and there are more than 0 but fewer than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, fewer than `n` elements are returned immediately. If
the queue is closed and there are 0 elements left in the queue, then
an OutOfRange error is returned just like in QueueDequeueMany.
Otherwise the behavior is identical to QueueDequeueMany:
This operation concatenates queue-element component tensors along the
0th dimension to make a single component tensor. All of the components
in the dequeued tuple will have size `n` in the 0th dimension.
  This operation has `k` outputs, where `k` is the number of components in
the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
n: A `Tensor` of type `int32`. The number of tuples to dequeue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue has fewer than n elements, this operation
will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_dequeue_up_to op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_up_to' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeueUpTo", handle=handle, n=n,
component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeueUpTo", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeueUpTo(handle, n, component_types, timeout_ms=-1, name=None):
return queue_dequeue_up_to(handle=handle, n=n, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeueUpTo.__doc__ = queue_dequeue_up_to.__doc__
QueueDequeueUpTo = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeueUpTo))
tf_export("raw_ops.QueueDequeueUpTo")(QueueDequeueUpTo)
def queue_dequeue_up_to_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("queue_dequeue_up_to op does not support eager execution. Arg 'handle' is a ref.")
def queue_dequeue_up_to_v2(handle, n, component_types, timeout_ms=-1, name=None):
r"""Dequeues `n` tuples of one or more tensors from the given queue.
This operation is not supported by all queues. If a queue does not support
DequeueUpTo, then an Unimplemented error is returned.
  If the queue is closed and there are more than 0 but fewer than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, fewer than `n` elements are returned immediately. If
the queue is closed and there are 0 elements left in the queue, then
an OutOfRange error is returned just like in QueueDequeueMany.
Otherwise the behavior is identical to QueueDequeueMany:
This operation concatenates queue-element component tensors along the
0th dimension to make a single component tensor. All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.
This operation has `k` outputs, where `k` is the number of components in
the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
n: A `Tensor` of type `int32`. The number of tuples to dequeue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue has fewer than n elements, this operation
will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueDequeueUpToV2", name, _ctx._post_execution_callbacks, handle, n,
"component_types", component_types, "timeout_ms", timeout_ms)
return _result
except _core._FallbackException:
try:
return queue_dequeue_up_to_v2_eager_fallback(
handle, n, component_types=component_types, timeout_ms=timeout_ms,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_up_to_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeueUpToV2", handle=handle, n=n,
component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeueUpToV2", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeueUpToV2(handle, n, component_types, timeout_ms=-1, name=None):
return queue_dequeue_up_to_v2(handle=handle, n=n, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeueUpToV2.__doc__ = queue_dequeue_up_to_v2.__doc__
QueueDequeueUpToV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeueUpToV2))
tf_export("raw_ops.QueueDequeueUpToV2")(QueueDequeueUpToV2)
def queue_dequeue_up_to_v2_eager_fallback(handle, n, component_types, timeout_ms=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_dequeue_up_to_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_up_to_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
n = _ops.convert_to_tensor(n, _dtypes.int32)
_inputs_flat = [handle, n]
_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
_result = _execute.execute(b"QueueDequeueUpToV2", len(component_types),
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"QueueDequeueUpToV2", _inputs_flat, _attrs, _result, name)
return _result
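
# Illustrative sketch (not part of the generated file): the difference between
# DequeueUpTo and DequeueMany on a closed queue. Assumes eager execution and
# that the queue kernel supports DequeueUpTo (the docstring warns that not all
# queues do; RandomShuffleQueue does in stock TensorFlow).
def _example_queue_dequeue_up_to_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32], shapes=[[]])
  queue_enqueue_many_v2(q, [tf.constant([1, 2, 3])])
  queue_close_v2(q)
  # Asking for 5 from a closed queue holding 3:
  #   queue_dequeue_many_v2  -> raises OutOfRange
  #   queue_dequeue_up_to_v2 -> returns the remaining 3 immediately
  (batch,) = queue_dequeue_up_to_v2(q, 5, component_types=[tf.int32])
  # batch has shape [3].
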
def queue_dequeue_v2(handle, component_types, timeout_ms=-1, name=None):
r"""Dequeues a tuple of one or more tensors from the given queue.
  This operation has `k` outputs, where `k` is the number of components
  in the tuples stored in the given queue, and output `i` is the ith
component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until an element
has been dequeued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a tuple.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is empty, this operation will block for up to
timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `component_types`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueDequeueV2", name, _ctx._post_execution_callbacks, handle,
"component_types", component_types, "timeout_ms", timeout_ms)
return _result
except _core._FallbackException:
try:
return queue_dequeue_v2_eager_fallback(
handle, component_types=component_types, timeout_ms=timeout_ms,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueDequeueV2", handle=handle, component_types=component_types,
timeout_ms=timeout_ms, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "timeout_ms",
_op.get_attr("timeout_ms"))
_execute.record_gradient(
"QueueDequeueV2", _inputs_flat, _attrs, _result, name)
return _result
def QueueDequeueV2(handle, component_types, timeout_ms=-1, name=None):
return queue_dequeue_v2(handle=handle, component_types=component_types, timeout_ms=timeout_ms, name=name)
QueueDequeueV2.__doc__ = queue_dequeue_v2.__doc__
QueueDequeueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueDequeueV2))
tf_export("raw_ops.QueueDequeueV2")(QueueDequeueV2)
def queue_dequeue_v2_eager_fallback(handle, component_types, timeout_ms=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_dequeue_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'queue_dequeue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
_result = _execute.execute(b"QueueDequeueV2", len(component_types),
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"QueueDequeueV2", _inputs_flat, _attrs, _result, name)
return _result
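
# Illustrative sketch (not part of the generated file): queue_dequeue_v2
# returns one tensor per entry in component_types, in the same order.
# Assumes eager execution.
def _example_queue_dequeue_v2_outputs():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32, tf.float32],
                              shapes=[[], []])
  queue_enqueue_v2(q, [tf.constant(1), tf.constant(0.5)])
  i, f = queue_dequeue_v2(q, component_types=[tf.int32, tf.float32])
  # i is a scalar int32 tensor, f a scalar float32 tensor.
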
def queue_enqueue(handle, components, timeout_ms=-1, name=None):
r"""Enqueues a tuple of one or more tensors in the given queue.
  The `components` input has `k` elements, which correspond to the components of
tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
element has been enqueued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
components: A list of `Tensor` objects.
One or more tensors from which the enqueued tensors should be taken.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is full, this operation will block for up to
timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_enqueue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueEnqueue", handle=handle, components=components,
timeout_ms=timeout_ms, name=name)
  return _op
def QueueEnqueue(handle, components, timeout_ms=-1, name=None):
return queue_enqueue(handle=handle, components=components, timeout_ms=timeout_ms, name=name)
QueueEnqueue.__doc__ = queue_enqueue.__doc__
QueueEnqueue = _doc_controls.do_not_generate_docs(_kwarg_only(QueueEnqueue))
tf_export("raw_ops.QueueEnqueue")(QueueEnqueue)
def queue_enqueue_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("queue_enqueue op does not support eager execution. Arg 'handle' is a ref.")
def queue_enqueue_many(handle, components, timeout_ms=-1, name=None):
r"""Enqueues zero or more tuples of one or more tensors in the given queue.
This operation slices each component tensor along the 0th dimension to
make multiple queue elements. All of the tuple components must have the
same size in the 0th dimension.
  The `components` input has `k` elements, which correspond to the components of
tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
elements have been enqueued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
components: A list of `Tensor` objects.
One or more tensors from which the enqueued tensors should
be taken.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is too full, this operation will block for up
to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_enqueue_many op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueEnqueueMany", handle=handle, components=components,
timeout_ms=timeout_ms, name=name)
  return _op
def QueueEnqueueMany(handle, components, timeout_ms=-1, name=None):
return queue_enqueue_many(handle=handle, components=components, timeout_ms=timeout_ms, name=name)
QueueEnqueueMany.__doc__ = queue_enqueue_many.__doc__
QueueEnqueueMany = _doc_controls.do_not_generate_docs(_kwarg_only(QueueEnqueueMany))
tf_export("raw_ops.QueueEnqueueMany")(QueueEnqueueMany)
def queue_enqueue_many_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
raise RuntimeError("queue_enqueue_many op does not support eager execution. Arg 'handle' is a ref.")
def queue_enqueue_many_v2(handle, components, timeout_ms=-1, name=None):
r"""Enqueues zero or more tuples of one or more tensors in the given queue.
This operation slices each component tensor along the 0th dimension to
make multiple queue elements. All of the tuple components must have the
same size in the 0th dimension.
  The `components` input has `k` elements, which correspond to the components of
tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
elements have been enqueued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
components: A list of `Tensor` objects.
One or more tensors from which the enqueued tensors should
be taken.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is too full, this operation will block for up
to timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueEnqueueManyV2", name, _ctx._post_execution_callbacks, handle,
components, "timeout_ms", timeout_ms)
return _result
except _core._FallbackException:
try:
return queue_enqueue_many_v2_eager_fallback(
handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueEnqueueManyV2", handle=handle, components=components,
timeout_ms=timeout_ms, name=name)
  return _op
def QueueEnqueueManyV2(handle, components, timeout_ms=-1, name=None):
return queue_enqueue_many_v2(handle=handle, components=components, timeout_ms=timeout_ms, name=name)
QueueEnqueueManyV2.__doc__ = queue_enqueue_many_v2.__doc__
QueueEnqueueManyV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueEnqueueManyV2))
tf_export("raw_ops.QueueEnqueueManyV2")(QueueEnqueueManyV2)
def queue_enqueue_many_v2_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_enqueue_many_v2
"""
_ctx = ctx if ctx else _context.context()
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(components, _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle] + list(components)
_attrs = ("Tcomponents", _attr_Tcomponents, "timeout_ms", timeout_ms)
_result = _execute.execute(b"QueueEnqueueManyV2", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
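
# Illustrative sketch (not part of the generated file): enqueue_many slices
# each component along dimension 0, so a single call can enqueue a whole
# batch of elements. Assumes eager execution.
def _example_queue_enqueue_many_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32, tf.string],
                              shapes=[[], []])
  # Enqueues two elements, (1, b"a") and (2, b"b"); the 0th-dimension sizes
  # of all components must match.
  queue_enqueue_many_v2(q, [tf.constant([1, 2]),
                            tf.constant([b"a", b"b"])])
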
def queue_enqueue_v2(handle, components, timeout_ms=-1, name=None):
r"""Enqueues a tuple of one or more tensors in the given queue.
  The `components` input has `k` elements, which correspond to the components of
tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given
element has been enqueued (or 'timeout_ms' elapses, if specified).
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
components: A list of `Tensor` objects.
One or more tensors from which the enqueued tensors should be taken.
timeout_ms: An optional `int`. Defaults to `-1`.
If the queue is full, this operation will block for up to
timeout_ms milliseconds.
Note: This option is not supported yet.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueEnqueueV2", name, _ctx._post_execution_callbacks, handle,
components, "timeout_ms", timeout_ms)
return _result
except _core._FallbackException:
try:
return queue_enqueue_v2_eager_fallback(
handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_, _, _op = _op_def_lib._apply_op_helper(
"QueueEnqueueV2", handle=handle, components=components,
timeout_ms=timeout_ms, name=name)
  return _op
def QueueEnqueueV2(handle, components, timeout_ms=-1, name=None):
return queue_enqueue_v2(handle=handle, components=components, timeout_ms=timeout_ms, name=name)
QueueEnqueueV2.__doc__ = queue_enqueue_v2.__doc__
QueueEnqueueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueEnqueueV2))
tf_export("raw_ops.QueueEnqueueV2")(QueueEnqueueV2)
def queue_enqueue_v2_eager_fallback(handle, components, timeout_ms=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_enqueue_v2
"""
_ctx = ctx if ctx else _context.context()
if timeout_ms is None:
timeout_ms = -1
timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
_attr_Tcomponents, components = _execute.convert_to_mixed_eager_tensors(components, _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle] + list(components)
_attrs = ("Tcomponents", _attr_Tcomponents, "timeout_ms", timeout_ms)
_result = _execute.execute(b"QueueEnqueueV2", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def queue_is_closed(handle, name=None):
r"""Returns true if queue is closed.
This operation returns true if the queue is closed and false if the queue
is open.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_is_closed op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"QueueIsClosed", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"QueueIsClosed", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def QueueIsClosed(handle, name=None):
return queue_is_closed(handle=handle, name=name)
QueueIsClosed.__doc__ = queue_is_closed.__doc__
QueueIsClosed = _doc_controls.do_not_generate_docs(_kwarg_only(QueueIsClosed))
tf_export("raw_ops.QueueIsClosed")(QueueIsClosed)
def queue_is_closed_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("queue_is_closed op does not support eager execution. Arg 'handle' is a ref.")
def queue_is_closed_v2(handle, name=None):
r"""Returns true if queue is closed.
This operation returns true if the queue is closed and false if the queue
is open.
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueIsClosedV2", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return queue_is_closed_v2_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"QueueIsClosedV2", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"QueueIsClosedV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def QueueIsClosedV2(handle, name=None):
return queue_is_closed_v2(handle=handle, name=name)
QueueIsClosedV2.__doc__ = queue_is_closed_v2.__doc__
QueueIsClosedV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueIsClosedV2))
tf_export("raw_ops.QueueIsClosedV2")(QueueIsClosedV2)
def queue_is_closed_v2_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_is_closed_v2
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"QueueIsClosedV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"QueueIsClosedV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
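
# Illustrative sketch (not part of the generated file): queue_is_closed_v2
# returns a scalar bool tensor that flips once the queue is closed. Assumes
# eager execution, where a scalar bool tensor can be tested directly.
def _example_queue_is_closed_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32], shapes=[[]])
  assert not bool(queue_is_closed_v2(q))
  queue_close_v2(q)
  assert bool(queue_is_closed_v2(q))
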
def queue_size(handle, name=None):
r"""Computes the number of elements in the given queue.
Args:
handle: A `Tensor` of type mutable `string`. The handle to a queue.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("queue_size op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"QueueSize", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"QueueSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def QueueSize(handle, name=None):
return queue_size(handle=handle, name=name)
QueueSize.__doc__ = queue_size.__doc__
QueueSize = _doc_controls.do_not_generate_docs(_kwarg_only(QueueSize))
tf_export("raw_ops.QueueSize")(QueueSize)
def queue_size_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("queue_size op does not support eager execution. Arg 'handle' is a ref.")
def queue_size_v2(handle, name=None):
r"""Computes the number of elements in the given queue.
Args:
handle: A `Tensor` of type `resource`. The handle to a queue.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"QueueSizeV2", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return queue_size_v2_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"QueueSizeV2", handle=handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"QueueSizeV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def QueueSizeV2(handle, name=None):
return queue_size_v2(handle=handle, name=name)
QueueSizeV2.__doc__ = queue_size_v2.__doc__
QueueSizeV2 = _doc_controls.do_not_generate_docs(_kwarg_only(QueueSizeV2))
tf_export("raw_ops.QueueSizeV2")(QueueSizeV2)
def queue_size_v2_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function queue_size_v2
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"QueueSizeV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"QueueSizeV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
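
# Illustrative sketch (not part of the generated file): queue_size_v2 reports
# the current element count as a scalar int32 tensor. Assumes eager execution.
def _example_queue_size_v2():
  import tensorflow as tf
  q = random_shuffle_queue_v2(component_types=[tf.int32], shapes=[[]])
  queue_enqueue_many_v2(q, [tf.constant([1, 2, 3])])
  size = queue_size_v2(q)  # scalar int32 tensor holding 3
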
def random_shuffle_queue(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
r"""A queue that randomizes the order of elements.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
min_after_dequeue: An optional `int`. Defaults to `0`.
Dequeue will block unless there would be this
many elements after the dequeue or the queue is closed. This
ensures a minimum level of mixing of elements.
seed: An optional `int`. Defaults to `0`.
If either seed or seed2 is set to be non-zero, the random number
generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: An optional `int`. Defaults to `0`.
A second seed to avoid seed collision.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("random_shuffle_queue op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'random_shuffle_queue' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'random_shuffle_queue' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if min_after_dequeue is None:
min_after_dequeue = 0
min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"RandomShuffleQueue", component_types=component_types, shapes=shapes,
capacity=capacity,
min_after_dequeue=min_after_dequeue, seed=seed,
seed2=seed2, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"min_after_dequeue", _op.get_attr("min_after_dequeue"), "seed",
_op.get_attr("seed"), "seed2", _op.get_attr("seed2"), "container",
_op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"RandomShuffleQueue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def RandomShuffleQueue(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
return random_shuffle_queue(component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name, name=name)
RandomShuffleQueue.__doc__ = random_shuffle_queue.__doc__
RandomShuffleQueue = _doc_controls.do_not_generate_docs(_kwarg_only(RandomShuffleQueue))
tf_export("raw_ops.RandomShuffleQueue")(RandomShuffleQueue)
def random_shuffle_queue_eager_fallback(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None, ctx=None):
raise RuntimeError("random_shuffle_queue op does not support eager execution. Arg 'handle' is a ref.")
def random_shuffle_queue_v2(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
r"""A queue that randomizes the order of elements.
Args:
component_types: A list of `tf.DTypes` that has length `>= 1`.
The type of each component in a value.
shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of
this attr is 0, the shapes of queue elements are not constrained, and
only one element may be dequeued at a time.
capacity: An optional `int`. Defaults to `-1`.
The upper bound on the number of elements in this queue.
Negative numbers mean no limit.
min_after_dequeue: An optional `int`. Defaults to `0`.
Dequeue will block unless there would be this
many elements after the dequeue or the queue is closed. This
ensures a minimum level of mixing of elements.
seed: An optional `int`. Defaults to `0`.
If either seed or seed2 is set to be non-zero, the random number
generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: An optional `int`. Defaults to `0`.
A second seed to avoid seed collision.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this queue will be shared under the given name
across multiple sessions.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"RandomShuffleQueueV2", name, _ctx._post_execution_callbacks,
"component_types", component_types, "shapes", shapes, "capacity",
capacity, "min_after_dequeue", min_after_dequeue, "seed", seed,
"seed2", seed2, "container", container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return random_shuffle_queue_v2_eager_fallback(
component_types=component_types, shapes=shapes, capacity=capacity,
min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'random_shuffle_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'random_shuffle_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if min_after_dequeue is None:
min_after_dequeue = 0
min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"RandomShuffleQueueV2", component_types=component_types,
shapes=shapes, capacity=capacity,
min_after_dequeue=min_after_dequeue,
seed=seed, seed2=seed2, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("component_types", _op.get_attr("component_types"), "shapes",
_op.get_attr("shapes"), "capacity", _op.get_attr("capacity"),
"min_after_dequeue", _op.get_attr("min_after_dequeue"), "seed",
_op.get_attr("seed"), "seed2", _op.get_attr("seed2"), "container",
_op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"RandomShuffleQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def RandomShuffleQueueV2(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
return random_shuffle_queue_v2(component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name, name=name)
RandomShuffleQueueV2.__doc__ = random_shuffle_queue_v2.__doc__
RandomShuffleQueueV2 = _doc_controls.do_not_generate_docs(_kwarg_only(RandomShuffleQueueV2))
tf_export("raw_ops.RandomShuffleQueueV2")(RandomShuffleQueueV2)
def random_shuffle_queue_v2_eager_fallback(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function random_shuffle_queue_v2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(component_types, (list, tuple)):
raise TypeError(
"Expected list for 'component_types' argument to "
"'random_shuffle_queue_v2' Op, not %r." % component_types)
component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
if shapes is None:
shapes = []
if not isinstance(shapes, (list, tuple)):
raise TypeError(
"Expected list for 'shapes' argument to "
"'random_shuffle_queue_v2' Op, not %r." % shapes)
shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
if capacity is None:
capacity = -1
capacity = _execute.make_int(capacity, "capacity")
if min_after_dequeue is None:
min_after_dequeue = 0
min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("component_types", component_types, "shapes", shapes, "capacity",
capacity, "min_after_dequeue", min_after_dequeue, "seed", seed, "seed2",
seed2, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"RandomShuffleQueueV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RandomShuffleQueueV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
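# Illustrative sketch (not part of the generated wrappers): configuring a
# shuffling queue resource. Enqueue/dequeue are separate ops defined elsewhere
# in this module, so only the hypothetical handle creation is shown here.
def _example_random_shuffle_queue():
  """Hypothetical helper: a scalar float32 queue mixing >= 10 elements."""
  return random_shuffle_queue_v2(
      component_types=[_dtypes.float32], shapes=[[]],
      capacity=100, min_after_dequeue=10, seed=1, seed2=2,
      shared_name="demo_shuffle_queue")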
def record_input(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None):
r"""Emits randomized records.
Args:
file_pattern: A `string`. Glob pattern for the data files.
file_random_seed: An optional `int`. Defaults to `301`.
Random seeds used to produce randomized records.
file_shuffle_shift_ratio: An optional `float`. Defaults to `0`.
Shifts the list of files after the list is randomly
shuffled.
file_buffer_size: An optional `int`. Defaults to `10000`.
The randomization shuffling buffer.
file_parallelism: An optional `int`. Defaults to `16`.
How many sstables are opened and concurrently iterated over.
batch_size: An optional `int`. Defaults to `32`. The batch size.
compression_type: An optional `string`. Defaults to `""`.
The type of compression for the file. Currently ZLIB and
GZIP are supported. Defaults to none.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"RecordInput", name, _ctx._post_execution_callbacks, "file_pattern",
file_pattern, "file_random_seed", file_random_seed,
"file_shuffle_shift_ratio", file_shuffle_shift_ratio,
"file_buffer_size", file_buffer_size, "file_parallelism",
file_parallelism, "batch_size", batch_size, "compression_type",
compression_type)
return _result
except _core._FallbackException:
try:
return record_input_eager_fallback(
file_pattern=file_pattern, file_random_seed=file_random_seed,
file_shuffle_shift_ratio=file_shuffle_shift_ratio,
file_buffer_size=file_buffer_size,
file_parallelism=file_parallelism, batch_size=batch_size,
compression_type=compression_type, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
file_pattern = _execute.make_str(file_pattern, "file_pattern")
if file_random_seed is None:
file_random_seed = 301
file_random_seed = _execute.make_int(file_random_seed, "file_random_seed")
if file_shuffle_shift_ratio is None:
file_shuffle_shift_ratio = 0
file_shuffle_shift_ratio = _execute.make_float(file_shuffle_shift_ratio, "file_shuffle_shift_ratio")
if file_buffer_size is None:
file_buffer_size = 10000
file_buffer_size = _execute.make_int(file_buffer_size, "file_buffer_size")
if file_parallelism is None:
file_parallelism = 16
file_parallelism = _execute.make_int(file_parallelism, "file_parallelism")
if batch_size is None:
batch_size = 32
batch_size = _execute.make_int(batch_size, "batch_size")
if compression_type is None:
compression_type = ""
compression_type = _execute.make_str(compression_type, "compression_type")
_, _, _op = _op_def_lib._apply_op_helper(
"RecordInput", file_pattern=file_pattern,
file_random_seed=file_random_seed,
file_shuffle_shift_ratio=file_shuffle_shift_ratio,
file_buffer_size=file_buffer_size,
file_parallelism=file_parallelism,
batch_size=batch_size,
compression_type=compression_type, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("file_pattern", _op.get_attr("file_pattern"), "file_random_seed",
_op.get_attr("file_random_seed"), "file_shuffle_shift_ratio",
_op.get_attr("file_shuffle_shift_ratio"), "file_buffer_size",
_op.get_attr("file_buffer_size"), "file_parallelism",
_op.get_attr("file_parallelism"), "batch_size",
_op.get_attr("batch_size"), "compression_type",
_op.get_attr("compression_type"))
_execute.record_gradient(
"RecordInput", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def RecordInput(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None):
return record_input(file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type, name=name)
RecordInput.__doc__ = record_input.__doc__
RecordInput = _doc_controls.do_not_generate_docs(_kwarg_only(RecordInput))
tf_export("raw_ops.RecordInput")(RecordInput)
def record_input_eager_fallback(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function record_input
"""
_ctx = ctx if ctx else _context.context()
file_pattern = _execute.make_str(file_pattern, "file_pattern")
if file_random_seed is None:
file_random_seed = 301
file_random_seed = _execute.make_int(file_random_seed, "file_random_seed")
if file_shuffle_shift_ratio is None:
file_shuffle_shift_ratio = 0
file_shuffle_shift_ratio = _execute.make_float(file_shuffle_shift_ratio, "file_shuffle_shift_ratio")
if file_buffer_size is None:
file_buffer_size = 10000
file_buffer_size = _execute.make_int(file_buffer_size, "file_buffer_size")
if file_parallelism is None:
file_parallelism = 16
file_parallelism = _execute.make_int(file_parallelism, "file_parallelism")
if batch_size is None:
batch_size = 32
batch_size = _execute.make_int(batch_size, "batch_size")
if compression_type is None:
compression_type = ""
compression_type = _execute.make_str(compression_type, "compression_type")
_inputs_flat = []
_attrs = ("file_pattern", file_pattern, "file_random_seed",
file_random_seed, "file_shuffle_shift_ratio", file_shuffle_shift_ratio,
"file_buffer_size", file_buffer_size, "file_parallelism", file_parallelism,
"batch_size", batch_size, "compression_type", compression_type)
_result = _execute.execute(b"RecordInput", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RecordInput", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
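# Illustrative sketch (not part of the generated wrappers): `record_input`
# reads real files, so the glob below is a hypothetical path to TFRecord
# shards that are assumed to already exist.
def _example_record_input():
  """Hypothetical helper: emit one randomized batch of 8 serialized records."""
  return record_input("/tmp/train-*.tfrecord", file_buffer_size=1000,
                      batch_size=8)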
def sparse_accumulator_apply_gradient(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name=None):
r"""Applies a sparse gradient to a given accumulator.
Does not add if local_step is smaller than the accumulator's
global_step.
Args:
    handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
local_step: A `Tensor` of type `int64`.
The local_step value at which the sparse gradient was computed.
gradient_indices: A `Tensor` of type `int64`.
Indices of the sparse gradient to be accumulated. Must be a
vector.
gradient_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
Values are the non-zero slices of the gradient, and must have
the same first dimension as indices, i.e., the nnz represented by indices and
values must be consistent.
gradient_shape: A `Tensor` of type `int64`.
Shape of the sparse gradient to be accumulated.
has_known_shape: A `bool`.
      Boolean indicating whether `gradient_shape` is known; when it is
      not, the shape input is ignored during validation.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("sparse_accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
has_known_shape = _execute.make_bool(has_known_shape, "has_known_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"SparseAccumulatorApplyGradient", handle=handle,
local_step=local_step,
gradient_indices=gradient_indices,
gradient_values=gradient_values,
gradient_shape=gradient_shape,
has_known_shape=has_known_shape,
name=name)
return _op
_result = None
return _result
def SparseAccumulatorApplyGradient(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name=None):
return sparse_accumulator_apply_gradient(handle=handle, local_step=local_step, gradient_indices=gradient_indices, gradient_values=gradient_values, gradient_shape=gradient_shape, has_known_shape=has_known_shape, name=name)
SparseAccumulatorApplyGradient.__doc__ = sparse_accumulator_apply_gradient.__doc__
SparseAccumulatorApplyGradient = _doc_controls.do_not_generate_docs(_kwarg_only(SparseAccumulatorApplyGradient))
tf_export("raw_ops.SparseAccumulatorApplyGradient")(SparseAccumulatorApplyGradient)
def sparse_accumulator_apply_gradient_eager_fallback(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name=None, ctx=None):
raise RuntimeError("sparse_accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
_sparse_accumulator_take_gradient_outputs = ["indices", "values", "shape"]
_SparseAccumulatorTakeGradientOutput = _collections.namedtuple(
"SparseAccumulatorTakeGradient",
_sparse_accumulator_take_gradient_outputs)
def sparse_accumulator_take_gradient(handle, num_required, dtype, name=None):
r"""Extracts the average sparse gradient in a SparseConditionalAccumulator.
  The op blocks until sufficient (i.e., more than num_required)
gradients have been accumulated. If the accumulator has already
aggregated more than num_required gradients, it will return its
average of the accumulated gradients. Also automatically increments
the recorded global_step in the accumulator by 1, and resets the
aggregate to 0.
Args:
handle: A `Tensor` of type mutable `string`.
The handle to a SparseConditionalAccumulator.
num_required: A `Tensor` of type `int32`.
Number of gradients required before we return an aggregate.
dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
The data type of accumulated gradients. Needs to correspond to the type
of the accumulator.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (indices, values, shape).
indices: A `Tensor` of type `int64`.
values: A `Tensor` of type `dtype`.
shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("sparse_accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"SparseAccumulatorTakeGradient", handle=handle,
num_required=num_required,
dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"SparseAccumulatorTakeGradient", _inputs_flat, _attrs, _result, name)
_result = _SparseAccumulatorTakeGradientOutput._make(_result)
return _result
def SparseAccumulatorTakeGradient(handle, num_required, dtype, name=None):
return sparse_accumulator_take_gradient(handle=handle, num_required=num_required, dtype=dtype, name=name)
SparseAccumulatorTakeGradient.__doc__ = sparse_accumulator_take_gradient.__doc__
SparseAccumulatorTakeGradient = _doc_controls.do_not_generate_docs(_kwarg_only(SparseAccumulatorTakeGradient))
tf_export("raw_ops.SparseAccumulatorTakeGradient")(SparseAccumulatorTakeGradient)
def sparse_accumulator_take_gradient_eager_fallback(handle, num_required, dtype, name=None, ctx=None):
raise RuntimeError("sparse_accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
def sparse_conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
r"""A conditional accumulator for aggregating sparse gradients.
The accumulator accepts gradients marked with local_step greater or
equal to the most recent global_step known to the accumulator. The
average can be extracted from the accumulator, provided sufficient
gradients have been accumulated. Extracting the average automatically
resets the aggregate to 0, and increments the global_step recorded by
the accumulator.
Args:
dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
The type of the value being accumulated.
shape: A `tf.TensorShape` or list of `ints`. The shape of the values.
container: An optional `string`. Defaults to `""`.
If non-empty, this accumulator is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional `string`. Defaults to `""`.
If non-empty, this accumulator will be shared under the given name
across multiple sessions.
reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("sparse_conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
shape = _execute.make_shape(shape, "shape")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
if reduction_type is None:
reduction_type = "MEAN"
reduction_type = _execute.make_str(reduction_type, "reduction_type")
_, _, _op = _op_def_lib._apply_op_helper(
"SparseConditionalAccumulator", dtype=dtype, shape=shape,
container=container,
shared_name=shared_name,
reduction_type=reduction_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"), "reduction_type",
_op.get_attr("reduction_type"))
_execute.record_gradient(
"SparseConditionalAccumulator", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def SparseConditionalAccumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
return sparse_conditional_accumulator(dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type, name=name)
SparseConditionalAccumulator.__doc__ = sparse_conditional_accumulator.__doc__
SparseConditionalAccumulator = _doc_controls.do_not_generate_docs(_kwarg_only(SparseConditionalAccumulator))
tf_export("raw_ops.SparseConditionalAccumulator")(SparseConditionalAccumulator)
def sparse_conditional_accumulator_eager_fallback(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None, ctx=None):
raise RuntimeError("sparse_conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
def _stack(elem_type, stack_name="", name=None):
r"""Deprecated, use StackV2.
Args:
elem_type: A `tf.DType`.
stack_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("stack op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
if stack_name is None:
stack_name = ""
stack_name = _execute.make_str(stack_name, "stack_name")
_, _, _op = _op_def_lib._apply_op_helper(
"Stack", elem_type=elem_type, stack_name=stack_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("elem_type", _op.get_attr("elem_type"), "stack_name",
_op.get_attr("stack_name"))
_execute.record_gradient(
"Stack", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def Stack(elem_type, stack_name="", name=None):
return _stack(elem_type=elem_type, stack_name=stack_name, name=name)
Stack.__doc__ = _stack.__doc__
Stack = _doc_controls.do_not_generate_docs(_kwarg_only(Stack))
tf_export("raw_ops.Stack")(Stack)
def _stack_eager_fallback(elem_type, stack_name="", name=None, ctx=None):
raise RuntimeError("stack op does not support eager execution. Arg 'handle' is a ref.")
def stack_close(handle, name=None):
r"""Deprecated, use StackCloseV2.
Args:
handle: A `Tensor` of type mutable `string`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("stack_close op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"StackClose", handle=handle, name=name)
return _op
_result = None
return _result
def StackClose(handle, name=None):
return stack_close(handle=handle, name=name)
StackClose.__doc__ = stack_close.__doc__
StackClose = _doc_controls.do_not_generate_docs(_kwarg_only(StackClose))
tf_export("raw_ops.StackClose")(StackClose)
def stack_close_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("stack_close op does not support eager execution. Arg 'handle' is a ref.")
def stack_close_v2(handle, name=None):
r"""Delete the stack from its resource container.
Args:
handle: A `Tensor` of type `resource`. The handle to a stack.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StackCloseV2", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return stack_close_v2_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"StackCloseV2", handle=handle, name=name)
return _op
_result = None
return _result
def StackCloseV2(handle, name=None):
return stack_close_v2(handle=handle, name=name)
StackCloseV2.__doc__ = stack_close_v2.__doc__
StackCloseV2 = _doc_controls.do_not_generate_docs(_kwarg_only(StackCloseV2))
tf_export("raw_ops.StackCloseV2")(StackCloseV2)
def stack_close_v2_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stack_close_v2
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"StackCloseV2", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def stack_pop(handle, elem_type, name=None):
r"""Deprecated, use StackPopV2.
Args:
handle: A `Tensor` of type mutable `string`.
elem_type: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `elem_type`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
_, _, _op = _op_def_lib._apply_op_helper(
"StackPop", handle=handle, elem_type=elem_type, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("elem_type", _op.get_attr("elem_type"))
_execute.record_gradient(
"StackPop", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StackPop(handle, elem_type, name=None):
return stack_pop(handle=handle, elem_type=elem_type, name=name)
StackPop.__doc__ = stack_pop.__doc__
StackPop = _doc_controls.do_not_generate_docs(_kwarg_only(StackPop))
tf_export("raw_ops.StackPop")(StackPop)
def stack_pop_eager_fallback(handle, elem_type, name=None, ctx=None):
raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")
def stack_pop_v2(handle, elem_type, name=None):
r"""Pop the element at the top of the stack.
Args:
handle: A `Tensor` of type `resource`. The handle to a stack.
elem_type: A `tf.DType`. The type of the elem that is popped.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `elem_type`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StackPopV2", name, _ctx._post_execution_callbacks, handle,
"elem_type", elem_type)
return _result
except _core._FallbackException:
try:
return stack_pop_v2_eager_fallback(
handle, elem_type=elem_type, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
_, _, _op = _op_def_lib._apply_op_helper(
"StackPopV2", handle=handle, elem_type=elem_type, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("elem_type", _op.get_attr("elem_type"))
_execute.record_gradient(
"StackPopV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StackPopV2(handle, elem_type, name=None):
return stack_pop_v2(handle=handle, elem_type=elem_type, name=name)
StackPopV2.__doc__ = stack_pop_v2.__doc__
StackPopV2 = _doc_controls.do_not_generate_docs(_kwarg_only(StackPopV2))
tf_export("raw_ops.StackPopV2")(StackPopV2)
def stack_pop_v2_eager_fallback(handle, elem_type, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stack_pop_v2
"""
_ctx = ctx if ctx else _context.context()
elem_type = _execute.make_type(elem_type, "elem_type")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = ("elem_type", elem_type)
_result = _execute.execute(b"StackPopV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"StackPopV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def stack_push(handle, elem, swap_memory=False, name=None):
r"""Deprecated, use StackPushV2.
Args:
handle: A `Tensor` of type mutable `string`.
elem: A `Tensor`.
swap_memory: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `elem`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_, _, _op = _op_def_lib._apply_op_helper(
"StackPush", handle=handle, elem=elem, swap_memory=swap_memory,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "swap_memory",
_op.get_attr("swap_memory"))
_execute.record_gradient(
"StackPush", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StackPush(handle, elem, swap_memory=False, name=None):
return stack_push(handle=handle, elem=elem, swap_memory=swap_memory, name=name)
StackPush.__doc__ = stack_push.__doc__
StackPush = _doc_controls.do_not_generate_docs(_kwarg_only(StackPush))
tf_export("raw_ops.StackPush")(StackPush)
def stack_push_eager_fallback(handle, elem, swap_memory=False, name=None, ctx=None):
raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
def stack_push_v2(handle, elem, swap_memory=False, name=None):
r"""Push an element onto the stack.
Args:
handle: A `Tensor` of type `resource`. The handle to a stack.
elem: A `Tensor`. The tensor to be pushed onto the stack.
swap_memory: An optional `bool`. Defaults to `False`.
      Swap `elem` to CPU. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `elem`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StackPushV2", name, _ctx._post_execution_callbacks, handle, elem,
"swap_memory", swap_memory)
return _result
except _core._FallbackException:
try:
return stack_push_v2_eager_fallback(
handle, elem, swap_memory=swap_memory, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_, _, _op = _op_def_lib._apply_op_helper(
"StackPushV2", handle=handle, elem=elem, swap_memory=swap_memory,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "swap_memory",
_op.get_attr("swap_memory"))
_execute.record_gradient(
"StackPushV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StackPushV2(handle, elem, swap_memory=False, name=None):
return stack_push_v2(handle=handle, elem=elem, swap_memory=swap_memory, name=name)
StackPushV2.__doc__ = stack_push_v2.__doc__
StackPushV2 = _doc_controls.do_not_generate_docs(_kwarg_only(StackPushV2))
tf_export("raw_ops.StackPushV2")(StackPushV2)
def stack_push_v2_eager_fallback(handle, elem, swap_memory=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stack_push_v2
"""
_ctx = ctx if ctx else _context.context()
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_attr_T, (elem,) = _execute.args_to_matching_eager([elem], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle, elem]
_attrs = ("T", _attr_T, "swap_memory", swap_memory)
_result = _execute.execute(b"StackPushV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"StackPushV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def stack_v2(max_size, elem_type, stack_name="", name=None):
r"""A stack that produces elements in first-in last-out order.
Args:
max_size: A `Tensor` of type `int32`.
The maximum size of the stack if non-negative. If negative, the stack
size is unlimited.
elem_type: A `tf.DType`. The type of the elements on the stack.
stack_name: An optional `string`. Defaults to `""`.
Overrides the name used for the temporary stack resource. Default
value is the name of the 'Stack' op (which is guaranteed unique).
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "StackV2",
name, _ctx._post_execution_callbacks, max_size, "elem_type",
elem_type, "stack_name", stack_name)
return _result
except _core._FallbackException:
try:
return stack_v2_eager_fallback(
max_size, elem_type=elem_type, stack_name=stack_name, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
if stack_name is None:
stack_name = ""
stack_name = _execute.make_str(stack_name, "stack_name")
_, _, _op = _op_def_lib._apply_op_helper(
"StackV2", max_size=max_size, elem_type=elem_type,
stack_name=stack_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("elem_type", _op.get_attr("elem_type"), "stack_name",
_op.get_attr("stack_name"))
_execute.record_gradient(
"StackV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StackV2(max_size, elem_type, stack_name="", name=None):
return stack_v2(max_size=max_size, elem_type=elem_type, stack_name=stack_name, name=name)
StackV2.__doc__ = stack_v2.__doc__
StackV2 = _doc_controls.do_not_generate_docs(_kwarg_only(StackV2))
tf_export("raw_ops.StackV2")(StackV2)
def stack_v2_eager_fallback(max_size, elem_type, stack_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stack_v2
"""
_ctx = ctx if ctx else _context.context()
elem_type = _execute.make_type(elem_type, "elem_type")
if stack_name is None:
stack_name = ""
stack_name = _execute.make_str(stack_name, "stack_name")
max_size = _ops.convert_to_tensor(max_size, _dtypes.int32)
_inputs_flat = [max_size]
_attrs = ("elem_type", elem_type, "stack_name", stack_name)
_result = _execute.execute(b"StackV2", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"StackV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
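# Illustrative sketch (not part of the generated wrappers): a LIFO round trip
# through the resource-based V2 stack ops; the control dependencies order the
# pop after the push and the close after the pop.
def _example_stack_v2_roundtrip():
  """Hypothetical helper: push one value, pop it back, then free the stack."""
  handle = stack_v2(max_size=4, elem_type=_dtypes.float32)
  flow = stack_push_v2(handle, 3.0)
  with _ops.control_dependencies([flow]):
    popped = stack_pop_v2(handle, elem_type=_dtypes.float32)
  with _ops.control_dependencies([popped]):
    close_op = stack_close_v2(handle)
  return popped, close_op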
def stage(values, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Stage values similar to a lightweight Enqueue.
The basic functionality of this Op is similar to a queue with many
fewer capabilities and options. This Op is optimized for performance.
Args:
    values: A list of `Tensor` objects: the tensors to stage. Their dtypes
      determine the list of data types that inserted values should adhere to.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
Maximum number of elements in the Staging Area. If > 0, inserts
on the container will block when the capacity is reached.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
The maximum number of bytes allowed for Tensors in the Staging Area.
If > 0, inserts will block until sufficient space is available.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container. Otherwise,
a default container is used.
shared_name: An optional `string`. Defaults to `""`.
      This name must match the one used by the corresponding Unstage Op.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "Stage",
name, _ctx._post_execution_callbacks, values, "capacity", capacity,
"memory_limit", memory_limit, "container", container, "shared_name",
shared_name)
return _result
except _core._FallbackException:
try:
return stage_eager_fallback(
values, capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"Stage", values=values, capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name)
return _op
_result = None
return _result
def Stage(values, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return stage(values=values, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
Stage.__doc__ = stage.__doc__
Stage = _doc_controls.do_not_generate_docs(_kwarg_only(Stage))
tf_export("raw_ops.Stage")(Stage)
def stage_eager_fallback(values, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stage
"""
_ctx = ctx if ctx else _context.context()
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_attr_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
_inputs_flat = list(values)
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
_attr_dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"Stage", 0, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_result = None
return _result
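# Illustrative sketch (not part of the generated wrappers): a minimal use of
# `stage`, assuming eager mode and a staging area identified by the
# hypothetical shared name "demo_staging".
def _example_stage():
  """Hypothetical helper: insert a single float32 value into a staging area."""
  value = _ops.convert_to_tensor(1.0, _dtypes.float32)
  stage([value], capacity=4, shared_name="demo_staging")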
def stage_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes all elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StageClear", name, _ctx._post_execution_callbacks, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return stage_clear_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"StageClear", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
return _op
_result = None
return _result
def StageClear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return stage_clear(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
StageClear.__doc__ = stage_clear.__doc__
StageClear = _doc_controls.do_not_generate_docs(_kwarg_only(StageClear))
tf_export("raw_ops.StageClear")(StageClear)
def stage_clear_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stage_clear
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_clear' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"StageClear", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def stage_peek(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op peeks at the values at the specified index. If the
  underlying container does not contain sufficient elements,
  this op will block until it does. This Op is optimized for
performance.
Args:
index: A `Tensor` of type `int32`.
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StagePeek", name, _ctx._post_execution_callbacks, index, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return stage_peek_eager_fallback(
index, capacity=capacity, memory_limit=memory_limit,
dtypes=dtypes, container=container, shared_name=shared_name,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"StagePeek", index=index, dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"StagePeek", _inputs_flat, _attrs, _result, name)
return _result
def StagePeek(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return stage_peek(index=index, dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
StagePeek.__doc__ = stage_peek.__doc__
StagePeek = _doc_controls.do_not_generate_docs(_kwarg_only(StagePeek))
tf_export("raw_ops.StagePeek")(StagePeek)
def stage_peek_eager_fallback(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stage_peek
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_peek' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
index = _ops.convert_to_tensor(index, _dtypes.int32)
_inputs_flat = [index]
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"StagePeek", len(dtypes), inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"StagePeek", _inputs_flat, _attrs, _result, name)
return _result
def stage_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op returns the number of elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StageSize", name, _ctx._post_execution_callbacks, "capacity",
capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return stage_size_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"StageSize", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"StageSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def StageSize(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return stage_size(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
StageSize.__doc__ = stage_size.__doc__
StageSize = _doc_controls.do_not_generate_docs(_kwarg_only(StageSize))
tf_export("raw_ops.StageSize")(StageSize)
def stage_size_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stage_size
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'stage_size' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"StageSize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"StageSize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
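# Illustrative sketch (not part of the generated wrappers): inspecting and
# draining the same hypothetical "demo_staging" area with `stage_size`,
# `stage_peek`, and `stage_clear`; the `dtypes` list is assumed to describe
# the staged values.
def _example_stage_inspect_and_clear():
  """Hypothetical helper: report the element count, peek at slot 0, clear."""
  size = stage_size(dtypes=[_dtypes.float32], shared_name="demo_staging")
  peeked = stage_peek(0, dtypes=[_dtypes.float32],
                      shared_name="demo_staging")
  stage_clear(dtypes=[_dtypes.float32], shared_name="demo_staging")
  return size, peeked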
def tensor_array(size, dtype, dynamic_size=False, clear_after_read=True, tensor_array_name="", element_shape=None, name=None):
r"""TODO: add doc.
Args:
size: A `Tensor` of type `int32`.
dtype: A `tf.DType`.
dynamic_size: An optional `bool`. Defaults to `False`.
clear_after_read: An optional `bool`. Defaults to `True`.
tensor_array_name: An optional `string`. Defaults to `""`.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if dynamic_size is None:
dynamic_size = False
dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
if clear_after_read is None:
clear_after_read = True
clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
if tensor_array_name is None:
tensor_array_name = ""
tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArray", size=size, dtype=dtype, dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name,
element_shape=element_shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "dynamic_size",
_op.get_attr("dynamic_size"), "clear_after_read",
_op.get_attr("clear_after_read"), "tensor_array_name",
_op.get_attr("tensor_array_name"), "element_shape",
_op.get_attr("element_shape"))
_execute.record_gradient(
"TensorArray", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArray(size, dtype, dynamic_size=False, clear_after_read=True, tensor_array_name="", element_shape=None, name=None):
return tensor_array(size=size, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape, name=name)
TensorArray.__doc__ = tensor_array.__doc__
TensorArray = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArray))
tf_export("raw_ops.TensorArray")(TensorArray)
def tensor_array_eager_fallback(size, dtype, dynamic_size=False, clear_after_read=True, tensor_array_name="", element_shape=None, name=None, ctx=None):
raise RuntimeError("tensor_array op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_close(handle, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_close op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayClose", handle=handle, name=name)
return _op
_result = None
return _result
def TensorArrayClose(handle, name=None):
return tensor_array_close(handle=handle, name=name)
TensorArrayClose.__doc__ = tensor_array_close.__doc__
TensorArrayClose = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayClose))
tf_export("raw_ops.TensorArrayClose")(TensorArrayClose)
def tensor_array_close_eager_fallback(handle, name=None, ctx=None):
raise RuntimeError("tensor_array_close op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_close_v2(handle, name=None):
r"""Deprecated. Use TensorArrayCloseV3
Args:
handle: A `Tensor` of type `string`.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayCloseV2", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return tensor_array_close_v2_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayCloseV2", handle=handle, name=name)
return _op
_result = None
return _result
def TensorArrayCloseV2(handle, name=None):
return tensor_array_close_v2(handle=handle, name=name)
TensorArrayCloseV2.__doc__ = tensor_array_close_v2.__doc__
TensorArrayCloseV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayCloseV2))
tf_export("raw_ops.TensorArrayCloseV2")(TensorArrayCloseV2)
def tensor_array_close_v2_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_close_v2
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.string)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"TensorArrayCloseV2", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
def tensor_array_close_v3(handle, name=None):
r"""Delete the TensorArray from its resource container.
This enables the user to close and release the resource in the middle
of a step/run.
Args:
handle: A `Tensor` of type `resource`.
The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayCloseV3", name, _ctx._post_execution_callbacks, handle)
return _result
except _core._FallbackException:
try:
return tensor_array_close_v3_eager_fallback(
handle, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayCloseV3", handle=handle, name=name)
return _op
_result = None
return _result
def TensorArrayCloseV3(handle, name=None):
return tensor_array_close_v3(handle=handle, name=name)
TensorArrayCloseV3.__doc__ = tensor_array_close_v3.__doc__
TensorArrayCloseV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayCloseV3))
tf_export("raw_ops.TensorArrayCloseV3")(TensorArrayCloseV3)
def tensor_array_close_v3_eager_fallback(handle, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_close_v3
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = None
_result = _execute.execute(b"TensorArrayCloseV3", 0, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
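
# Illustrative usage sketch (hand-written, not generated): closing a
# TensorArray through the public `tf.TensorArray` wrapper, which for
# resource-based arrays lowers to TensorArrayCloseV3. The wrapper's `close()`
# method and TF 1.x graph mode are assumptions here, not part of this module.
def _example_tensor_array_close_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=2)
  ta = ta.write(0, 1.0).write(1, 2.0)
  close_op = ta.close()  # releases the resource in the middle of a step/run
  with tf.Session() as sess:
    sess.run(close_op)
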
_tensor_array_concat_outputs = ["value", "lengths"]
_TensorArrayConcatOutput = _collections.namedtuple(
"TensorArrayConcat", _tensor_array_concat_outputs)
def tensor_array_concat(handle, flow_in, dtype, element_shape_except0=None, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (value, lengths).
value: A `Tensor` of type `dtype`.
lengths: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_concat op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape_except0 is None:
element_shape_except0 = None
element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayConcat", handle=handle, flow_in=flow_in, dtype=dtype,
element_shape_except0=element_shape_except0,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
_op.get_attr("element_shape_except0"))
_execute.record_gradient(
"TensorArrayConcat", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayConcatOutput._make(_result)
return _result
def TensorArrayConcat(handle, flow_in, dtype, element_shape_except0=None, name=None):
return tensor_array_concat(handle=handle, flow_in=flow_in, dtype=dtype, element_shape_except0=element_shape_except0, name=name)
TensorArrayConcat.__doc__ = tensor_array_concat.__doc__
TensorArrayConcat = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayConcat))
tf_export("raw_ops.TensorArrayConcat")(TensorArrayConcat)
def tensor_array_concat_eager_fallback(handle, flow_in, dtype, element_shape_except0=None, name=None, ctx=None):
raise RuntimeError("tensor_array_concat op does not support eager execution. Arg 'handle' is a ref.")
_tensor_array_concat_v2_outputs = ["value", "lengths"]
_TensorArrayConcatV2Output = _collections.namedtuple(
"TensorArrayConcatV2", _tensor_array_concat_v2_outputs)
def tensor_array_concat_v2(handle, flow_in, dtype, element_shape_except0=None, name=None):
r"""Deprecated. Use TensorArrayConcatV3
Args:
handle: A `Tensor` of type `string`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (value, lengths).
value: A `Tensor` of type `dtype`.
lengths: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayConcatV2", name, _ctx._post_execution_callbacks, handle,
flow_in, "dtype", dtype, "element_shape_except0",
element_shape_except0)
_result = _TensorArrayConcatV2Output._make(_result)
return _result
except _core._FallbackException:
try:
return tensor_array_concat_v2_eager_fallback(
handle, flow_in, dtype=dtype,
element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape_except0 is None:
element_shape_except0 = None
element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayConcatV2", handle=handle, flow_in=flow_in, dtype=dtype,
element_shape_except0=element_shape_except0,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
_op.get_attr("element_shape_except0"))
_execute.record_gradient(
"TensorArrayConcatV2", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayConcatV2Output._make(_result)
return _result
def TensorArrayConcatV2(handle, flow_in, dtype, element_shape_except0=None, name=None):
return tensor_array_concat_v2(handle=handle, flow_in=flow_in, dtype=dtype, element_shape_except0=element_shape_except0, name=name)
TensorArrayConcatV2.__doc__ = tensor_array_concat_v2.__doc__
TensorArrayConcatV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayConcatV2))
tf_export("raw_ops.TensorArrayConcatV2")(TensorArrayConcatV2)
def tensor_array_concat_v2_eager_fallback(handle, flow_in, dtype, element_shape_except0=None, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_concat_v2
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
if element_shape_except0 is None:
element_shape_except0 = None
element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
handle = _ops.convert_to_tensor(handle, _dtypes.string)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
_result = _execute.execute(b"TensorArrayConcatV2", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayConcatV2", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayConcatV2Output._make(_result)
return _result
_tensor_array_concat_v3_outputs = ["value", "lengths"]
_TensorArrayConcatV3Output = _collections.namedtuple(
"TensorArrayConcatV3", _tensor_array_concat_v3_outputs)
def tensor_array_concat_v3(handle, flow_in, dtype, element_shape_except0=None, name=None):
r"""Concat the elements from the TensorArray into value `value`.
Takes `T` elements of shapes
```
(n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
```
and concatenates them into a Tensor of shape:
```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
All elements must have the same shape (excepting the first dimension).
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
dtype: A `tf.DType`. The type of the elem that is returned.
element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
The expected shape of an element, if known,
excluding the first dimension. Used to validate the shapes of
TensorArray elements. If this shape is not fully specified, concatenating
zero-size TensorArrays is an error.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (value, lengths).
value: A `Tensor` of type `dtype`.
lengths: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayConcatV3", name, _ctx._post_execution_callbacks, handle,
flow_in, "dtype", dtype, "element_shape_except0",
element_shape_except0)
_result = _TensorArrayConcatV3Output._make(_result)
return _result
except _core._FallbackException:
try:
return tensor_array_concat_v3_eager_fallback(
handle, flow_in, dtype=dtype,
element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape_except0 is None:
element_shape_except0 = None
element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayConcatV3", handle=handle, flow_in=flow_in, dtype=dtype,
element_shape_except0=element_shape_except0,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape_except0",
_op.get_attr("element_shape_except0"))
_execute.record_gradient(
"TensorArrayConcatV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayConcatV3Output._make(_result)
return _result
def TensorArrayConcatV3(handle, flow_in, dtype, element_shape_except0=None, name=None):
return tensor_array_concat_v3(handle=handle, flow_in=flow_in, dtype=dtype, element_shape_except0=element_shape_except0, name=name)
TensorArrayConcatV3.__doc__ = tensor_array_concat_v3.__doc__
TensorArrayConcatV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayConcatV3))
tf_export("raw_ops.TensorArrayConcatV3")(TensorArrayConcatV3)
def tensor_array_concat_v3_eager_fallback(handle, flow_in, dtype, element_shape_except0=None, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_concat_v3
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
if element_shape_except0 is None:
element_shape_except0 = None
element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
_result = _execute.execute(b"TensorArrayConcatV3", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayConcatV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayConcatV3Output._make(_result)
return _result
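
# Illustrative usage sketch (hand-written, not generated) of the shape
# arithmetic in the TensorArrayConcatV3 docstring: elements of shapes (1, 2)
# and (2, 2) concatenate into a value of shape (1 + 2, 2) = (3, 2). The
# public `tf.TensorArray` wrapper and TF 1.x graph mode are assumptions.
def _example_tensor_array_concat_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=2, infer_shape=False)
  ta = ta.write(0, [[1.0, 2.0]])              # element of shape (1, 2)
  ta = ta.write(1, [[3.0, 4.0], [5.0, 6.0]])  # element of shape (2, 2)
  # The wrapper returns only `value`; the raw op also reports lengths=[1, 2].
  return ta.concat()  # shape (3, 2)
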
def tensor_array_gather(handle, indices, flow_in, dtype, element_shape=None, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
indices: A `Tensor` of type `int32`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGather", handle=handle, indices=indices, flow_in=flow_in,
dtype=dtype, element_shape=element_shape,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"))
_execute.record_gradient(
"TensorArrayGather", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayGather(handle, indices, flow_in, dtype, element_shape=None, name=None):
return tensor_array_gather(handle=handle, indices=indices, flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
TensorArrayGather.__doc__ = tensor_array_gather.__doc__
TensorArrayGather = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGather))
tf_export("raw_ops.TensorArrayGather")(TensorArrayGather)
def tensor_array_gather_eager_fallback(handle, indices, flow_in, dtype, element_shape=None, name=None, ctx=None):
raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_gather_v2(handle, indices, flow_in, dtype, element_shape=None, name=None):
r"""Deprecated. Use TensorArrayGatherV3
Args:
handle: A `Tensor` of type `string`.
indices: A `Tensor` of type `int32`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayGatherV2", name, _ctx._post_execution_callbacks, handle,
indices, flow_in, "dtype", dtype, "element_shape", element_shape)
return _result
except _core._FallbackException:
try:
return tensor_array_gather_v2_eager_fallback(
handle, indices, flow_in, dtype=dtype,
element_shape=element_shape, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGatherV2", handle=handle, indices=indices,
flow_in=flow_in, dtype=dtype,
element_shape=element_shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"))
_execute.record_gradient(
"TensorArrayGatherV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayGatherV2(handle, indices, flow_in, dtype, element_shape=None, name=None):
return tensor_array_gather_v2(handle=handle, indices=indices, flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
TensorArrayGatherV2.__doc__ = tensor_array_gather_v2.__doc__
TensorArrayGatherV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGatherV2))
tf_export("raw_ops.TensorArrayGatherV2")(TensorArrayGatherV2)
def tensor_array_gather_v2_eager_fallback(handle, indices, flow_in, dtype, element_shape=None, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_gather_v2
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
handle = _ops.convert_to_tensor(handle, _dtypes.string)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, indices, flow_in]
_attrs = ("dtype", dtype, "element_shape", element_shape)
_result = _execute.execute(b"TensorArrayGatherV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayGatherV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_gather_v3(handle, indices, flow_in, dtype, element_shape=None, name=None):
r"""Gather specific elements from the TensorArray into output `value`.
All elements selected by `indices` must have the same shape.
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
indices: A `Tensor` of type `int32`.
The locations in the TensorArray from which to read tensor elements.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
dtype: A `tf.DType`. The type of the elem that is returned.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
The expected shape of an element, if known. Used to
validate the shapes of TensorArray elements. If this shape is not
fully specified, gathering zero-size TensorArrays is an error.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayGatherV3", name, _ctx._post_execution_callbacks, handle,
indices, flow_in, "dtype", dtype, "element_shape", element_shape)
return _result
except _core._FallbackException:
try:
return tensor_array_gather_v3_eager_fallback(
handle, indices, flow_in, dtype=dtype,
element_shape=element_shape, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGatherV3", handle=handle, indices=indices,
flow_in=flow_in, dtype=dtype,
element_shape=element_shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"))
_execute.record_gradient(
"TensorArrayGatherV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayGatherV3(handle, indices, flow_in, dtype, element_shape=None, name=None):
return tensor_array_gather_v3(handle=handle, indices=indices, flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
TensorArrayGatherV3.__doc__ = tensor_array_gather_v3.__doc__
TensorArrayGatherV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGatherV3))
tf_export("raw_ops.TensorArrayGatherV3")(TensorArrayGatherV3)
def tensor_array_gather_v3_eager_fallback(handle, indices, flow_in, dtype, element_shape=None, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_gather_v3
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, indices, flow_in]
_attrs = ("dtype", dtype, "element_shape", element_shape)
_result = _execute.execute(b"TensorArrayGatherV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayGatherV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
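
# Illustrative usage sketch (hand-written, not generated): gathering indices
# [2, 0] stacks those elements along a new leading axis; all selected
# elements must share one shape. The public `tf.TensorArray` wrapper and
# TF 1.x graph mode are assumptions.
def _example_tensor_array_gather_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=3)
  ta = ta.write(0, [1.0, 2.0]).write(1, [3.0, 4.0]).write(2, [5.0, 6.0])
  return ta.gather([2, 0])  # shape (2, 2): [[5., 6.], [1., 2.]]
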
def tensor_array_grad(handle, flow_in, source, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type `string`.
flow_in: A `Tensor` of type `float32`.
source: A `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type mutable `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_grad op does not support eager execution. Arg 'grad_handle' is a ref.")
# Add nodes to the TensorFlow graph.
source = _execute.make_str(source, "source")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGrad", handle=handle, flow_in=flow_in, source=source,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("source", _op.get_attr("source"))
_execute.record_gradient(
"TensorArrayGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayGrad(handle, flow_in, source, name=None):
return tensor_array_grad(handle=handle, flow_in=flow_in, source=source, name=name)
TensorArrayGrad.__doc__ = tensor_array_grad.__doc__
TensorArrayGrad = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGrad))
tf_export("raw_ops.TensorArrayGrad")(TensorArrayGrad)
def tensor_array_grad_eager_fallback(handle, flow_in, source, name=None, ctx=None):
raise RuntimeError("tensor_array_grad op does not support eager execution. Arg 'grad_handle' is a ref.")
def tensor_array_grad_v2(handle, flow_in, source, name=None):
r"""Deprecated. Use TensorArrayGradV3
Args:
handle: A `Tensor` of type `string`.
flow_in: A `Tensor` of type `float32`.
source: A `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayGradV2", name, _ctx._post_execution_callbacks, handle,
flow_in, "source", source)
return _result
except _core._FallbackException:
try:
return tensor_array_grad_v2_eager_fallback(
handle, flow_in, source=source, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
source = _execute.make_str(source, "source")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGradV2", handle=handle, flow_in=flow_in, source=source,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("source", _op.get_attr("source"))
_execute.record_gradient(
"TensorArrayGradV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayGradV2(handle, flow_in, source, name=None):
return tensor_array_grad_v2(handle=handle, flow_in=flow_in, source=source, name=name)
TensorArrayGradV2.__doc__ = tensor_array_grad_v2.__doc__
TensorArrayGradV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGradV2))
tf_export("raw_ops.TensorArrayGradV2")(TensorArrayGradV2)
def tensor_array_grad_v2_eager_fallback(handle, flow_in, source, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_grad_v2
"""
_ctx = ctx if ctx else _context.context()
source = _execute.make_str(source, "source")
handle = _ops.convert_to_tensor(handle, _dtypes.string)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = ("source", source)
_result = _execute.execute(b"TensorArrayGradV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayGradV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_tensor_array_grad_v3_outputs = ["grad_handle", "flow_out"]
_TensorArrayGradV3Output = _collections.namedtuple(
"TensorArrayGradV3", _tensor_array_grad_v3_outputs)
def tensor_array_grad_v3(handle, flow_in, source, name=None):
r"""Creates a TensorArray for storing the gradients of values in the given handle.
If the given TensorArray gradient already exists, returns a reference to it.
Locks the size of the original TensorArray by disabling its dynamic size flag.
**A note about the input flow_in:**
The handle flow_in forces the execution of the gradient lookup to occur
only after certain other operations have occurred. For example, when
the forward TensorArray is dynamically sized, writes to this TensorArray
may resize the object. The gradient TensorArray is statically sized based
on the size of the forward TensorArray when this operation executes.
Furthermore, the size of the forward TensorArray is frozen by this call.
As a result, the flow is used to ensure that the call to generate the gradient
TensorArray only happens after all writes are executed.
In the case of dynamically sized TensorArrays, gradient computation should
only be performed on read operations that have themselves been chained via
flow to occur only after all writes have executed. That way the final size
of the forward TensorArray is known when this operation is called.
**A note about the source attribute:**
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
  This double-counts gradients and generally breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray gradient is being called in. This is performed by identifying
a unique string (e.g. "gradients", "gradients_1", ...) from the input
gradient Tensor's name. This string is used as a suffix when creating
the TensorArray gradient object here (the attribute `source`).
The attribute `source` is added as a suffix to the forward TensorArray's
name when performing the creation / lookup, so that each separate gradient
calculation gets its own TensorArray accumulator.
Args:
handle: A `Tensor` of type `resource`.
The handle to the forward TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
source: A `string`.
The gradient source string, used to decide which gradient TensorArray
to return.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (grad_handle, flow_out).
grad_handle: A `Tensor` of type `resource`.
flow_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayGradV3", name, _ctx._post_execution_callbacks, handle,
flow_in, "source", source)
_result = _TensorArrayGradV3Output._make(_result)
return _result
except _core._FallbackException:
try:
return tensor_array_grad_v3_eager_fallback(
handle, flow_in, source=source, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
source = _execute.make_str(source, "source")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGradV3", handle=handle, flow_in=flow_in, source=source,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("source", _op.get_attr("source"))
_execute.record_gradient(
"TensorArrayGradV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayGradV3Output._make(_result)
return _result
def TensorArrayGradV3(handle, flow_in, source, name=None):
return tensor_array_grad_v3(handle=handle, flow_in=flow_in, source=source, name=name)
TensorArrayGradV3.__doc__ = tensor_array_grad_v3.__doc__
TensorArrayGradV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGradV3))
tf_export("raw_ops.TensorArrayGradV3")(TensorArrayGradV3)
def tensor_array_grad_v3_eager_fallback(handle, flow_in, source, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_grad_v3
"""
_ctx = ctx if ctx else _context.context()
source = _execute.make_str(source, "source")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = ("source", source)
_result = _execute.execute(b"TensorArrayGradV3", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayGradV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayGradV3Output._make(_result)
return _result
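
# Illustrative usage sketch (hand-written, not generated) of the `source`
# mechanics described in the TensorArrayGradV3 docstring: distinct source
# strings yield distinct accumulators, so separate gradient computations over
# the same forward array do not double-count. The wrapper's `grad()` method
# and TF 1.x graph mode are assumptions.
def _example_tensor_array_grad_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=1)
  ta = ta.write(0, 3.0)
  g_a = ta.grad(source="gradients")    # accumulator suffixed with "gradients"
  g_b = ta.grad(source="gradients_1")  # a separate accumulator
  return g_a, g_b
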
_tensor_array_grad_with_shape_outputs = ["grad_handle", "flow_out"]
_TensorArrayGradWithShapeOutput = _collections.namedtuple(
"TensorArrayGradWithShape", _tensor_array_grad_with_shape_outputs)
def tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend, source, name=None):
r"""Creates a TensorArray for storing multiple gradients of values in the given handle.
Similar to TensorArrayGradV3. However it creates an accumulator with an
expanded shape compared to the input TensorArray whose gradient is being
computed. This enables multiple gradients for the same TensorArray to be
calculated using the same accumulator.
Args:
handle: A `Tensor` of type `resource`.
The handle to the forward TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
shape_to_prepend: A `Tensor` of type `int32`.
      An int32 vector representing a shape. Elements in the gradient
      accumulator will have a shape that is this `shape_to_prepend` value
      concatenated with the shape of the elements in the TensorArray
      corresponding to the input handle.
source: A `string`.
The gradient source string, used to decide which gradient TensorArray
to return.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (grad_handle, flow_out).
grad_handle: A `Tensor` of type `resource`.
flow_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayGradWithShape", name, _ctx._post_execution_callbacks,
handle, flow_in, shape_to_prepend, "source", source)
_result = _TensorArrayGradWithShapeOutput._make(_result)
return _result
except _core._FallbackException:
try:
return tensor_array_grad_with_shape_eager_fallback(
handle, flow_in, shape_to_prepend, source=source, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
source = _execute.make_str(source, "source")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayGradWithShape", handle=handle, flow_in=flow_in,
shape_to_prepend=shape_to_prepend,
source=source, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("source", _op.get_attr("source"))
_execute.record_gradient(
"TensorArrayGradWithShape", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayGradWithShapeOutput._make(_result)
return _result
def TensorArrayGradWithShape(handle, flow_in, shape_to_prepend, source, name=None):
return tensor_array_grad_with_shape(handle=handle, flow_in=flow_in, shape_to_prepend=shape_to_prepend, source=source, name=name)
TensorArrayGradWithShape.__doc__ = tensor_array_grad_with_shape.__doc__
TensorArrayGradWithShape = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayGradWithShape))
tf_export("raw_ops.TensorArrayGradWithShape")(TensorArrayGradWithShape)
def tensor_array_grad_with_shape_eager_fallback(handle, flow_in, shape_to_prepend, source, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_grad_with_shape
"""
_ctx = ctx if ctx else _context.context()
source = _execute.make_str(source, "source")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
shape_to_prepend = _ops.convert_to_tensor(shape_to_prepend, _dtypes.int32)
_inputs_flat = [handle, flow_in, shape_to_prepend]
_attrs = ("source", source)
_result = _execute.execute(b"TensorArrayGradWithShape", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"TensorArrayGradWithShape", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayGradWithShapeOutput._make(_result)
return _result
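
# Illustrative sketch (hand-written, not generated) of the expanded
# accumulator shape: prepending [2] gives accumulator elements of shape
# (2,) + element_shape, so two gradients of one TensorArray can share an
# accumulator. `tensor_array_v3` is assumed to be the handle-creating wrapper
# defined elsewhere in this module; its exact signature is an assumption.
def _example_tensor_array_grad_with_shape():
  handle, flow = tensor_array_v3(size=1, dtype=_dtypes.float32,
                                 element_shape=[3])
  grad_handle, flow_out = tensor_array_grad_with_shape(
      handle=handle, flow_in=flow, shape_to_prepend=[2], source="gradients")
  return grad_handle, flow_out  # accumulator elements have shape (2, 3)
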
def tensor_array_pack(handle, flow_in, dtype, element_shape=None, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_pack op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
if element_shape is None:
element_shape = None
element_shape = _execute.make_shape(element_shape, "element_shape")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayPack", handle=handle, flow_in=flow_in, dtype=dtype,
element_shape=element_shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"))
_execute.record_gradient(
"TensorArrayPack", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayPack(handle, flow_in, dtype, element_shape=None, name=None):
return tensor_array_pack(handle=handle, flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name)
TensorArrayPack.__doc__ = tensor_array_pack.__doc__
TensorArrayPack = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayPack))
tf_export("raw_ops.TensorArrayPack")(TensorArrayPack)
def tensor_array_pack_eager_fallback(handle, flow_in, dtype, element_shape=None, name=None, ctx=None):
raise RuntimeError("tensor_array_pack op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_read(handle, index, flow_in, dtype, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
index: A `Tensor` of type `int32`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_read op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayRead", handle=handle, index=index, flow_in=flow_in,
dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"TensorArrayRead", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayRead(handle, index, flow_in, dtype, name=None):
return tensor_array_read(handle=handle, index=index, flow_in=flow_in, dtype=dtype, name=name)
TensorArrayRead.__doc__ = tensor_array_read.__doc__
TensorArrayRead = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayRead))
tf_export("raw_ops.TensorArrayRead")(TensorArrayRead)
def tensor_array_read_eager_fallback(handle, index, flow_in, dtype, name=None, ctx=None):
raise RuntimeError("tensor_array_read op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_read_v2(handle, index, flow_in, dtype, name=None):
r"""Deprecated. Use TensorArrayReadV3
Args:
handle: A `Tensor` of type `string`.
index: A `Tensor` of type `int32`.
flow_in: A `Tensor` of type `float32`.
dtype: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayReadV2", name, _ctx._post_execution_callbacks, handle,
index, flow_in, "dtype", dtype)
return _result
except _core._FallbackException:
try:
return tensor_array_read_v2_eager_fallback(
handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayReadV2", handle=handle, index=index, flow_in=flow_in,
dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"TensorArrayReadV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayReadV2(handle, index, flow_in, dtype, name=None):
return tensor_array_read_v2(handle=handle, index=index, flow_in=flow_in, dtype=dtype, name=name)
TensorArrayReadV2.__doc__ = tensor_array_read_v2.__doc__
TensorArrayReadV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayReadV2))
tf_export("raw_ops.TensorArrayReadV2")(TensorArrayReadV2)
def tensor_array_read_v2_eager_fallback(handle, index, flow_in, dtype, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_read_v2
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
handle = _ops.convert_to_tensor(handle, _dtypes.string)
index = _ops.convert_to_tensor(index, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, index, flow_in]
_attrs = ("dtype", dtype)
_result = _execute.execute(b"TensorArrayReadV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayReadV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_read_v3(handle, index, flow_in, dtype, name=None):
r"""Read an element from the TensorArray into output `value`.
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
index: A `Tensor` of type `int32`.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
dtype: A `tf.DType`. The type of the elem that is returned.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayReadV3", name, _ctx._post_execution_callbacks, handle,
index, flow_in, "dtype", dtype)
return _result
except _core._FallbackException:
try:
return tensor_array_read_v3_eager_fallback(
handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayReadV3", handle=handle, index=index, flow_in=flow_in,
dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"TensorArrayReadV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayReadV3(handle, index, flow_in, dtype, name=None):
return tensor_array_read_v3(handle=handle, index=index, flow_in=flow_in, dtype=dtype, name=name)
TensorArrayReadV3.__doc__ = tensor_array_read_v3.__doc__
TensorArrayReadV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayReadV3))
tf_export("raw_ops.TensorArrayReadV3")(TensorArrayReadV3)
def tensor_array_read_v3_eager_fallback(handle, index, flow_in, dtype, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_read_v3
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
index = _ops.convert_to_tensor(index, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, index, flow_in]
_attrs = ("dtype", dtype)
_result = _execute.execute(b"TensorArrayReadV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayReadV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
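
# Illustrative usage sketch (hand-written, not generated): reading back the
# element written at a given index. The public `tf.TensorArray` wrapper and
# TF 1.x graph mode are assumptions.
def _example_tensor_array_read_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.int32, size=2)
  ta = ta.write(0, 10).write(1, 20)
  return ta.read(1)  # scalar int32 tensor holding 20
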
def tensor_array_scatter(handle, indices, value, flow_in, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
indices: A `Tensor` of type `int32`.
value: A `Tensor`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_scatter op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayScatter", handle=handle, indices=indices, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayScatter", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayScatter(handle, indices, value, flow_in, name=None):
return tensor_array_scatter(handle=handle, indices=indices, value=value, flow_in=flow_in, name=name)
TensorArrayScatter.__doc__ = tensor_array_scatter.__doc__
TensorArrayScatter = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayScatter))
tf_export("raw_ops.TensorArrayScatter")(TensorArrayScatter)
def tensor_array_scatter_eager_fallback(handle, indices, value, flow_in, name=None, ctx=None):
raise RuntimeError("tensor_array_scatter op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_scatter_v2(handle, indices, value, flow_in, name=None):
r"""Deprecated. Use TensorArrayScatterV3
Args:
handle: A `Tensor` of type `string`.
indices: A `Tensor` of type `int32`.
value: A `Tensor`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayScatterV2", name, _ctx._post_execution_callbacks, handle,
indices, value, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_scatter_v2_eager_fallback(
handle, indices, value, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayScatterV2", handle=handle, indices=indices, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayScatterV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayScatterV2(handle, indices, value, flow_in, name=None):
return tensor_array_scatter_v2(handle=handle, indices=indices, value=value, flow_in=flow_in, name=name)
TensorArrayScatterV2.__doc__ = tensor_array_scatter_v2.__doc__
TensorArrayScatterV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayScatterV2))
tf_export("raw_ops.TensorArrayScatterV2")(TensorArrayScatterV2)
def tensor_array_scatter_v2_eager_fallback(handle, indices, value, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_scatter_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.string)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, indices, value, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArrayScatterV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayScatterV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_scatter_v3(handle, indices, value, flow_in, name=None):
r"""Scatter the data from the input value into specific TensorArray elements.
  `indices` must be a vector; its length must match the first dim of `value`.
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
indices: A `Tensor` of type `int32`.
The locations at which to write the tensor elements.
value: A `Tensor`. The concatenated tensor to write to the TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayScatterV3", name, _ctx._post_execution_callbacks, handle,
indices, value, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_scatter_v3_eager_fallback(
handle, indices, value, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayScatterV3", handle=handle, indices=indices, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayScatterV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayScatterV3(handle, indices, value, flow_in, name=None):
return tensor_array_scatter_v3(handle=handle, indices=indices, value=value, flow_in=flow_in, name=name)
TensorArrayScatterV3.__doc__ = tensor_array_scatter_v3.__doc__
TensorArrayScatterV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayScatterV3))
tf_export("raw_ops.TensorArrayScatterV3")(TensorArrayScatterV3)
def tensor_array_scatter_v3_eager_fallback(handle, indices, value, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_scatter_v3
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
indices = _ops.convert_to_tensor(indices, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, indices, value, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArrayScatterV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayScatterV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
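
# Illustrative usage sketch (hand-written, not generated): `indices` has
# length value.shape[0], and row i of `value` is written to position
# indices[i]. The public `tf.TensorArray` wrapper and TF 1.x graph mode are
# assumptions.
def _example_tensor_array_scatter_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=3)
  ta = ta.scatter([2, 0], [[1.0], [2.0]])  # element 2 <- [1.], element 0 <- [2.]
  return ta.read(2)  # [1.0]
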
def tensor_array_size(handle, flow_in, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_size op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySize", handle=handle, flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"TensorArraySize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySize(handle, flow_in, name=None):
return tensor_array_size(handle=handle, flow_in=flow_in, name=name)
TensorArraySize.__doc__ = tensor_array_size.__doc__
TensorArraySize = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySize))
tf_export("raw_ops.TensorArraySize")(TensorArraySize)
def tensor_array_size_eager_fallback(handle, flow_in, name=None, ctx=None):
raise RuntimeError("tensor_array_size op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_size_v2(handle, flow_in, name=None):
r"""Deprecated. Use TensorArraySizeV3
Args:
handle: A `Tensor` of type `string`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArraySizeV2", name, _ctx._post_execution_callbacks, handle,
flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_size_v2_eager_fallback(
handle, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySizeV2", handle=handle, flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"TensorArraySizeV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySizeV2(handle, flow_in, name=None):
return tensor_array_size_v2(handle=handle, flow_in=flow_in, name=name)
TensorArraySizeV2.__doc__ = tensor_array_size_v2.__doc__
TensorArraySizeV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySizeV2))
tf_export("raw_ops.TensorArraySizeV2")(TensorArraySizeV2)
def tensor_array_size_v2_eager_fallback(handle, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_size_v2
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.string)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = None
_result = _execute.execute(b"TensorArraySizeV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArraySizeV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_size_v3(handle, flow_in, name=None):
r"""Get the current size of the TensorArray.
Args:
handle: A `Tensor` of type `resource`.
The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArraySizeV3", name, _ctx._post_execution_callbacks, handle,
flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_size_v3_eager_fallback(
handle, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySizeV3", handle=handle, flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"TensorArraySizeV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySizeV3(handle, flow_in, name=None):
return tensor_array_size_v3(handle=handle, flow_in=flow_in, name=name)
TensorArraySizeV3.__doc__ = tensor_array_size_v3.__doc__
TensorArraySizeV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySizeV3))
tf_export("raw_ops.TensorArraySizeV3")(TensorArraySizeV3)
def tensor_array_size_v3_eager_fallback(handle, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_size_v3
"""
_ctx = ctx if ctx else _context.context()
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, flow_in]
_attrs = None
_result = _execute.execute(b"TensorArraySizeV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArraySizeV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
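
# Illustrative usage sketch (hand-written, not generated): for a dynamically
# sized array, writing past the end grows it, and `size()` reports the new
# element count. The public `tf.TensorArray` wrapper and TF 1.x graph mode
# are assumptions.
def _example_tensor_array_size_v3():
  import tensorflow as tf  # imported lazily to avoid a circular import

  ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
  ta = ta.write(4, 1.0)  # grows the array to hold indices 0..4
  return ta.size()       # int32 tensor holding 5
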
def tensor_array_split(handle, value, lengths, flow_in, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
value: A `Tensor`.
lengths: A `Tensor` of type `int64`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_split op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySplit", handle=handle, value=value, lengths=lengths,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArraySplit", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySplit(handle, value, lengths, flow_in, name=None):
return tensor_array_split(handle=handle, value=value, lengths=lengths, flow_in=flow_in, name=name)
TensorArraySplit.__doc__ = tensor_array_split.__doc__
TensorArraySplit = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySplit))
tf_export("raw_ops.TensorArraySplit")(TensorArraySplit)
def tensor_array_split_eager_fallback(handle, value, lengths, flow_in, name=None, ctx=None):
raise RuntimeError("tensor_array_split op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_split_v2(handle, value, lengths, flow_in, name=None):
r"""Deprecated. Use TensorArraySplitV3
Args:
handle: A `Tensor` of type `string`.
value: A `Tensor`.
lengths: A `Tensor` of type `int64`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArraySplitV2", name, _ctx._post_execution_callbacks, handle,
value, lengths, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_split_v2_eager_fallback(
handle, value, lengths, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySplitV2", handle=handle, value=value, lengths=lengths,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArraySplitV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySplitV2(handle, value, lengths, flow_in, name=None):
return tensor_array_split_v2(handle=handle, value=value, lengths=lengths, flow_in=flow_in, name=name)
TensorArraySplitV2.__doc__ = tensor_array_split_v2.__doc__
TensorArraySplitV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySplitV2))
tf_export("raw_ops.TensorArraySplitV2")(TensorArraySplitV2)
def tensor_array_split_v2_eager_fallback(handle, value, lengths, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_split_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.string)
lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, value, lengths, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArraySplitV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArraySplitV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_split_v3(handle, value, lengths, flow_in, name=None):
r"""Split the data from the input value into TensorArray elements.
Assuming that `lengths` takes on values
```(n0, n1, ..., n(T-1))```
and that `value` has shape
```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
this splits values into a TensorArray with T tensors.
TensorArray index t will be the subtensor of values with starting position
```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
and having size
```nt x d0 x d1 x ...```
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
value: A `Tensor`. The concatenated tensor to write to the TensorArray.
lengths: A `Tensor` of type `int64`.
The vector of lengths, how to split the rows of value into the
TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArraySplitV3", name, _ctx._post_execution_callbacks, handle,
value, lengths, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_split_v3_eager_fallback(
handle, value, lengths, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArraySplitV3", handle=handle, value=value, lengths=lengths,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArraySplitV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArraySplitV3(handle, value, lengths, flow_in, name=None):
return tensor_array_split_v3(handle=handle, value=value, lengths=lengths, flow_in=flow_in, name=name)
TensorArraySplitV3.__doc__ = tensor_array_split_v3.__doc__
TensorArraySplitV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArraySplitV3))
tf_export("raw_ops.TensorArraySplitV3")(TensorArraySplitV3)
def tensor_array_split_v3_eager_fallback(handle, value, lengths, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_split_v3
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, value, lengths, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArraySplitV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArraySplitV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_unpack(handle, value, flow_in, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
value: A `Tensor`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayUnpack", handle=handle, value=value, flow_in=flow_in,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayUnpack", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayUnpack(handle, value, flow_in, name=None):
return tensor_array_unpack(handle=handle, value=value, flow_in=flow_in, name=name)
TensorArrayUnpack.__doc__ = tensor_array_unpack.__doc__
TensorArrayUnpack = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayUnpack))
tf_export("raw_ops.TensorArrayUnpack")(TensorArrayUnpack)
def tensor_array_unpack_eager_fallback(handle, value, flow_in, name=None, ctx=None):
raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_v2(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None):
r"""Deprecated. Use TensorArrayV3
Args:
size: A `Tensor` of type `int32`.
dtype: A `tf.DType`.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
dynamic_size: An optional `bool`. Defaults to `False`.
clear_after_read: An optional `bool`. Defaults to `True`.
tensor_array_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayV2", name, _ctx._post_execution_callbacks, size, "dtype",
dtype, "element_shape", element_shape, "dynamic_size", dynamic_size,
"clear_after_read", clear_after_read, "tensor_array_name",
tensor_array_name)
return _result
except _core._FallbackException:
try:
return tensor_array_v2_eager_fallback(
size, dtype=dtype, element_shape=element_shape,
dynamic_size=dynamic_size, clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
if dynamic_size is None:
dynamic_size = False
dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
if clear_after_read is None:
clear_after_read = True
clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
if tensor_array_name is None:
tensor_array_name = ""
tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayV2", size=size, dtype=dtype, element_shape=element_shape,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"), "dynamic_size",
_op.get_attr("dynamic_size"), "clear_after_read",
_op.get_attr("clear_after_read"), "tensor_array_name",
_op.get_attr("tensor_array_name"))
_execute.record_gradient(
"TensorArrayV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayV2(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None):
return tensor_array_v2(size=size, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, name=name)
TensorArrayV2.__doc__ = tensor_array_v2.__doc__
TensorArrayV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayV2))
tf_export("raw_ops.TensorArrayV2")(TensorArrayV2)
def tensor_array_v2_eager_fallback(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_v2
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
if dynamic_size is None:
dynamic_size = False
dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
if clear_after_read is None:
clear_after_read = True
clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
if tensor_array_name is None:
tensor_array_name = ""
tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
size = _ops.convert_to_tensor(size, _dtypes.int32)
_inputs_flat = [size]
_attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
dynamic_size, "clear_after_read", clear_after_read, "tensor_array_name",
tensor_array_name)
_result = _execute.execute(b"TensorArrayV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_tensor_array_v3_outputs = ["handle", "flow"]
_TensorArrayV3Output = _collections.namedtuple(
"TensorArrayV3", _tensor_array_v3_outputs)
def tensor_array_v3(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None):
r"""An array of Tensors of given size.
Write data via Write and read via Read or Pack.
Args:
size: A `Tensor` of type `int32`. The size of the array.
dtype: A `tf.DType`. The type of the elements on the tensor_array.
element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
The expected shape of an element, if known. Used to
validate the shapes of TensorArray elements. If this shape is not
fully specified, gathering zero-size TensorArrays is an error.
dynamic_size: An optional `bool`. Defaults to `False`.
A boolean that determines whether writes to the TensorArray
are allowed to grow the size. By default, this is not allowed.
clear_after_read: An optional `bool`. Defaults to `True`.
If true (default), Tensors in the TensorArray are cleared
after being read. This disables multiple read semantics but allows early
release of memory.
identical_element_shapes: An optional `bool`. Defaults to `False`.
If true (default is false), then all
      elements in the TensorArray will be expected to have identical shapes.
This allows certain behaviors, like dynamically checking for
consistent shapes on write, and being able to fill in properly
shaped zero tensors on stack -- even if the element_shape attribute
is not fully defined.
tensor_array_name: An optional `string`. Defaults to `""`.
Overrides the name used for the temporary tensor_array
resource. Default value is the name of the 'TensorArray' op (which
is guaranteed unique).
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (handle, flow).
handle: A `Tensor` of type `resource`.
flow: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayV3", name, _ctx._post_execution_callbacks, size, "dtype",
dtype, "element_shape", element_shape, "dynamic_size", dynamic_size,
"clear_after_read", clear_after_read, "identical_element_shapes",
identical_element_shapes, "tensor_array_name", tensor_array_name)
_result = _TensorArrayV3Output._make(_result)
return _result
except _core._FallbackException:
try:
return tensor_array_v3_eager_fallback(
size, dtype=dtype, element_shape=element_shape,
dynamic_size=dynamic_size, clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
if dynamic_size is None:
dynamic_size = False
dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
if clear_after_read is None:
clear_after_read = True
clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
if identical_element_shapes is None:
identical_element_shapes = False
identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
if tensor_array_name is None:
tensor_array_name = ""
tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayV3", size=size, dtype=dtype, element_shape=element_shape,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "element_shape",
_op.get_attr("element_shape"), "dynamic_size",
_op.get_attr("dynamic_size"), "clear_after_read",
_op.get_attr("clear_after_read"), "identical_element_shapes",
_op.get_attr("identical_element_shapes"), "tensor_array_name",
_op.get_attr("tensor_array_name"))
_execute.record_gradient(
"TensorArrayV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayV3Output._make(_result)
return _result
def TensorArrayV3(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None):
return tensor_array_v3(size=size, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name, name=name)
TensorArrayV3.__doc__ = tensor_array_v3.__doc__
TensorArrayV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayV3))
tf_export("raw_ops.TensorArrayV3")(TensorArrayV3)
def tensor_array_v3_eager_fallback(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_v3
"""
_ctx = ctx if ctx else _context.context()
dtype = _execute.make_type(dtype, "dtype")
  element_shape = _execute.make_shape(element_shape, "element_shape")
if dynamic_size is None:
dynamic_size = False
dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
if clear_after_read is None:
clear_after_read = True
clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
if identical_element_shapes is None:
identical_element_shapes = False
identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
if tensor_array_name is None:
tensor_array_name = ""
tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
size = _ops.convert_to_tensor(size, _dtypes.int32)
_inputs_flat = [size]
_attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
dynamic_size, "clear_after_read", clear_after_read,
"identical_element_shapes", identical_element_shapes, "tensor_array_name",
tensor_array_name)
_result = _execute.execute(b"TensorArrayV3", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayV3", _inputs_flat, _attrs, _result, name)
_result = _TensorArrayV3Output._make(_result)
return _result
def tensor_array_write(handle, index, value, flow_in, name=None):
r"""TODO: add doc.
Args:
handle: A `Tensor` of type mutable `string`.
index: A `Tensor` of type `int32`.
value: A `Tensor`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
raise RuntimeError("tensor_array_write op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayWrite", handle=handle, index=index, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayWrite", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayWrite(handle, index, value, flow_in, name=None):
return tensor_array_write(handle=handle, index=index, value=value, flow_in=flow_in, name=name)
TensorArrayWrite.__doc__ = tensor_array_write.__doc__
TensorArrayWrite = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayWrite))
tf_export("raw_ops.TensorArrayWrite")(TensorArrayWrite)
def tensor_array_write_eager_fallback(handle, index, value, flow_in, name=None, ctx=None):
raise RuntimeError("tensor_array_write op does not support eager execution. Arg 'handle' is a ref.")
def tensor_array_write_v2(handle, index, value, flow_in, name=None):
r"""Deprecated. Use TensorArrayGradV3
Args:
handle: A `Tensor` of type `string`.
index: A `Tensor` of type `int32`.
value: A `Tensor`.
flow_in: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayWriteV2", name, _ctx._post_execution_callbacks, handle,
index, value, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_write_v2_eager_fallback(
handle, index, value, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayWriteV2", handle=handle, index=index, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayWriteV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayWriteV2(handle, index, value, flow_in, name=None):
return tensor_array_write_v2(handle=handle, index=index, value=value, flow_in=flow_in, name=name)
TensorArrayWriteV2.__doc__ = tensor_array_write_v2.__doc__
TensorArrayWriteV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayWriteV2))
tf_export("raw_ops.TensorArrayWriteV2")(TensorArrayWriteV2)
def tensor_array_write_v2_eager_fallback(handle, index, value, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_write_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.string)
index = _ops.convert_to_tensor(index, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, index, value, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArrayWriteV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayWriteV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tensor_array_write_v3(handle, index, value, flow_in, name=None):
r"""Push an element onto the tensor_array.
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
index: A `Tensor` of type `int32`.
The position to write to inside the TensorArray.
value: A `Tensor`. The tensor to write to the TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"TensorArrayWriteV3", name, _ctx._post_execution_callbacks, handle,
index, value, flow_in)
return _result
except _core._FallbackException:
try:
return tensor_array_write_v3_eager_fallback(
handle, index, value, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
_, _, _op = _op_def_lib._apply_op_helper(
"TensorArrayWriteV3", handle=handle, index=index, value=value,
flow_in=flow_in, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TensorArrayWriteV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def TensorArrayWriteV3(handle, index, value, flow_in, name=None):
return tensor_array_write_v3(handle=handle, index=index, value=value, flow_in=flow_in, name=name)
TensorArrayWriteV3.__doc__ = tensor_array_write_v3.__doc__
TensorArrayWriteV3 = _doc_controls.do_not_generate_docs(_kwarg_only(TensorArrayWriteV3))
tf_export("raw_ops.TensorArrayWriteV3")(TensorArrayWriteV3)
def tensor_array_write_v3_eager_fallback(handle, index, value, flow_in, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tensor_array_write_v3
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
index = _ops.convert_to_tensor(index, _dtypes.int32)
flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
_inputs_flat = [handle, index, value, flow_in]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TensorArrayWriteV3", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TensorArrayWriteV3", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def unstage(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op is similar to a lightweight Dequeue.
The basic functionality is similar to dequeue with many fewer
capabilities and options. This Op is optimized for performance.
Args:
dtypes: A list of `tf.DTypes` that has length `>= 1`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name, "Unstage",
name, _ctx._post_execution_callbacks, "capacity", capacity,
"memory_limit", memory_limit, "dtypes", dtypes, "container",
container, "shared_name", shared_name)
return _result
except _core._FallbackException:
try:
return unstage_eager_fallback(
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op = _op_def_lib._apply_op_helper(
"Unstage", dtypes=dtypes, capacity=capacity,
memory_limit=memory_limit, container=container,
shared_name=shared_name, name=name)
_result = _op.outputs[:]
if not _result:
return _op
_inputs_flat = _op.inputs
_attrs = ("capacity", _op.get_attr("capacity"), "memory_limit",
_op.get_attr("memory_limit"), "dtypes", _op.get_attr("dtypes"),
"container", _op.get_attr("container"), "shared_name",
_op.get_attr("shared_name"))
_execute.record_gradient(
"Unstage", _inputs_flat, _attrs, _result, name)
return _result
def Unstage(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
return unstage(dtypes=dtypes, capacity=capacity, memory_limit=memory_limit, container=container, shared_name=shared_name, name=name)
Unstage.__doc__ = unstage.__doc__
Unstage = _doc_controls.do_not_generate_docs(_kwarg_only(Unstage))
tf_export("raw_ops.Unstage")(Unstage)
def unstage_eager_fallback(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unstage
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(dtypes, (list, tuple)):
raise TypeError(
"Expected list for 'dtypes' argument to "
"'unstage' Op, not %r." % dtypes)
dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_inputs_flat = []
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"Unstage", len(dtypes), inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Unstage", _inputs_flat, _attrs, _result, name)
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
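# Illustrative sketch (not part of the generated API): the serialized OpList
# bytes handed to _InitOpDefLibrary decode back into op definitions like the
# ones shown in the comment block below. A hypothetical inspection helper:
def _example_list_op_names(op_list_proto_bytes):
  op_list = _op_def_pb2.OpList()
  op_list.ParseFromString(op_list_proto_bytes)
  return [op.name for op in op_list.op]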
# op {
# name: "AccumulatorApplyGradient"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "local_step"
# type: DT_INT64
# }
# input_arg {
# name: "gradient"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "AccumulatorNumAccumulated"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "num_accumulated"
# type: DT_INT32
# }
# }
# op {
# name: "AccumulatorSetGlobalStep"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "new_global_step"
# type: DT_INT64
# }
# }
# op {
# name: "AccumulatorTakeGradient"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "num_required"
# type: DT_INT32
# }
# output_arg {
# name: "average"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Barrier"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "BarrierClose"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "cancel_pending_enqueues"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "BarrierIncompleteSize"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# }
# op {
# name: "BarrierInsertMany"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "keys"
# type: DT_STRING
# }
# input_arg {
# name: "values"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "component_index"
# type: "int"
# }
# }
# op {
# name: "BarrierReadySize"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# }
# op {
# name: "BarrierTakeMany"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "num_elements"
# type: DT_INT32
# }
# output_arg {
# name: "indices"
# type: DT_INT64
# }
# output_arg {
# name: "keys"
# type: DT_STRING
# }
# output_arg {
# name: "values"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "allow_small_batch"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "wait_for_incomplete"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "ConditionalAccumulator"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "reduction_type"
# type: "string"
# default_value {
# s: "MEAN"
# }
# allowed_values {
# list {
# s: "MEAN"
# s: "SUM"
# }
# }
# }
# is_stateful: true
# }
# op {
# name: "DeleteSessionTensor"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# is_stateful: true
# }
# op {
# name: "DynamicPartition"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "partitions"
# type: DT_INT32
# }
# output_arg {
# name: "outputs"
# type_attr: "T"
# number_attr: "num_partitions"
# }
# attr {
# name: "num_partitions"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# }
# }
# op {
# name: "DynamicStitch"
# input_arg {
# name: "indices"
# type: DT_INT32
# number_attr: "N"
# }
# input_arg {
# name: "data"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "merged"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# }
# }
# op {
# name: "FIFOQueue"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "FIFOQueueV2"
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "FakeQueue"
# input_arg {
# name: "resource"
# type: DT_RESOURCE
# }
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# is_stateful: true
# }
# op {
# name: "GetSessionHandle"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# output_arg {
# name: "handle"
# type: DT_STRING
# }
# attr {
# name: "T"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "GetSessionHandleV2"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "T"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "GetSessionTensor"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "MapClear"
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapIncompleteSize"
# output_arg {
# name: "size"
# type: DT_INT32
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapPeek"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapSize"
# output_arg {
# name: "size"
# type: DT_INT32
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapStage"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "values"
# type_list_attr: "fake_dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "fake_dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapUnstage"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "MapUnstageNoKey"
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "key"
# type: DT_INT64
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapClear"
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapIncompleteSize"
# output_arg {
# name: "size"
# type: DT_INT32
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapPeek"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapSize"
# output_arg {
# name: "size"
# type: DT_INT32
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapStage"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "values"
# type_list_attr: "fake_dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "fake_dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapUnstage"
# input_arg {
# name: "key"
# type: DT_INT64
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "OrderedMapUnstageNoKey"
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# output_arg {
# name: "key"
# type: DT_INT64
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "PaddingFIFOQueue"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "PaddingFIFOQueueV2"
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "ParallelDynamicStitch"
# input_arg {
# name: "indices"
# type: DT_INT32
# number_attr: "N"
# }
# input_arg {
# name: "data"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "merged"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# }
# }
# op {
# name: "PriorityQueue"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "component_types"
# type: "list(type)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "PriorityQueueV2"
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "component_types"
# type: "list(type)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueClose"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "cancel_pending_enqueues"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "QueueCloseV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "cancel_pending_enqueues"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueDequeue"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "QueueDequeueMany"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "n"
# type: DT_INT32
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "QueueDequeueManyV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "n"
# type: DT_INT32
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueDequeueUpTo"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "n"
# type: DT_INT32
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "QueueDequeueUpToV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "n"
# type: DT_INT32
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueDequeueV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "components"
# type_list_attr: "component_types"
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueEnqueue"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "components"
# type_list_attr: "Tcomponents"
# }
# attr {
# name: "Tcomponents"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "QueueEnqueueMany"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "components"
# type_list_attr: "Tcomponents"
# }
# attr {
# name: "Tcomponents"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# }
# op {
# name: "QueueEnqueueManyV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "components"
# type_list_attr: "Tcomponents"
# }
# attr {
# name: "Tcomponents"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueEnqueueV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "components"
# type_list_attr: "Tcomponents"
# }
# attr {
# name: "Tcomponents"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "timeout_ms"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
# op {
# name: "QueueIsClosed"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "is_closed"
# type: DT_BOOL
# }
# }
# op {
# name: "QueueIsClosedV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "is_closed"
# type: DT_BOOL
# }
# is_stateful: true
# }
# op {
# name: "QueueSize"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# }
# op {
# name: "QueueSizeV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# is_stateful: true
# }
# op {
# name: "RandomShuffleQueue"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "min_after_dequeue"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed2"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "RandomShuffleQueueV2"
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "component_types"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "shapes"
# type: "list(shape)"
# default_value {
# list {
# }
# }
# has_minimum: true
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: -1
# }
# }
# attr {
# name: "min_after_dequeue"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed2"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "RecordInput"
# output_arg {
# name: "records"
# type: DT_STRING
# }
# attr {
# name: "file_pattern"
# type: "string"
# }
# attr {
# name: "file_random_seed"
# type: "int"
# default_value {
# i: 301
# }
# }
# attr {
# name: "file_shuffle_shift_ratio"
# type: "float"
# default_value {
# f: 0
# }
# }
# attr {
# name: "file_buffer_size"
# type: "int"
# default_value {
# i: 10000
# }
# }
# attr {
# name: "file_parallelism"
# type: "int"
# default_value {
# i: 16
# }
# }
# attr {
# name: "batch_size"
# type: "int"
# default_value {
# i: 32
# }
# }
# attr {
# name: "compression_type"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "SparseAccumulatorApplyGradient"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "local_step"
# type: DT_INT64
# }
# input_arg {
# name: "gradient_indices"
# type: DT_INT64
# }
# input_arg {
# name: "gradient_values"
# type_attr: "dtype"
# }
# input_arg {
# name: "gradient_shape"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "has_known_shape"
# type: "bool"
# }
# }
# op {
# name: "SparseAccumulatorTakeGradient"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "num_required"
# type: DT_INT32
# }
# output_arg {
# name: "indices"
# type: DT_INT64
# }
# output_arg {
# name: "values"
# type_attr: "dtype"
# }
# output_arg {
# name: "shape"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "SparseConditionalAccumulator"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "dtype"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "reduction_type"
# type: "string"
# default_value {
# s: "MEAN"
# }
# allowed_values {
# list {
# s: "MEAN"
# s: "SUM"
# }
# }
# }
# is_stateful: true
# }
# op {
# name: "Stack"
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "elem_type"
# type: "type"
# }
# attr {
# name: "stack_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "StackClose"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# }
# op {
# name: "StackCloseV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# is_stateful: true
# }
# op {
# name: "StackPop"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# output_arg {
# name: "elem"
# type_attr: "elem_type"
# }
# attr {
# name: "elem_type"
# type: "type"
# }
# }
# op {
# name: "StackPopV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "elem"
# type_attr: "elem_type"
# }
# attr {
# name: "elem_type"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "StackPush"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "elem"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "swap_memory"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "StackPushV2"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "elem"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "swap_memory"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "StackV2"
# input_arg {
# name: "max_size"
# type: DT_INT32
# }
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# attr {
# name: "elem_type"
# type: "type"
# }
# attr {
# name: "stack_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "Stage"
# input_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "StageClear"
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "StagePeek"
# input_arg {
# name: "index"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "StageSize"
# output_arg {
# name: "size"
# type: DT_INT32
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "TensorArray"
# input_arg {
# name: "size"
# type: DT_INT32
# }
# output_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "dynamic_size"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "clear_after_read"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "tensor_array_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayV3"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayClose"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayCloseV3"
# }
# }
# op {
# name: "TensorArrayCloseV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayCloseV3"
# }
# }
# op {
# name: "TensorArrayCloseV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayConcat"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# output_arg {
# name: "lengths"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape_except0"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayGradV3"
# }
# }
# op {
# name: "TensorArrayConcatV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# output_arg {
# name: "lengths"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape_except0"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# }
# op {
# name: "TensorArrayConcatV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# output_arg {
# name: "lengths"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape_except0"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayGather"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayGatherV3"
# }
# }
# op {
# name: "TensorArrayGatherV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayGatherV3"
# }
# }
# op {
# name: "TensorArrayGatherV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayGrad"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "grad_handle"
# type: DT_STRING
# is_ref: true
# }
# attr {
# name: "source"
# type: "string"
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayGradV3"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayGradV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "grad_handle"
# type: DT_STRING
# }
# attr {
# name: "source"
# type: "string"
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayGradV3"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayGradV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "grad_handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "source"
# type: "string"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayGradWithShape"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# input_arg {
# name: "shape_to_prepend"
# type: DT_INT32
# }
# output_arg {
# name: "grad_handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "source"
# type: "string"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayPack"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayGatherV3 with RangeOp"
# }
# }
# op {
# name: "TensorArrayRead"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayReadV3"
# }
# }
# op {
# name: "TensorArrayReadV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayReadV3"
# }
# }
# op {
# name: "TensorArrayReadV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "value"
# type_attr: "dtype"
# }
# attr {
# name: "dtype"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayScatter"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 19
# explanation: "Use TensorArrayGradV3"
# }
# }
# op {
# name: "TensorArrayScatterV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayScatterV3"
# }
# }
# op {
# name: "TensorArrayScatterV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "indices"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "TensorArraySize"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# deprecation {
# version: 16
# explanation: "Use TensorArraySizeV3"
# }
# }
# op {
# name: "TensorArraySizeV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# deprecation {
# version: 26
# explanation: "Use TensorArraySizeV3"
# }
# }
# op {
# name: "TensorArraySizeV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "size"
# type: DT_INT32
# }
# is_stateful: true
# }
# op {
# name: "TensorArraySplit"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "lengths"
# type: DT_INT64
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 16
# explanation: "Use TensorArraySplitV3"
# }
# }
# op {
# name: "TensorArraySplitV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "lengths"
# type: DT_INT64
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 26
# explanation: "Use TensorArraySplitV3"
# }
# }
# op {
# name: "TensorArraySplitV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "lengths"
# type: DT_INT64
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayUnpack"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 20
# explanation: "Use TensorArrayScatterV3 with RangeOp"
# }
# }
# op {
# name: "TensorArrayV2"
# input_arg {
# name: "size"
# type: DT_INT32
# }
# output_arg {
# name: "handle"
# type: DT_STRING
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# attr {
# name: "dynamic_size"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "clear_after_read"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "tensor_array_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayV3"
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayV3"
# input_arg {
# name: "size"
# type: DT_INT32
# }
# output_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# output_arg {
# name: "flow"
# type: DT_FLOAT
# }
# attr {
# name: "dtype"
# type: "type"
# }
# attr {
# name: "element_shape"
# type: "shape"
# default_value {
# shape {
# unknown_rank: true
# }
# }
# }
# attr {
# name: "dynamic_size"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "clear_after_read"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "identical_element_shapes"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "tensor_array_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
# op {
# name: "TensorArrayWrite"
# input_arg {
# name: "handle"
# type: DT_STRING
# is_ref: true
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 16
# explanation: "Use TensorArrayWriteV3"
# }
# }
# op {
# name: "TensorArrayWriteV2"
# input_arg {
# name: "handle"
# type: DT_STRING
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# deprecation {
# version: 26
# explanation: "Use TensorArrayWriteV3"
# }
# }
# op {
# name: "TensorArrayWriteV3"
# input_arg {
# name: "handle"
# type: DT_RESOURCE
# }
# input_arg {
# name: "index"
# type: DT_INT32
# }
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "flow_in"
# type: DT_FLOAT
# }
# output_arg {
# name: "flow_out"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# }
# is_stateful: true
# }
# op {
# name: "Unstage"
# output_arg {
# name: "values"
# type_list_attr: "dtypes"
# }
# attr {
# name: "capacity"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "memory_limit"
# type: "int"
# default_value {
# i: 0
# }
# has_minimum: true
# }
# attr {
# name: "dtypes"
# type: "list(type)"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "container"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "shared_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nr\n\030AccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\021\n\010gradient\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n?\n\031AccumulatorNumAccumulated\022\r\n\006handle\030\007\200\001\001\032\023\n\017num_accumulated\030\003\n>\n\030AccumulatorSetGlobalStep\022\r\n\006handle\030\007\200\001\001\022\023\n\017new_global_step\030\t\nr\n\027AccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\020\n\007average\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\255\001\n\007Barrier\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\nB\n\014BarrierClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\n0\n\025BarrierIncompleteSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\\\n\021BarrierInsertMany\022\r\n\006handle\030\007\200\001\001\022\010\n\004keys\030\007\022\013\n\006values\"\001T\"\t\n\001T\022\004type\"\026\n\017component_index\022\003int\n+\n\020BarrierReadySize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\347\001\n\017BarrierTakeMany\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_elements\030\003\032\013\n\007indices\030\t\032\010\n\004keys\030\007\032\031\n\006values2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\035\n\021allow_small_batch\022\004bool\032\002(\000\"\037\n\023wait_for_incomplete\022\004bool\032\002(\000\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\305\001\n\026ConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\"/\n\016reduction_type\022\006string\032\006\022\004MEAN:\r\n\013\022\004MEAN\022\003SUM\210\001\001\n$\n\023DeleteSessionTensor\022\n\n\006handle\030\007\210\001\001\nq\n\020DynamicPartition\022\t\n\004data\"\001T\022\016\n\npartitions\030\003\032\034\n\007outputs\"\001T*\016num_partitions\"\031\n\016num_partitions\022\003int(\0010\001\"\t\n\001T\022\004type\nS\n\rDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\257\001\n\tFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\256\001\n\013FIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\00
2\022\000\210\001\001\n+\n\tFakeQueue\022\014\n\010resource\030\024\032\r\n\006handle\030\007\200\001\001\210\001\001\n8\n\020GetSessionHandle\022\n\n\005value\"\001T\032\n\n\006handle\030\007\"\t\n\001T\022\004type\210\001\001\n:\n\022GetSessionHandleV2\022\n\n\005value\"\001T\032\n\n\006handle\030\024\"\t\n\001T\022\004type\210\001\001\n@\n\020GetSessionTensor\022\n\n\006handle\030\007\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n\211\001\n\010MapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\021MapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\264\001\n\007MapPeek\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\222\001\n\007MapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\325\001\n\010MapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\267\001\n\nMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\274\001\n\017MapUnstageNoKey\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\220\001\n\017OrderedMapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\243\001\n\030OrderedMapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\273\001\n\016OrderedMapPeek\022\007\n\003k
ey\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\231\001\n\016OrderedMapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\334\001\n\017OrderedMapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\276\001\n\021OrderedMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\303\001\n\026OrderedMapUnstageNoKey\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\266\001\n\020PaddingFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\265\001\n\022PaddingFIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n[\n\025ParallelDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\261\001\n\rPriorityQueue\032\r\n\006handle\030\007\200\001\001\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\260\001\n\017PriorityQueueV2\032\n\n\006handle\030\024\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n@\n\nQueueClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enq
ueues\022\004bool\032\002(\000\nB\n\014QueueCloseV2\022\n\n\006handle\030\024\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\210\001\001\n\177\n\014QueueDequeue\022\r\n\006handle\030\007\200\001\001\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\212\001\n\020QueueDequeueMany\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueManyV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n\212\001\n\020QueueDequeueUpTo\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueUpToV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n\201\001\n\016QueueDequeueV2\022\n\n\006handle\030\024\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\nw\n\014QueueEnqueue\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n{\n\020QueueEnqueueMany\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n}\n\022QueueEnqueueManyV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\ny\n\016QueueEnqueueV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n-\n\rQueueIsClosed\022\r\n\006handle\030\007\200\001\001\032\r\n\tis_closed\030\n\n/\n\017QueueIsClosedV2\022\n\n\006handle\030\024\032\r\n\tis_closed\030\n\210\001\001\n$\n\tQueueSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n&\n\013QueueSizeV2\022\n\n\006handle\030\024\032\010\n\004size\030\003\210\001\001\n\371\001\n\022RandomShuffleQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\370\001\n\024RandomShuffleQueueV2\032\n\n\006handle\030\024\"!\n\017co
mponent_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\357\001\n\013RecordInput\032\013\n\007records\030\007\"\026\n\014file_pattern\022\006string\"\034\n\020file_random_seed\022\003int\032\003\030\255\002\"(\n\030file_shuffle_shift_ratio\022\005float\032\005%\000\000\000\000\"\034\n\020file_buffer_size\022\003int\032\003\030\220N\"\033\n\020file_parallelism\022\003int\032\002\030\020\"\025\n\nbatch_size\022\003int\032\002\030 \"\036\n\020compression_type\022\006string\032\002\022\000\210\001\001\n\302\001\n\036SparseAccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\024\n\020gradient_indices\030\t\022\030\n\017gradient_values\"\005dtype\022\022\n\016gradient_shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\017has_known_shape\022\004bool\n\217\001\n\035SparseAccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\013\n\007indices\030\t\032\017\n\006values\"\005dtype\032\t\n\005shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\313\001\n\034SparseConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\"/\n\016reduction_type\022\006string\032\006\022\004MEAN:\r\n\013\022\004MEAN\022\003SUM\210\001\001\nF\n\005Stack\032\r\n\006handle\030\007\200\001\001\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\033\n\nStackClose\022\r\n\006handle\030\007\200\001\001\n\035\n\014StackCloseV2\022\n\n\006handle\030\024\210\001\001\n?\n\010StackPop\022\r\n\006handle\030\007\200\001\001\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\nA\n\nStackPopV2\022\n\n\006handle\030\024\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\210\001\001\nV\n\tStackPush\022\r\n\006handle\030\007\200\001\001\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\nX\n\013StackPushV2\022\n\n\006handle\030\024\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\210\001\001\nS\n\007StackV2\022\014\n\010max_size\030\003\032\n\n\006handle\030\024\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\005Stage\022\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\213\001\n\nStageClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\
006string\032\002\022\000\210\001\001\n\253\001\n\tStagePeek\022\t\n\005index\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\224\001\n\tStageSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\306\001\n\013TensorArray\022\010\n\004size\030\003\032\r\n\006handle\030\007\200\001\001\"\r\n\005dtype\022\004type\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\025\010\020\022\021Use TensorArrayV3\210\001\001\n=\n\020TensorArrayClose\022\r\n\006handle\030\007\200\001\001B\032\010\020\022\026Use TensorArrayCloseV3\n<\n\022TensorArrayCloseV2\022\n\n\006handle\030\007B\032\010\032\022\026Use TensorArrayCloseV3\n#\n\022TensorArrayCloseV3\022\n\n\006handle\030\024\210\001\001\n\234\001\n\021TensorArrayConcat\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001B\031\010\020\022\025Use TensorArrayGradV3\n\200\001\n\023TensorArrayConcatV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\n\203\001\n\023TensorArrayConcatV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\210\001\001\n\226\001\n\021TensorArrayGather\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\020\022\027Use TensorArrayGatherV3\n\225\001\n\023TensorArrayGatherV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\032\022\027Use TensorArrayGatherV3\n{\n\023TensorArrayGatherV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\210\001\001\nn\n\017TensorArrayGrad\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\022\n\013grad_handle\030\007\200\001\001\"\020\n\006source\022\006stringB\031\010\020\022\025Use TensorArrayGradV3\210\001\001\nm\n\021TensorArrayGradV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\007\"\020\n\006source\022\006stringB\031\010\032\022\025Use 
TensorArrayGradV3\210\001\001\n`\n\021TensorArrayGradV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\024\032\014\n\010flow_out\030\001\"\020\n\006source\022\006string\210\001\001\n}\n\030TensorArrayGradWithShape\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\022\024\n\020shape_to_prepend\030\003\032\017\n\013grad_handle\030\024\032\014\n\010flow_out\030\001\"\020\n\006source\022\006string\210\001\001\n\224\001\n\017TensorArrayPack\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B(\010\020\022$Use TensorArrayGatherV3 with RangeOp\nr\n\017TensorArrayRead\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\020\022\025Use TensorArrayReadV3\nq\n\021TensorArrayReadV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\032\022\025Use TensorArrayReadV3\nY\n\021TensorArrayReadV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n}\n\022TensorArrayScatter\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\031\010\023\022\025Use TensorArrayGradV3\n\177\n\024TensorArrayScatterV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\034\010\032\022\030Use TensorArrayScatterV3\nd\n\024TensorArrayScatterV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\nR\n\017TensorArraySize\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\020\022\025Use TensorArraySizeV3\nQ\n\021TensorArraySizeV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\032\022\025Use TensorArraySizeV3\n9\n\021TensorArraySizeV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\010\n\004size\030\003\210\001\001\n|\n\020TensorArraySplit\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArraySplitV3\n{\n\022TensorArraySplitV2\022\n\n\006handle\030\007\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use TensorArraySplitV3\nb\n\022TensorArraySplitV3\022\n\n\006handle\030\024\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\177\n\021TensorArrayUnpack\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB)\010\024\022%Use TensorArrayScatterV3 with 
RangeOp\n\305\001\n\rTensorArrayV2\022\010\n\004size\030\003\032\n\n\006handle\030\007\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000B\025\010\032\022\021Use TensorArrayV3\210\001\001\n\336\001\n\rTensorArrayV3\022\010\n\004size\030\003\032\n\n\006handle\030\024\032\010\n\004flow\030\001\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"$\n\030identical_element_shapes\022\004bool\032\002(\000\"\037\n\021tensor_array_name\022\006string\032\002\022\000\210\001\001\nz\n\020TensorArrayWrite\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArrayWriteV3\ny\n\022TensorArrayWriteV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use TensorArrayWriteV3\n`\n\022TensorArrayWriteV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\236\001\n\007Unstage\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001")
| 37.883051
| 26,160
| 0.687783
|
23561396a0461fc9edd5e1c1241adac191d652f5
| 5,287
|
py
|
Python
|
hrf/trf_g.py
|
thu-spmi/semi-EBM
|
393e3ea3566dd60c48872a5c573a335e8e802707
|
[
"Apache-2.0"
] | 2
|
2021-09-18T14:21:24.000Z
|
2021-12-20T03:39:13.000Z
|
hrf/trf_g.py
|
thu-spmi/semi-EBM
|
393e3ea3566dd60c48872a5c573a335e8e802707
|
[
"Apache-2.0"
] | null | null | null |
hrf/trf_g.py
|
thu-spmi/semi-EBM
|
393e3ea3566dd60c48872a5c573a335e8e802707
|
[
"Apache-2.0"
] | 1
|
2021-09-12T07:02:23.000Z
|
2021-09-12T07:02:23.000Z
|
from base import *
from lm import *
from . import *
from .trf import DefaultOps
from trf.nce import noise
class Config(trf.Config):
def __init__(self, data):
super().__init__(data)
del self.__dict__['pi_0']
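        # initialize the global log-partition as E[len] * (log |word vocab| + log |tag vocab|)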
average_len = np.sum(np.arange(self.max_len+1) * self.pi_true)
self.global_logz = average_len * (np.log(self.word_vocab_size) + np.log(self.tag_vocab_size))
def __str__(self):
s = super().__str__()
s = s.replace('hrf_', 'grf_')
return s
class TRF(trf.TRF):
def __init__(self, config, data, logdir,
device='/gpu:0', name='trf'):
super().__init__(config, data, logdir, device, name)
# self.norm_const = norm.NormOne(config)
self.norm_const = norm.Norm2(config)
self.norm_const.logz = 0
noise_config = noise.Config()
noise_config.pack_size = config.sample_batch_size
noise_config.min_len = config.min_len
noise_config.max_len = config.max_len
noise_config.beg_token = config.beg_tokens[0]
noise_config.end_token = config.end_tokens[0]
noise_config.vocab_size = config.word_vocab_size
noise_config.pi_true = config.pi_true
wod_data = data.create_data(level=0)
self.noise_sampler = noise.NoiseSamplerNgram(noise_config, wod_data, 2, is_parallel=False)
def normalize(self, logp, lengths, for_eval=True):
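        # subtract the length-dependent log normalization constant to obtain normalized log-probs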
logp_m = logp - self.norm_const.get_logz(lengths)
return logp_m
def initialize(self):
super().initialize()
# self.phi_tag.set_params()
# self.phi_mix.set_params()
# self.phi_word.set_params()
def draw(self, n):
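        # keep drawing word sequences from the n-gram noise sampler until n samples are
        # collected, logging each sample to the 'sample' output file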
assert n == self.config.sample_batch_size
sample_list = []
while len(sample_list) < n:
wlist, _ = self.noise_sampler.get(None)
seq_list = [seq.Seq(x) for x in wlist]
sample_list += seq_list
with self.time_recoder.recode('write_sample'):
f = self.write_files.get('sample')
for s in sample_list:
f.write(str(s))
return sample_list
def get_sample_scaler(self, sample_list):
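        # self-normalized importance weights: w_i = p0(x_i) / (n * q(x_i) * Z_hat), where q is
        # the noise distribution and Z_hat is the logsumexp-based estimate of the normalizing
        # constant, so the returned weights sum to one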
n = len(sample_list)
x_list = [s.x[0] for s in sample_list]
logq = self.noise_sampler.noise_logps(x_list)
logp0 = self.get_logpxs(x_list, is_norm=True) + self.norm_const.logz
approx_logz = logsumexp(logp0 - logq - np.log(n))
log_scalar = logp0 - logq - np.log(n) - approx_logz
# # update logz
# self.norm_const.logz = self.norm_const.logz * 0.9 + approx_logz * 0.1
return np.exp(log_scalar)
def update(self, data_list, sample_list):
# compute the scalars
data_scalar = np.ones(len(data_list)) / len(data_list)
# sample_scalar = np.ones(len(sample_list)) / len(sample_list)
sample_scalar = self.get_sample_scaler(sample_list)
# update phi
with self.time_recoder.recode('update_word'):
self.phi_word.update(data_list, data_scalar, sample_list, sample_scalar,
learning_rate=self.cur_lr_word)
if not self.config.fix_crf_model:
sample_x_list = [s.x[0] for s in sample_list]
with self.time_recoder.recode('update_marginal'):
sample_fp_logps_list = self.marginal_logps(sample_x_list)
with self.time_recoder.recode('update_tag'):
self.phi_tag.update(data_list, data_scalar, sample_list, sample_scalar,
sample_fp_logps_list=sample_fp_logps_list,
learning_rate=self.cur_lr_tag)
with self.time_recoder.recode('update_mix'):
self.phi_mix.update(data_list, data_scalar, sample_list, sample_scalar,
sample_fp_logps_list=sample_fp_logps_list,
learning_rate=self.cur_lr_mix)
# update zeta
with self.time_recoder.recode('update_logz'):
self.norm_const.update(sample_list, sample_scalar, learning_rate=self.cur_lr_logz)
        # update the simulator
with self.time_recoder.recode('update_simulater'):
self.mcmc.update(sample_list)
# update dbg info
self.sample_cur_pi.fill(0)
for x in sample_list:
self.sample_cur_pi[len(x)] += 1
self.sample_acc_count += self.sample_cur_pi
self.sample_cur_pi /= self.sample_cur_pi.sum()
return None
def write_log_zeta(self, step, true_logz=None):
# write zeta, logz, pi
f = self.write_files.get('zeta')
f.write('step={}\n'.format(step))
log.write_array(f, self.sample_cur_pi[self.config.min_len:], name='cur_pi')
log.write_array(f, self.sample_acc_count[self.config.min_len:] / self.sample_acc_count.sum(), name='all_pi')
log.write_array(f, self.config.pi_true[self.config.min_len:], name='pi_0 ')
log.write_array(f, self.norm_const.get_logz(), name='logz ')
if true_logz is not None:
log.write_array(f, true_logz, name='truez ')
| 37.764286
| 117
| 0.607906
|
5ba06710b1351dee9c32b5747102f2543aa9cf87
| 76
|
py
|
Python
|
loggz/__init__.py
|
crist0phe/python-loggz
|
664c79e725161c9c7f6ab9792282256d1e2973af
|
[
"MIT"
] | null | null | null |
loggz/__init__.py
|
crist0phe/python-loggz
|
664c79e725161c9c7f6ab9792282256d1e2973af
|
[
"MIT"
] | null | null | null |
loggz/__init__.py
|
crist0phe/python-loggz
|
664c79e725161c9c7f6ab9792282256d1e2973af
|
[
"MIT"
] | null | null | null |
from . import handlers
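# expose GzipRotatingFileHandler at the package top level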
GzipRotatingFileHandler = handlers.GzipRotatingFileHandler
| 19
| 58
| 0.894737
|
a2a3846a3cb0b4885f5f40658f7f244fa8362d14
| 1,609
|
py
|
Python
|
test/test_inline_response_200_13.py
|
scubawhere/scubawhere-api-python-client
|
9f8578e251492c7667f785df7b7c9d66e71f5c8e
|
[
"Apache-2.0"
] | null | null | null |
test/test_inline_response_200_13.py
|
scubawhere/scubawhere-api-python-client
|
9f8578e251492c7667f785df7b7c9d66e71f5c8e
|
[
"Apache-2.0"
] | null | null | null |
test/test_inline_response_200_13.py
|
scubawhere/scubawhere-api-python-client
|
9f8578e251492c7667f785df7b7c9d66e71f5c8e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.inline_response_200_13 import InlineResponse20013
class TestInlineResponse20013(unittest.TestCase):
""" InlineResponse20013 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20013(self):
"""
Test InlineResponse20013
"""
model = swagger_client.models.inline_response_200_13.InlineResponse20013()
if __name__ == '__main__':
unittest.main()
| 29.796296
| 227
| 0.735861
|
a902a30a373ba7485ef94f78c666bfe52f4cfbb1
| 664
|
py
|
Python
|
main.py
|
MohammadEbrahimy/mlp
|
b915373095f50928982ec279a10db322dbe2741f
|
[
"MIT"
] | null | null | null |
main.py
|
MohammadEbrahimy/mlp
|
b915373095f50928982ec279a10db322dbe2741f
|
[
"MIT"
] | null | null | null |
main.py
|
MohammadEbrahimy/mlp
|
b915373095f50928982ec279a10db322dbe2741f
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import NeuralNetwork as NN
# Import the training and test data
train = pd.read_csv("dataset/nba_logreg.csv", header=None)
test = pd.read_csv("dataset/test.csv", header=None)
train = train.dropna()
test = test.dropna()
# Split the train and test data
# use the builtin float (the deprecated np.float alias was removed in NumPy 1.24)
x_train = train.values[1:, 1:20].astype(float)
y_train = train.values[1:, 20].astype(float)
x_test = test.values[1:, 2:21].astype(float)
y_test = test.values[1:, 21].astype(float)
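# Stack the network layers: hidden layers of 10, 15, 5 and 2 units, then a single output unit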
mlp = NN.MlpClassifier()
mlp.add_layer(10)
mlp.add_layer(15)
mlp.add_layer(5)
mlp.add_layer(2)
mlp.add_layer(1)
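# Train and evaluate; the positional arguments of learn() are presumably the
# learning rate (0.08) and the number of training iterations (500)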
mlp.learn(x_train, y_train, 0.08, 500)
mlp.predict(x_test, y_test)
| 22.896552
| 58
| 0.731928
|
162fe87431cde8337b5ca6386e6faa0f20d8479a
| 2,631
|
py
|
Python
|
config/settings/local.py
|
abrar78/deploy-cookie-cutter
|
226be50016a96eb2604f6e25d5ea7540e594186b
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
abrar78/deploy-cookie-cutter
|
226be50016a96eb2604f6e25d5ea7540e594186b
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
abrar78/deploy-cookie-cutter
|
226be50016a96eb2604f6e25d5ea7540e594186b
|
[
"MIT"
] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="MZsQZJnSspOrbn0OcB8m3HZR4zHXSbXGA8u8KQwYWs40nTMH4DF2Ex8LuoH9kpFK",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = env("EMAIL_HOST", default="mailhog")
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 40.476923
| 97
| 0.581148
|
9b39dbd39b09969cd356c8b7faf1226694226b70
| 15,705
|
py
|
Python
|
test/test_subtitles.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 80
|
2021-05-25T11:33:49.000Z
|
2022-03-29T20:36:53.000Z
|
test/test_subtitles.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 22
|
2021-05-08T13:44:12.000Z
|
2022-03-30T01:27:23.000Z
|
test/test_subtitles.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 22
|
2021-05-07T05:01:27.000Z
|
2022-03-26T19:10:54.000Z
|
#!/usr/bin/env python3
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, md5, is_download_test
from yt_dlp.extractor import (
YoutubeIE,
DailymotionIE,
TEDIE,
VimeoIE,
WallaIE,
CeskaTelevizeIE,
LyndaIE,
NPOIE,
PBSIE,
ComedyCentralIE,
NRKTVIE,
RaiPlayIE,
VikiIE,
ThePlatformIE,
ThePlatformFeedIE,
RTVEALaCartaIE,
DemocracynowIE,
)
@is_download_test
class BaseTestSubtitles(unittest.TestCase):
url = None
IE = None
def setUp(self):
self.DL = FakeYDL()
self.ie = self.IE()
self.DL.add_info_extractor(self.ie)
def getInfoDict(self):
info_dict = self.DL.extract_info(self.url, download=False)
return info_dict
def getSubtitles(self):
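        # fetch any subtitle tracks returned only as URLs and build a {language: data} mapping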
info_dict = self.getInfoDict()
subtitles = info_dict['requested_subtitles']
if not subtitles:
return subtitles
for sub_info in subtitles.values():
if sub_info.get('data') is None:
uf = self.DL.urlopen(sub_info['url'])
sub_info['data'] = uf.read().decode('utf-8')
return dict((l, sub_info['data']) for l, sub_info in subtitles.items())
@is_download_test
class TestYoutubeSubtitles(BaseTestSubtitles):
url = 'QRS8MkLhQmM'
IE = YoutubeIE
def test_youtube_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(len(subtitles.keys()), 13)
self.assertEqual(md5(subtitles['en']), '688dd1ce0981683867e7fe6fde2a224b')
self.assertEqual(md5(subtitles['it']), '31324d30b8430b309f7f5979a504a769')
for lang in ['fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
def test_youtube_subtitles_ttml_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'ttml'
subtitles = self.getSubtitles()
self.assertEqual(md5(subtitles['en']), 'c97ddf1217390906fa9fbd34901f3da2')
def test_youtube_subtitles_vtt_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'vtt'
subtitles = self.getSubtitles()
self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
def test_youtube_automatic_captions(self):
self.url = '8YoUxe5ncPo'
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslangs'] = ['it']
subtitles = self.getSubtitles()
self.assertTrue(subtitles['it'] is not None)
def test_youtube_no_automatic_captions(self):
self.url = 'QRS8MkLhQmM'
self.DL.params['writeautomaticsub'] = True
subtitles = self.getSubtitles()
self.assertTrue(not subtitles)
def test_youtube_translated_subtitles(self):
# This video has a subtitles track, which can be translated
self.url = 'i0ZabxXmH4Y'
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslangs'] = ['it']
subtitles = self.getSubtitles()
self.assertTrue(subtitles['it'] is not None)
def test_youtube_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'n5BB19UTcdA'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
@is_download_test
class TestDailymotionSubtitles(BaseTestSubtitles):
url = 'http://www.dailymotion.com/video/xczg00'
IE = DailymotionIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) >= 6)
self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
for lang in ['es', 'fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
@is_download_test
class TestTedSubtitles(BaseTestSubtitles):
url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
IE = TEDIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) >= 28)
self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
for lang in ['es', 'fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
@is_download_test
class TestVimeoSubtitles(BaseTestSubtitles):
url = 'http://vimeo.com/76979871'
IE = VimeoIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vimeo.com/56015672'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
@is_download_test
class TestWallaSubtitles(BaseTestSubtitles):
url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
IE = WallaIE
def test_allsubtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['heb']))
self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
@is_download_test
class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
IE = CeskaTelevizeIE
def test_allsubtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['cs']))
self.assertTrue(len(subtitles['cs']) > 20000)
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
@is_download_test
class TestLyndaSubtitles(BaseTestSubtitles):
url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
IE = LyndaIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
@is_download_test
class TestNPOSubtitles(BaseTestSubtitles):
url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
IE = NPOIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['nl']))
self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
@is_download_test
class TestMTVSubtitles(BaseTestSubtitles):
url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
IE = ComedyCentralIE
def getInfoDict(self):
return super(TestMTVSubtitles, self).getInfoDict()['entries'][0]
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961')
@is_download_test
class TestNRKSubtitles(BaseTestSubtitles):
url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
IE = NRKTVIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['no']))
self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')
@is_download_test
class TestRaiPlaySubtitles(BaseTestSubtitles):
IE = RaiPlayIE
def test_subtitles_key(self):
self.url = 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['it']))
self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
def test_subtitles_array_key(self):
self.url = 'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['it']))
self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')
@is_download_test
class TestVikiSubtitles(BaseTestSubtitles):
url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
IE = VikiIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')
@is_download_test
class TestThePlatformSubtitles(BaseTestSubtitles):
# from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
# (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
url = 'theplatform:JFUjUE1_ehvq'
IE = ThePlatformIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
@is_download_test
class TestThePlatformFeedSubtitles(BaseTestSubtitles):
url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
IE = ThePlatformFeedIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
@is_download_test
class TestRtveSubtitles(BaseTestSubtitles):
url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
IE = RTVEALaCartaIE
def test_allsubtitles(self):
print('Skipping, only available from Spain')
return
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['es']))
self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')
@is_download_test
class TestDemocracynowSubtitles(BaseTestSubtitles):
url = 'http://www.democracynow.org/shows/2015/7/3'
IE = DemocracynowIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
def test_subtitles_in_page(self):
self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
@is_download_test
class TestPBSSubtitles(BaseTestSubtitles):
url = 'https://www.pbs.org/video/how-fantasy-reflects-our-world-picecq/'
IE = PBSIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
def test_subtitles_dfxp_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'dfxp'
subtitles = self.getSubtitles()
self.assertIn(md5(subtitles['en']), ['643b034254cdc3768ff1e750b6b5873b'])
def test_subtitles_vtt_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'vtt'
subtitles = self.getSubtitles()
self.assertIn(
md5(subtitles['en']), ['937a05711555b165d4c55a9667017045', 'f49ea998d6824d94959c8152a368ff73'])
def test_subtitles_srt_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'srt'
subtitles = self.getSubtitles()
self.assertIn(md5(subtitles['en']), ['2082c21b43759d9bf172931b2f2ca371'])
def test_subtitles_sami_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'sami'
subtitles = self.getSubtitles()
self.assertIn(md5(subtitles['en']), ['4256b16ac7da6a6780fafd04294e85cd'])
if __name__ == '__main__':
unittest.main()
| 37.843373
| 137
| 0.677619
|
aa59c205a6a3710c0e7778c0c85ca22b051a3f7e
| 2,708
|
py
|
Python
|
tests/pyvoronoi_performance.py
|
Voxel8/pyvoronoi
|
8ba7957141a3f39a90b48127bf971be76275faba
|
[
"BSL-1.0",
"MIT"
] | 15
|
2015-11-21T19:03:19.000Z
|
2021-11-30T06:55:23.000Z
|
tests/pyvoronoi_performance.py
|
Voxel8/pyvoronoi
|
8ba7957141a3f39a90b48127bf971be76275faba
|
[
"BSL-1.0",
"MIT"
] | 13
|
2016-01-31T17:01:39.000Z
|
2022-02-19T13:33:21.000Z
|
tests/pyvoronoi_performance.py
|
Voxel8/pyvoronoi
|
8ba7957141a3f39a90b48127bf971be76275faba
|
[
"BSL-1.0",
"MIT"
] | 9
|
2015-10-22T03:24:14.000Z
|
2022-01-09T17:11:03.000Z
|
"""
This script is here to let users test the size of problem that can be solved on their machine given
their hardware constraints and their software configuration. It can also be used to measure possible
performance enhancements in the future.
"""
from __future__ import division
import time, logging
import pyvoronoi
def build_and_solve_voronoi_problem(max_x, max_y):
"""
    A function used to test the library's performance. It creates a set of points and segments and then solves the
    Voronoi problem. It starts from the origin of a grid and builds vertical segments with a length of 1 unit until it
    reaches max_x and max_y.
:param max_x: The maximum x value used for building segments.
    :param max_y: The maximum y value used for building segments.
:return:
"""
factor = 10
pyvoronoi.SILENT = True
pv = pyvoronoi.Pyvoronoi(factor)
count_points = 0
    for x in range(max_x):
        for y in range(max_y):
pv.AddPoint([x + 0.5, y + 0.5])
count_points += 1
count_segment = 0
    for x in range(max_x):
        for y in range(max_y):
pv.AddSegment([[x,y], [x, y + 1]])
count_segment += 1
time_before = time.time()
pv.Construct()
time_after = time.time()
logging.info("Run pyvoronoi. Time (sec): {0}. Number of input points: {1} - segments: {2}".format(
time_after - time_before,
count_points,
count_segment
))
logging.info("Count output structures. Vertices: {0}, Edges: {1}, Cells: {2}".format(
pv.CountVertices(),
pv.CountEdges(),
pv.CountCells(),
))
logging.info('Start parsing edges - Evaluating performance for curve computation')
time_before = time.time()
count_curved_edges = 0
    for i in range(pv.CountEdges()):
        e = pv.GetEdge(i)
        # Check the vertex indices before dereferencing them: infinite edges carry -1.
        if e.start != -1 and e.end != -1:
            startVertex = pv.GetVertex(e.start)
            endVertex = pv.GetVertex(e.end)
            max_distance = pyvoronoi.Distance([startVertex.X, startVertex.Y], [endVertex.X, endVertex.Y]) / 10
            if not e.is_linear:
                points = pv.DiscretizeCurvedEdge(i, max_distance, 1 / factor)
                count_curved_edges += 1
time_after = time.time()
logging.info('Done parsing {0} curved edges. Done in {1} sec'.format(count_curved_edges, time_after - time_before))
del pv
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - Script: %(filename)s - Line: %(lineno)d - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M:%S')
build_and_solve_voronoi_problem(100, 1000)
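# Scale note (added comment): the default call above creates 100 * 1000 = 100,000 points
# and the same number of unit segments; adjust max_x/max_y to probe larger or smaller
# problem sizes on a given machine.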
| 36.106667
| 120
| 0.639217
|
3f2371e3c7204918c1aafb9022fa0214fd7e60c7
| 308
|
py
|
Python
|
napari/layers/image/experimental/__init__.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/image/experimental/__init__.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/image/experimental/__init__.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
"""layers.image.experimental
"""
from .octree_intersection import OctreeIntersection
from .octree_level import OctreeLevel
from .octree_tile_builder import create_multi_scale_from_image
from .octree_util import ImageConfig, OctreeChunk, TestImageSettings
# from .octree_image import OctreeImage # circular
| 34.222222
| 68
| 0.850649
|
41f5f959188191e4c47d2c710867ed1694d41903
| 70,285
|
py
|
Python
|
src/transformers/models/bart/modeling_tf_bart.py
|
syskn/transformers
|
fafbd2574cb12b987099f69b3821814042d8f4ce
|
[
"Apache-2.0"
] | 39
|
2021-04-30T06:06:30.000Z
|
2022-03-12T11:56:06.000Z
|
src/transformers/models/bart/modeling_tf_bart.py
|
syskn/transformers
|
fafbd2574cb12b987099f69b3821814042d8f4ce
|
[
"Apache-2.0"
] | 4
|
2021-04-05T19:56:47.000Z
|
2022-01-18T17:12:28.000Z
|
src/transformers/models/bart/modeling_tf_bart.py
|
syskn/transformers
|
fafbd2574cb12b987099f69b3821814042d8f4ce
|
[
"Apache-2.0"
] | 30
|
2021-04-30T07:11:22.000Z
|
2022-03-15T19:34:58.000Z
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Bart model. """
import random
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_bart import BartConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/bart-large"
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
LARGE_NEGATIVE = -1e8
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
shifted_input_ids = tf.roll(input_ids, 1, axis=-1)
start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
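# Worked example (added comment, not in the original file), assuming pad_token_id=1 and
# decoder_start_token_id=2:
#   input_ids = [[5, 6, 7, 1]]  ->  shift_tokens_right(...) = [[2, 5, 6, 7]]
# i.e. the decoder start token is prepended and the sequence is shifted one step to the
# right; any -100 label values are replaced by the pad token after the shift.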
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
    Make causal mask used for uni-directional (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
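# Illustration (added comment): for tgt_len=3 and no past keys the returned mask is
#   [[0, -1e8, -1e8],
#    [0,    0, -1e8],
#    [0,    0,    0]]
# broadcast to (bsz, 1, 3, 3), so position i can only attend to positions <= i.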
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
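# Illustration (added comment): attention_mask [[1, 1, 0]] with tgt_len=2 expands to a
# (1, 1, 2, 3) tensor whose last axis is [0, 0, -1e8] for both target positions, i.e.
# padded source tokens receive a large negative bias before the softmax.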
class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
        # Bart is set up so that if padding_idx is specified, the embedding ids are offset by 2
        # and num_embeddings is adjusted accordingly. Other models don't have this hack.
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return super().call(positions + self.offset)
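# Example (added comment): with self.offset == 2, decoder positions 0..seq_len-1 are
# looked up at embedding rows 2..seq_len+1, which is why the table is allocated with
# num_embeddings + 2 rows in __init__ above.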
class TFBartAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
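    # Shape illustration (added comment): _shape maps (bsz, seq_len, num_heads * head_dim)
    # to (bsz, num_heads, seq_len, head_dim) so attention can be computed per head in a
    # single batched matmul.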
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
class TFBartEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFBartAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
"""
residual = hidden_states
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, self_attn_weights
class TFBartDecoderLayer(tf.keras.layers.Layer):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFBartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
name="self_attn",
is_decoder=True,
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.encoder_attn = TFBartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
name="encoder_attn",
is_decoder=True,
)
self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
cross_attn_layer_head_mask (:obj:`tf.Tensor`): mask for heads of the cross-attention module.
`(decoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (
hidden_states,
self_attn_weights,
cross_attn_weights,
present_key_value,
)
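# Cache layout illustration (added comment): when cross-attention runs, present_key_value
# is the 4-tuple (self_key, self_value, cross_key, cross_value); the self_* states grow by
# one time step per decoding iteration while the cross_* states stay fixed to the encoder
# output length.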
class TFBartPretrainedModel(TFPreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
@property
def dummy_inputs(self):
pad_token = 1
input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
dummy_inputs = {
"decoder_input_ids": decoder_input_ids,
"attention_mask": tf.math.not_equal(input_ids, pad_token),
"input_ids": input_ids,
}
return dummy_inputs
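    # Note (added comment): DUMMY_INPUTS is a small fixed id matrix imported from
    # modeling_tf_utils; the mask marks every id not equal to the pad token (1) as
    # attendable, which is enough to build the network's weights on the first call.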
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
"decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
BART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
        - having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.
        This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having
        all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
model weights.
"""
BART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import BartTokenizer, TFBartForConditionalGeneration, BartConfig
>>> model = TFBartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
Mask filling example::
>>> from transformers import BartTokenizer, TFBartForConditionalGeneration
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = TFBartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> input_ids = tokenizer([TXT], return_tensors='tf')['input_ids']
>>> logits = model(input_ids).logits
>>> probs = tf.nn.softmax(logits[0])
>>> # probs[5] is associated with the mask token
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
Bart uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no
:obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
the right for denoising pre-training following the paper.
decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Will be made by default and ignores pad tokens. It is not recommended to set this for most use cases.
head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (:obj:`tf.FloatTensor`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder, of shape
            :obj:`(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
        past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`). Set to :obj:`False` during training and :obj:`True` during generation.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
            argument can be used in eager mode; in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFBartEncoder(tf.keras.layers.Layer):
config_class = BartConfig
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`TFBartEncoderLayer`.
Args:
config: BartConfig
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
self.embed_positions = TFBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs["inputs_embeds"] + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# check attention mask and invert
if inputs["attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(inputs["attention_mask"])
else:
attention_mask = None
encoder_states = () if inputs["output_hidden_states"] else None
all_attentions = () if inputs["output_attentions"] else None
# check if head_mask has a correct number of layers specified if desired
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if inputs["head_mask"] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs["head_mask"])[0],
len(self.layers),
message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.",
)
# encoder layers
for idx, encoder_layer in enumerate(self.layers):
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop): # skip the layer
continue
hidden_states, attn = encoder_layer(
hidden_states,
attention_mask,
inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
)
if inputs["output_attentions"]:
all_attentions += (attn,)
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
@keras_serializable
class TFBartDecoder(tf.keras.layers.Layer):
config_class = BartConfig
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFBartDecoderLayer`
Args:
config: BartConfig
embed_tokens: output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.padding_idx = config.pad_token_id
self.embed_tokens = embed_tokens
self.layerdrop = config.decoder_layerdrop
self.embed_positions = TFBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.layers = [TFBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = (
shape_list(inputs["past_key_values"][0][0])[2] if inputs["past_key_values"] is not None else 0
)
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
hidden_states = inputs["inputs_embeds"]
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
else:
combined_attention_mask = _expand_mask(
tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
)
if inputs["attention_mask"] is not None:
combined_attention_mask = combined_attention_mask + _expand_mask(
inputs["attention_mask"], tgt_len=input_shape[-1]
)
if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1])
hidden_states = self.layernorm_embedding(hidden_states + positions)
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# decoder layers
all_hidden_states = () if inputs["output_hidden_states"] else None
all_self_attns = () if inputs["output_attentions"] else None
all_cross_attns = () if (inputs["output_attentions"] and inputs["encoder_hidden_states"] is not None) else None
present_key_values = () if inputs["use_cache"] else None
# check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
for attn_mask in ["head_mask", "cross_attn_head_mask"]:
if inputs[attn_mask] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs[attn_mask])[0],
len(self.layers),
message=f"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.",
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop):
continue
past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
layer_head_mask=inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
cross_attn_layer_head_mask=inputs["cross_attn_head_mask"][idx]
if inputs["cross_attn_head_mask"] is not None
else None,
past_key_value=past_key_value,
)
if inputs["use_cache"]:
present_key_values += (present_key_value,)
if inputs["output_attentions"]:
all_self_attns += (layer_self_attn,)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns += (layer_cross_attn,)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
if inputs["output_attentions"]:
all_self_attns = list(all_self_attns)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns = list(all_cross_attns)
if inputs["use_cache"]:
present_key_values = (inputs["encoder_hidden_states"], present_key_values)
if not inputs["return_dict"]:
return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
else:
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
@keras_serializable
class TFBartMainLayer(tf.keras.layers.Layer):
config_class = BartConfig
def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared")
# set tf scope correctly
if load_weight_prefix is None:
load_weight_prefix = "model.shared"
with tf.compat.v1.variable_scope(load_weight_prefix) as shared_abs_scope_name:
pass
        # Wrap the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
embed_tokens.vocab_size = self.shared.vocab_size
embed_tokens.hidden_size = self.shared.hidden_size
self.encoder = TFBartEncoder(config, embed_tokens, name="encoder")
self.decoder = TFBartDecoder(config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared.weight = new_embeddings
self.shared.vocab_size = self.shared.weight.shape[0]
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
        # Wrap the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
self.encoder.set_embed_tokens(embed_tokens)
self.decoder.set_embed_tokens(embed_tokens)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None:
inputs["use_cache"] = False
inputs["output_hidden_states"] = (
inputs["output_hidden_states"]
if inputs["output_hidden_states"] is not None
else self.config.output_hidden_states
)
if inputs["decoder_input_ids"] is None and inputs["input_ids"] is not None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["input_ids"], self.config.pad_token_id, self.config.decoder_start_token_id
)
if inputs["encoder_outputs"] is None:
inputs["encoder_outputs"] = self.encoder(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput):
inputs["encoder_outputs"] = TFBaseModelOutput(
last_hidden_state=inputs["encoder_outputs"][0],
hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None,
attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None,
)
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple):
inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple()
decoder_outputs = self.decoder(
inputs["decoder_input_ids"],
attention_mask=inputs["decoder_attention_mask"],
encoder_hidden_states=inputs["encoder_outputs"][0],
encoder_attention_mask=inputs["attention_mask"],
head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
if not inputs["return_dict"]:
return decoder_outputs + inputs["encoder_outputs"]
return TFSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state,
encoder_hidden_states=inputs["encoder_outputs"].hidden_states,
encoder_attentions=inputs["encoder_outputs"].attentions,
)
@add_start_docstrings(
"The bare BART Model outputting raw hidden-states without any specific head on top.",
BART_START_DOCSTRING,
)
class TFBartModel(TFBartPretrainedModel):
_requires_load_weight_prefix = True
def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSeq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
encoder_outputs=inputs["encoder_outputs"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqModelOutput(
last_hidden_state=output.last_hidden_state,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
@add_start_docstrings(
"The BART Model with a language modeling head. Can be used for summarization.",
BART_START_DOCSTRING,
)
class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss):
_keys_to_ignore_on_load_unexpected = [
r"model.encoder.embed_tokens.weight",
r"model.decoder.embed_tokens.weight",
]
_requires_load_weight_prefix = True
def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
self.use_cache = config.use_cache
        # final_logits_bias is registered as a buffer in PyTorch, so it is kept non-trainable here for the sake of consistency.
self.final_logits_bias = self.add_weight(
name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
)
def get_decoder(self):
return self.model.decoder
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
def get_bias(self):
return {"final_logits_bias": self.final_logits_bias}
def set_bias(self, value):
self.final_logits_bias = value["final_logits_bias"]
@add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BART_GENERATION_EXAMPLE)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["labels"] is not None:
inputs["labels"] = tf.where(
inputs["labels"] == self.config.pad_token_id,
tf.fill(shape_list(inputs["labels"]), -100),
inputs["labels"],
)
inputs["use_cache"] = False
if inputs["decoder_input_ids"] is None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["labels"], self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
encoder_outputs=inputs["encoder_outputs"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
lm_logits = self.model.shared(outputs[0], mode="linear")
lm_logits = lm_logits + self.final_logits_bias
masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits)
if not inputs["return_dict"]:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
            past_key_values=outputs.past_key_values,  # index 1 of decoder outputs
            decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of decoder outputs
            decoder_attentions=outputs.decoder_attentions,  # index 3 of decoder outputs
            cross_attentions=outputs.cross_attentions,  # index 4 of decoder outputs
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
            encoder_hidden_states=outputs.encoder_hidden_states,  # index 1 of encoder outputs
            encoder_attentions=outputs.encoder_attentions,  # index 2 of encoder outputs
)
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past,
attention_mask,
head_mask=None,
use_cache=None,
**kwargs,
) -> Dict:
        assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1 or 2, got {past}"
if len(past) == 1:
assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])
past_key_values = None
else:
assert (
len(past) == 2
), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position."
encoder_outputs, past_key_values = past
if isinstance(encoder_outputs, tuple):
assert isinstance(
encoder_outputs[0], tf.Tensor
), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])
elif isinstance(encoder_outputs, tf.Tensor):
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)
assert (
past_key_values
), f"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past"
decoder_input_ids = decoder_input_ids[:, -1:]
assert isinstance(
encoder_outputs, TFBaseModelOutput
), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}."
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
@staticmethod
def _reorder_cache(past, beam_idx):
if len(past) == 1:
return past
past_key_values = past[1]
reordered_past = ()
for layer_past_key_values in past_key_values:
reordered_past += (
tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])
+ layer_past_key_values[2:],
)
return (past[0], reordered_past)
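# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of running the conditional-generation head above
# for summarization. It assumes the surrounding `transformers` package is
# installed and that the "facebook/bart-large-cnn" checkpoint is reachable;
# the checkpoint name and the sample text are assumptions for demonstration.
if __name__ == "__main__":
    from transformers import BartTokenizer, TFBartForConditionalGeneration

    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")

    text = "The tower is 324 metres tall, about the same height as an 81-storey building."
    batch = tokenizer([text], return_tensors="tf")
    # generate() drives prepare_inputs_for_generation()/_reorder_cache() above.
    summary_ids = model.generate(batch["input_ids"], num_beams=4, max_length=32)
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))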
| 46.639018
| 236
| 0.657992
|
ed931e1769f353296b9c3df61f997c5d42fc8db4
| 7,422
|
py
|
Python
|
deepdoctection/extern/tp/tpfrcnn/modeling/model_box.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 39
|
2021-12-14T11:05:25.000Z
|
2022-03-31T18:50:58.000Z
|
deepdoctection/extern/tp/tpfrcnn/modeling/model_box.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 17
|
2022-01-04T14:32:26.000Z
|
2022-03-29T14:01:36.000Z
|
deepdoctection/extern/tp/tpfrcnn/modeling/model_box.py
|
deepdoctection/deepdoctection
|
7e0d7396e5ef8bf8109904e09c5d4ee56cb5a036
|
[
"Apache-2.0"
] | 4
|
2022-01-11T16:40:17.000Z
|
2022-03-30T02:09:55.000Z
|
# -*- coding: utf-8 -*-
# File: model_box.py
# Copyright (c) Tensorpack Contributors
# Licensed under the Apache License, Version 2.0 (the "License")
"""
This file is modified from
https://github.com/tensorpack/tensorpack/blob/master/examples/FasterRCNN/modeling/model_box.py
"""
from collections import namedtuple
import numpy as np
# pylint: disable=import-error
import tensorflow as tf
from tensorpack.tfutils.scope_utils import under_name_scope
# pylint: enable=import-error
@under_name_scope()
def clip_boxes(boxes, window, name=None):
"""
clip boxes
:param boxes: nx4, xyxy
:param window: [h, w]
:param name: (str)
:return:
"""
boxes = tf.maximum(boxes, 0.0)
mat = tf.tile(tf.reverse(window, [0]), [2]) # (4,)
boxes = tf.minimum(boxes, tf.cast(mat, tf.float32), name=name)
return boxes
@under_name_scope()
def decode_bbox_target(box_predictions, anchors, preproc_max_size):
"""
Decode bbox target
:param box_predictions: (..., 4), logits
:param anchors: (..., 4), float box. Must have the same shape
:param preproc_max_size: int
:return: (..., 4), float32. With the same shape.
"""
orig_shape = tf.shape(anchors)
box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1) # pylint: disable =E1124, E1120
# each is (...)x1x2
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1) # pylint: disable =E1124, E1120
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
clip = np.log(preproc_max_size / 16.0)
wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
xbyb = box_pred_txty * waha + xaya
x1y1 = xbyb - wbhb * 0.5
x2y2 = xbyb + wbhb * 0.5 # (...)x1x2
out = tf.concat([x1y1, x2y2], axis=-2) # pylint: disable =E1123, E1120
return tf.reshape(out, orig_shape)
@under_name_scope()
def encode_bbox_target(boxes, anchors):
"""
Encode bbox target
:param boxes: (..., 4), float32
:param anchors: (..., 4), float32
:return: (..., 4), float32 with the same shape.
"""
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1) # pylint: disable =E1124, E1120
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
boxes_x1y1x2y2 = tf.reshape(boxes, (-1, 2, 2))
boxes_x1y1, boxes_x2y2 = tf.split(boxes_x1y1x2y2, 2, axis=1) # pylint: disable =E1124, E1120
wbhb = boxes_x2y2 - boxes_x1y1
xbyb = (boxes_x2y2 + boxes_x1y1) * 0.5
# Note that here not all boxes are valid. Some may be zero
txty = (xbyb - xaya) / waha
twth = tf.math.log(wbhb / waha) # may contain -inf for invalid boxes
encoded = tf.concat([txty, twth], axis=1) # (-1x2x2) # pylint: disable =E1123, E1120
return tf.reshape(encoded, tf.shape(boxes))
@under_name_scope()
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):
"""
Crop and resize
:param image: NCHW
:param boxes: nx4, x1y1x2y2
:param box_ind: (n,)
:param crop_size: (int)
:param pad_border: bool
:return: n,C,size,size
"""
assert isinstance(crop_size, int), crop_size
boxes = tf.stop_gradient(boxes)
# TF's crop_and_resize produces zeros on border
if pad_border:
# this can be quite slow
image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode="SYMMETRIC") # pylint: disable =E1123, E1120
boxes += 1
@under_name_scope()
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
"""
The way tf.image.crop_and_resize works (with normalized box):
Initial point (the value of output[0]): x0_box * (W_img - 1)
Spacing: w_box * (W_img - 1) / (W_crop - 1)
            Use the above grid to bilinearly sample.
However, what we want is (with fpcoor box):
Spacing: w_box / W_crop
Initial point: x0_box + spacing/2 - 0.5
            (-0.5 because bilinear sampling (in my definition) assumes the floating point coordinate
            (0.0, 0.0) is the same as pixel value (0, 0))
            This function transforms fpcoor boxes to a format to be used by tf.image.crop_and_resize
:param boxes: nx4, x1y1x2y2
:param image_shape: shape
:param crop_shape: crop shape
:return: y1x1y2x2
"""
x_0, y_0, x_1, y_1 = tf.split(boxes, 4, axis=1) # pylint: disable =E1124, E1120
spacing_w = (x_1 - x_0) / tf.cast(crop_shape[1], tf.float32)
spacing_h = (y_1 - y_0) / tf.cast(crop_shape[0], tf.float32)
imshape = [tf.cast(image_shape[0] - 1, tf.float32), tf.cast(image_shape[1] - 1, tf.float32)]
nx0 = (x_0 + spacing_w / 2 - 0.5) / imshape[1]
ny0 = (y_0 + spacing_h / 2 - 0.5) / imshape[0]
n_w = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]
n_h = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]
return tf.concat([ny0, nx0, ny0 + n_h, nx0 + n_w], axis=1) # pylint: disable =E1123, E1120
image_shape = tf.shape(image)[2:]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
image = tf.transpose(image, [0, 2, 3, 1]) # nhwc
ret = tf.image.crop_and_resize(image, boxes, tf.cast(box_ind, tf.int32), crop_size=[crop_size, crop_size])
    ret = tf.transpose(ret, [0, 3, 1, 2])  # back to NCHW: (n, C, size, size)
return ret
@under_name_scope()
def roi_align(feature_map, boxes, resolution):
"""
Roi align
:param feature_map: 1xCxHxW
:param boxes: Nx4 float box
:param resolution: output spatial resolution
:return: Roi aligned tf.Tensor
"""
# sample 4 locations per roi bin
ret = crop_and_resize(feature_map, boxes, tf.zeros(tf.shape(boxes)[0], dtype=tf.int32), resolution * 2)
try:
avg_pool = tf.nn.avg_pool2d
except AttributeError:
avg_pool = tf.nn.avg_pool
ret = avg_pool(ret, [1, 1, 2, 2], [1, 1, 2, 2], padding="SAME", data_format="NCHW")
return ret
class RPNAnchors(namedtuple("_RPNAnchors", ["boxes", "gt_labels", "gt_boxes"])):
"""
boxes (FS x FS x NA x 4): The anchor boxes.
gt_labels (FS x FS x NA):
gt_boxes (FS x FS x NA x 4): Ground-truth boxes corresponding to each anchor.
"""
def encoded_gt_boxes(self):
"""
encoded ground truth boxes
"""
return encode_bbox_target(self.gt_boxes, self.boxes)
def decode_logits(self, logits, preproc_max_size):
"""
Decode logits
:param logits: logits
:param preproc_max_size: preprocess to max size
"""
return decode_bbox_target(logits, self.boxes, preproc_max_size)
@under_name_scope()
def narrow_to(self, featuremap):
"""
Slice anchors to the spatial size of this feature map.
"""
shape2d = tf.shape(featuremap)[2:] # h,w
slice3d = tf.concat([shape2d, [-1]], axis=0) # pylint: disable =E1123, E1120
slice4d = tf.concat([shape2d, [-1, -1]], axis=0) # pylint: disable =E1123, E1120
boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes)
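# --- Illustrative sketch (not part of the original file) ---
# A minimal numpy re-derivation of the (tx, ty, tw, th) box parameterization
# used by encode_bbox_target/decode_bbox_target above, for intuition. The
# concrete coordinates are invented for demonstration.
if __name__ == "__main__":
    anchor = np.array([10.0, 10.0, 30.0, 50.0])  # x1, y1, x2, y2
    box = np.array([12.0, 8.0, 32.0, 52.0])

    wa, ha = anchor[2] - anchor[0], anchor[3] - anchor[1]
    xa, ya = (anchor[0] + anchor[2]) / 2.0, (anchor[1] + anchor[3]) / 2.0
    wb, hb = box[2] - box[0], box[3] - box[1]
    xb, yb = (box[0] + box[2]) / 2.0, (box[1] + box[3]) / 2.0

    # Offsets are scaled by the anchor size; width/height live in log space,
    # matching encode_bbox_target. decode_bbox_target inverts these formulas.
    tx, ty = (xb - xa) / wa, (yb - ya) / ha
    tw, th = np.log(wb / wa), np.log(hb / ha)
    print(tx, ty, tw, th)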
| 34.202765
| 114
| 0.634196
|
3075459d16a2d367e78f332f074290c2a1933d29
| 512
|
py
|
Python
|
domain/src/gateway/client_gateway.py
|
python-jacksonsr45/web_services
|
6e37d4f00e9e59a35f06f05ce955ba53242ed9ee
|
[
"MIT"
] | null | null | null |
domain/src/gateway/client_gateway.py
|
python-jacksonsr45/web_services
|
6e37d4f00e9e59a35f06f05ce955ba53242ed9ee
|
[
"MIT"
] | null | null | null |
domain/src/gateway/client_gateway.py
|
python-jacksonsr45/web_services
|
6e37d4f00e9e59a35f06f05ce955ba53242ed9ee
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from ..request import ClientRequest
class ClientInterface(ABC):
@classmethod
@abstractmethod
def insert_client(cls, request: ClientRequest):
        raise NotImplementedError("Method not implemented")
@classmethod
@abstractmethod
def update_client(cls, request: ClientRequest):
        raise NotImplementedError("Method not implemented")
@classmethod
@abstractmethod
def find_client_by_id(cls, client_id):
        raise NotImplementedError("Method not implemented")
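# --- Illustrative sketch (not part of the original file) ---
# A minimal in-memory gateway satisfying ClientInterface. The classmethod
# signatures mirror the abstract contract above; the storage dict and the
# `client_id` attribute assumed on ClientRequest are invented for
# demonstration.
class InMemoryClientGateway(ClientInterface):
    _clients = {}

    @classmethod
    def insert_client(cls, request: ClientRequest):
        cls._clients[request.client_id] = request

    @classmethod
    def update_client(cls, request: ClientRequest):
        if request.client_id not in cls._clients:
            raise KeyError(request.client_id)
        cls._clients[request.client_id] = request

    @classmethod
    def find_client_by_id(cls, client_id):
        return cls._clients.get(client_id)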
| 25.6
| 51
| 0.722656
|
100eac1fe5bf134fe922de1cda775ffac6c33a74
| 123
|
py
|
Python
|
test/test_quaero.py
|
Cynnexis/Quaero
|
39c78fd560e72eaf9af1bb8b98d9dcd250b8a5f1
|
[
"MIT"
] | null | null | null |
test/test_quaero.py
|
Cynnexis/Quaero
|
39c78fd560e72eaf9af1bb8b98d9dcd250b8a5f1
|
[
"MIT"
] | null | null | null |
test/test_quaero.py
|
Cynnexis/Quaero
|
39c78fd560e72eaf9af1bb8b98d9dcd250b8a5f1
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from qr import Quaero
class TestQuaero(TestCase):
def test_search(self):
self.fail()
| 12.3
| 29
| 0.756098
|
0cbf1677d082a7664d7a7c63f0f95cfcf2cbc2ba
| 1,438
|
py
|
Python
|
infra/base-images/base-msan-builder/wrapper_utils.py
|
rbehjati/oss-fuzz
|
a31e58fb5c9984915fdc79610660b326df20b937
|
[
"Apache-2.0"
] | 17
|
2019-05-20T18:29:33.000Z
|
2021-10-10T08:20:01.000Z
|
infra/base-images/base-msan-builder/wrapper_utils.py
|
rbehjati/oss-fuzz
|
a31e58fb5c9984915fdc79610660b326df20b937
|
[
"Apache-2.0"
] | 10
|
2020-08-06T11:48:16.000Z
|
2021-06-28T10:17:12.000Z
|
infra/base-images/base-msan-builder/wrapper_utils.py
|
rbehjati/oss-fuzz
|
a31e58fb5c9984915fdc79610660b326df20b937
|
[
"Apache-2.0"
] | 2
|
2019-09-27T17:18:27.000Z
|
2019-09-27T17:18:54.000Z
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from __future__ import print_function
import contextlib
import os
import subprocess
def DpkgHostArchitecture():
"""Return the host architecture."""
return subprocess.check_output(
['dpkg-architecture', '-qDEB_HOST_GNU_TYPE']).strip()
def InstallWrapper(bin_dir, name, contents, extra_names=None):
"""Install a custom wrapper script into |bin_dir|."""
path = os.path.join(bin_dir, name)
with open(path, 'w') as f:
f.write(contents)
  os.chmod(path, 0o755)  # octal literal spelled 0o755 so this also parses under Python 3
if extra_names:
CreateSymlinks(path, bin_dir, extra_names)
def CreateSymlinks(original_path, bin_dir, extra_names):
"""Create symlinks."""
for extra_name in extra_names:
extra_path = os.path.join(bin_dir, extra_name)
os.symlink(original_path, extra_path)
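# --- Illustrative usage sketch (not part of the original file) ---
# Writes a trivial shell wrapper into a temporary bin directory and symlinks
# an alias next to it. The wrapper name, body, and alias are invented for
# demonstration.
if __name__ == '__main__':
  import tempfile
  demo_bin_dir = tempfile.mkdtemp()
  InstallWrapper(demo_bin_dir, 'cc-wrapper', '#!/bin/sh\nexec cc "$@"\n',
                 extra_names=['gcc-wrapper'])
  print(os.listdir(demo_bin_dir))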
| 29.958333
| 80
| 0.700278
|
e3e6aa71f774b5f066cc8e9504bcb538b38db9d9
| 142
|
py
|
Python
|
7_adapter&facade/adapter/mallard_duck.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
7_adapter&facade/adapter/mallard_duck.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
7_adapter&facade/adapter/mallard_duck.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
from duck import Duck
class MallardDuck(Duck):
def quack(self):
print('Quack')
def fly(self):
print('I am flying')
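# --- Illustrative usage sketch (not part of the original file) ---
# Minimal demonstration of the concrete duck; assumes duck.py (imported above)
# defines the Duck base class with the quack/fly interface exercised here.
if __name__ == '__main__':
    mallard = MallardDuck()
    mallard.quack()  # prints: Quack
    mallard.fly()    # prints: I am flying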
| 14.2
| 28
| 0.591549
|
52c138fc758d0138a994169299d8535139d165b7
| 760
|
py
|
Python
|
acmicpc/python/14418.py
|
hyeongyun0916/Algorithm
|
75f77b2500bb08f24706a8be26497aeeebd2da4f
|
[
"MIT"
] | 1
|
2018-12-21T01:33:05.000Z
|
2018-12-21T01:33:05.000Z
|
acmicpc/python/14418.py
|
hyeongyun0916/Algorithm
|
75f77b2500bb08f24706a8be26497aeeebd2da4f
|
[
"MIT"
] | null | null | null |
acmicpc/python/14418.py
|
hyeongyun0916/Algorithm
|
75f77b2500bb08f24706a8be26497aeeebd2da4f
|
[
"MIT"
] | null | null | null |
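# The underlying Baekjoon problem statement (14418) is not reproduced here.
# Mechanically: the script reads three pairs of integers, sorts each pair and
# then the three pairs in descending order, and prints YES when the largest
# value can be matched through one of the checked difference/sum relations
# among the remaining five values, NO otherwise.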
first = list(map(int,input().split()))
second = list(map(int,input().split()))
third = list(map(int,input().split()))
first.sort(reverse=True)
second.sort(reverse=True)
third.sort(reverse=True)
sortedArr = [first, second, third]
# print(sortedArr)
sortedArr.sort(reverse=True)
# print(sortedArr)
a,b,c,d,e,f = sortedArr[0][0], sortedArr[0][1], sortedArr[1][0], sortedArr[1][1], sortedArr[2][0], sortedArr[2][1]
# print(a,b,c,d,e,f)
flag = False
if a-b==c:
if a-b==f:
if a==d+e:
flag = True
if a-b==e:
if a==d+f:
flag = True
elif a-b==d:
if a-b==f:
if a==c+e:
flag = True
if a-b==e:
if a==c+f:
flag = True
elif a == c == e == b+d+f:
flag = True
if flag:
print("YES")
else:
print("NO")
| 12.881356
| 114
| 0.569737
|
9b37c07fbaeb590e3047c867d019d5c7c6ef93b5
| 1,552
|
py
|
Python
|
dojo/unittests/tools/test_safety_parser.py
|
axelpavageau/django-DefectDojo
|
00b425742b783ada0f432241c2812ac1257feb73
|
[
"BSD-3-Clause"
] | 1,772
|
2018-01-22T23:32:15.000Z
|
2022-03-31T14:49:33.000Z
|
dojo/unittests/tools/test_safety_parser.py
|
axelpavageau/django-DefectDojo
|
00b425742b783ada0f432241c2812ac1257feb73
|
[
"BSD-3-Clause"
] | 3,461
|
2018-01-20T19:12:28.000Z
|
2022-03-31T17:14:39.000Z
|
dojo/unittests/tools/test_safety_parser.py
|
axelpavageau/django-DefectDojo
|
00b425742b783ada0f432241c2812ac1257feb73
|
[
"BSD-3-Clause"
] | 1,173
|
2018-01-23T07:10:23.000Z
|
2022-03-31T14:40:43.000Z
|
from django.test import TestCase
from dojo.models import Test
from dojo.tools.safety.parser import SafetyParser
class TestSafetyParser(TestCase):
    def test_example_report(self):
        with open("dojo/unittests/scans/safety/example_report.json") as testfile:
            parser = SafetyParser()
            findings = parser.get_findings(testfile, Test())
            self.assertEqual(3, len(findings))
    def test_no_cve(self):
        with open("dojo/unittests/scans/safety/no_cve.json") as testfile:
            parser = SafetyParser()
            findings = parser.get_findings(testfile, Test())
            self.assertEqual(1, len(findings))
    def test_empty_report(self):
        with open("dojo/unittests/scans/safety/empty.json") as testfile:
            parser = SafetyParser()
            findings = parser.get_findings(testfile, Test())
            self.assertEqual(0, len(findings))
    def test_multiple_cves(self):
        with open("dojo/unittests/scans/safety/multiple_cves.json") as testfile:
            parser = SafetyParser()
            findings = parser.get_findings(testfile, Test())
            self.assertEqual(1, len(findings))
    def test_multiple2(self):
        with open("dojo/unittests/scans/safety/many_vulns.json") as testfile:
            parser = SafetyParser()
            findings = parser.get_findings(testfile, Test())
            self.assertEqual(5, len(findings))
            for finding in findings:
                if "39608" == finding.unique_id_from_tool:
                    self.assertEqual("httplib2", finding.component_name)
                    self.assertEqual("0.18.1", finding.component_version)
| 37.853659
| 74
| 0.670747
|